svn commit: r192034 - in head/sys: kern vm

Alan Cox alc at FreeBSD.org
Wed May 13 05:39:40 UTC 2009


Author: alc
Date: Wed May 13 05:39:39 2009
New Revision: 192034
URL: http://svn.freebsd.org/changeset/base/192034

Log:
  Eliminate page queues locking from bufdone_finish() through the
  following changes:
  
  Rename vfs_page_set_valid() to vfs_page_set_validclean() to reflect
  what this function actually does.  Suggested by: tegge
  
  Introduce a new version of vfs_page_set_valid() that does no more than
  what the function's name implies.  Specifically, it does not update
  the page's dirty mask, and thus it does not require the page queues
  lock to be held.
  
  Update two of the three callers to the old vfs_page_set_valid() to
  call vfs_page_set_validclean() instead because they actually require
  the page's dirty mask to be cleared.
  
  Introduce vm_page_set_valid().
  
  Reviewed by:	tegge

Modified:
  head/sys/kern/vfs_bio.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c	Wed May 13 02:55:21 2009	(r192033)
+++ head/sys/kern/vfs_bio.c	Wed May 13 05:39:39 2009	(r192034)
@@ -98,7 +98,8 @@ static void vm_hold_free_pages(struct bu
 		vm_offset_t to);
 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
 		vm_offset_t to);
-static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
+static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
+static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
 		vm_page_t m);
 static void vfs_clean_pages(struct buf *bp);
 static void vfs_setdirty(struct buf *bp);
@@ -3277,7 +3278,6 @@ bufdone_finish(struct buf *bp)
 		vm_object_t obj;
 		int iosize;
 		struct vnode *vp = bp->b_vp;
-		boolean_t are_queues_locked;
 
 		obj = bp->b_bufobj->bo_object;
 
@@ -3314,11 +3314,6 @@ bufdone_finish(struct buf *bp)
 		    !(bp->b_ioflags & BIO_ERROR)) {
 			bp->b_flags |= B_CACHE;
 		}
-		if (bp->b_iocmd == BIO_READ) {
-			vm_page_lock_queues();
-			are_queues_locked = TRUE;
-		} else
-			are_queues_locked = FALSE;
 		for (i = 0; i < bp->b_npages; i++) {
 			int bogusflag = 0;
 			int resid;
@@ -3354,6 +3349,9 @@ bufdone_finish(struct buf *bp)
 			 * only need to do this here in the read case.
 			 */
 			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
+				KASSERT((m->dirty & vm_page_bits(foff &
+				    PAGE_MASK, resid)) == 0, ("bufdone_finish:"
+				    " page %p has unexpected dirty bits", m));
 				vfs_page_set_valid(bp, foff, m);
 			}
 
@@ -3387,8 +3385,6 @@ bufdone_finish(struct buf *bp)
 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 			iosize -= resid;
 		}
-		if (are_queues_locked)
-			vm_page_unlock_queues();
 		vm_object_pip_wakeupn(obj, 0);
 		VM_OBJECT_UNLOCK(obj);
 	}
@@ -3454,6 +3450,35 @@ vfs_unbusy_pages(struct buf *bp)
 static void
 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
 {
+	vm_ooffset_t eoff;
+
+	/*
+	 * Compute the end offset, eoff, such that [off, eoff) does not span a
+	 * page boundary and eoff is not greater than the end of the buffer.
+	 * The end of the buffer, in this case, is our file EOF, not the
+	 * allocation size of the buffer.
+	 */
+	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
+	if (eoff > bp->b_offset + bp->b_bcount)
+		eoff = bp->b_offset + bp->b_bcount;
+
+	/*
+	 * Set valid range.  This is typically the entire buffer and thus the
+	 * entire page.
+	 */
+	if (eoff > off)
+		vm_page_set_valid(m, off & PAGE_MASK, eoff - off);
+}
+
+/*
+ * vfs_page_set_validclean:
+ *
+ *	Set the valid bits and clear the dirty bits in a page based on the
+ *	supplied offset.   The range is restricted to the buffer's size.
+ */
+static void
+vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
+{
 	vm_ooffset_t soff, eoff;
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -3545,7 +3570,7 @@ retry:
 		 */
 		if (clear_modify) {
 			pmap_remove_write(m);
-			vfs_page_set_valid(bp, foff, m);
+			vfs_page_set_validclean(bp, foff, m);
 		} else if (m->valid == VM_PAGE_BITS_ALL &&
 		    (bp->b_flags & B_CACHE) == 0) {
 			bp->b_pages[i] = bogus_page;
@@ -3591,7 +3616,7 @@ vfs_clean_pages(struct buf *bp)
 
 		if (eoff > bp->b_offset + bp->b_bufsize)
 			eoff = bp->b_offset + bp->b_bufsize;
-		vfs_page_set_valid(bp, foff, m);
+		vfs_page_set_validclean(bp, foff, m);
 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
 		foff = noff;
 	}

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Wed May 13 02:55:21 2009	(r192033)
+++ head/sys/vm/vm_page.c	Wed May 13 05:39:39 2009	(r192034)
@@ -1852,6 +1852,51 @@ vm_page_bits(int base, int size)
 }
 
 /*
+ *	vm_page_set_valid:
+ *
+ *	Sets portions of a page valid.  The arguments are expected
+ *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
+ *	of any partial chunks touched by the range.  The invalid portion of
+ *	such chunks will be zeroed.
+ *
 + *	(base + size) must be less than or equal to PAGE_SIZE.
+ */
+void
+vm_page_set_valid(vm_page_t m, int base, int size)
+{
+	int endoff, frag;
+
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if (size == 0)	/* handle degenerate case */
+		return;
+
+	/*
+	 * If the base is not DEV_BSIZE aligned and the valid
+	 * bit is clear, we have to zero out a portion of the
+	 * first block.
+	 */
+	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
+	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
+		pmap_zero_page_area(m, frag, base - frag);
+
+	/*
+	 * If the ending offset is not DEV_BSIZE aligned and the 
+	 * valid bit is clear, we have to zero out a portion of
+	 * the last block.
+	 */
+	endoff = base + size;
+	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
+	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
+		pmap_zero_page_area(m, endoff,
+		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
+
+	/*
+	 * Set valid bits inclusive of any overlap.
+	 */
+	m->valid |= vm_page_bits(base, size);
+}
+
+/*
  *	vm_page_set_validclean:
  *
  *	Sets portions of a page valid and clean.  The arguments are expected

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Wed May 13 02:55:21 2009	(r192033)
+++ head/sys/vm/vm_page.h	Wed May 13 05:39:39 2009	(r192034)
@@ -321,6 +321,7 @@ vm_page_t vm_page_lookup (vm_object_t, v
 void vm_page_remove (vm_page_t);
 void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
 void vm_page_requeue(vm_page_t m);
+void vm_page_set_valid(vm_page_t m, int base, int size);
 void vm_page_sleep(vm_page_t m, const char *msg);
 vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
 vm_offset_t vm_page_startup(vm_offset_t vaddr);


More information about the svn-src-head mailing list