svn commit: r358451 - in head/sys: kern vm

Jeff Roberson jeff at FreeBSD.org
Fri Feb 28 21:42:50 UTC 2020


Author: jeff
Date: Fri Feb 28 21:42:48 2020
New Revision: 358451
URL: https://svnweb.freebsd.org/changeset/base/358451

Log:
  Provide a lock-free alternative to resolve bogus pages.  This is not likely
  to be much of a perf win, just a nice code simplification.
  
  Reviewed by:	markj, kib
  Differential Revision:	https://reviews.freebsd.org/D23866

Modified:
  head/sys/kern/kern_sendfile.c
  head/sys/kern/vfs_bio.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/kern/kern_sendfile.c
==============================================================================
--- head/sys/kern/kern_sendfile.c	Fri Feb 28 21:31:40 2020	(r358450)
+++ head/sys/kern/kern_sendfile.c	Fri Feb 28 21:42:48 2020	(r358451)
@@ -350,7 +350,6 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 {
 	vm_page_t *pa = sfio->pa;
 	int grabbed;
-	bool locked;
 
 	*nios = 0;
 	flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;
@@ -359,8 +358,6 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 	 * First grab all the pages and wire them.  Note that we grab
 	 * only required pages.  Readahead pages are dealt with later.
 	 */
-	locked = false;
-
 	grabbed = vm_page_grab_pages_unlocked(obj, OFF_TO_IDX(off),
 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
 	if (grabbed < npages) {
@@ -381,10 +378,6 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 			i++;
 			continue;
 		}
-		if (!locked) {
-			VM_OBJECT_WLOCK(obj);
-			locked = true;
-		}
 
 		/*
 		 * Next page is invalid.  Check if it belongs to pager.  It
@@ -396,8 +389,10 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 		 * stored in 'a', about how many pages we can pagein after
 		 * this page in a single I/O.
 		 */
+		VM_OBJECT_RLOCK(obj);
 		if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
 		    &a)) {
+			VM_OBJECT_RUNLOCK(obj);
 			pmap_zero_page(pa[i]);
 			vm_page_valid(pa[i]);
 			MPASS(pa[i]->dirty == 0);
@@ -405,6 +400,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 			i++;
 			continue;
 		}
+		VM_OBJECT_RUNLOCK(obj);
 
 		/*
 		 * We want to pagein as many pages as possible, limited only
@@ -435,11 +431,9 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 			}
 
 		refcount_acquire(&sfio->nios);
-		VM_OBJECT_WUNLOCK(obj);
 		rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
 		    i + count == npages ? &rhpages : NULL,
 		    &sendfile_iodone, sfio);
-		VM_OBJECT_WLOCK(obj);
 		if (__predict_false(rv != VM_PAGER_OK)) {
 			/*
 			 * Perform full pages recovery before returning EIO.
@@ -451,7 +445,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 			for (j = 0; j < npages; j++) {
 				if (j > i && j < i + count - 1 &&
 				    pa[j] == bogus_page)
-					pa[j] = vm_page_lookup(obj,
+					pa[j] = vm_page_relookup(obj,
 					    OFF_TO_IDX(vmoff(j, off)));
 				else if (j >= i)
 					vm_page_xunbusy(pa[j]);
@@ -460,7 +454,6 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 				    __func__, pa, j));
 				vm_page_unwire(pa[j], PQ_INACTIVE);
 			}
-			VM_OBJECT_WUNLOCK(obj);
 			return (EIO);
 		}
 
@@ -475,7 +468,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 		 */
 		for (j = i + 1; j < i + count - 1; j++)
 			if (pa[j] == bogus_page) {
-				pa[j] = vm_page_lookup(obj,
+				pa[j] = vm_page_relookup(obj,
 				    OFF_TO_IDX(vmoff(j, off)));
 				KASSERT(pa[j], ("%s: page %p[%d] disappeared",
 				    __func__, pa, j));
@@ -484,9 +477,6 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 		i += count;
 		(*nios)++;
 	}
-
-	if (locked)
-		VM_OBJECT_WUNLOCK(obj);
 
 	if (*nios == 0 && npages != 0)
 		SFSTAT_INC(sf_noiocnt);

Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c	Fri Feb 28 21:31:40 2020	(r358450)
+++ head/sys/kern/vfs_bio.c	Fri Feb 28 21:42:48 2020	(r358451)
@@ -2878,11 +2878,8 @@ vfs_vmio_iodone(struct buf *bp)
 		 */
 		m = bp->b_pages[i];
 		if (m == bogus_page) {
-			if (bogus == false) {
-				bogus = true;
-				VM_OBJECT_RLOCK(obj);
-			}
-			m = vm_page_lookup(obj, OFF_TO_IDX(foff));
+			bogus = true;
+			m = vm_page_relookup(obj, OFF_TO_IDX(foff));
 			if (m == NULL)
 				panic("biodone: page disappeared!");
 			bp->b_pages[i] = m;
@@ -2905,8 +2902,6 @@ vfs_vmio_iodone(struct buf *bp)
 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 		iosize -= resid;
 	}
-	if (bogus)
-		VM_OBJECT_RUNLOCK(obj);
 	vm_object_pip_wakeupn(obj, bp->b_npages);
 	if (bogus && buf_mapped(bp)) {
 		BUF_CHECK_MAPPED(bp);
@@ -4470,22 +4465,16 @@ vfs_unbusy_pages(struct buf *bp)
 	int i;
 	vm_object_t obj;
 	vm_page_t m;
-	bool bogus;
 
 	runningbufwakeup(bp);
 	if (!(bp->b_flags & B_VMIO))
 		return;
 
 	obj = bp->b_bufobj->bo_object;
-	bogus = false;
 	for (i = 0; i < bp->b_npages; i++) {
 		m = bp->b_pages[i];
 		if (m == bogus_page) {
-			if (bogus == false) {
-				bogus = true;
-				VM_OBJECT_RLOCK(obj);
-			}
-			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
+			m = vm_page_relookup(obj, OFF_TO_IDX(bp->b_offset) + i);
 			if (!m)
 				panic("vfs_unbusy_pages: page missing\n");
 			bp->b_pages[i] = m;
@@ -4498,8 +4487,6 @@ vfs_unbusy_pages(struct buf *bp)
 		}
 		vm_page_sunbusy(m);
 	}
-	if (bogus)
-		VM_OBJECT_RUNLOCK(obj);
 	vm_object_pip_wakeupn(obj, bp->b_npages);
 }
 

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Fri Feb 28 21:31:40 2020	(r358450)
+++ head/sys/vm/vm_page.c	Fri Feb 28 21:42:48 2020	(r358451)
@@ -1671,6 +1671,24 @@ vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 }
 
 /*
+ *	vm_page_relookup:
+ *
+ *	Returns a page that must already have been busied by
+ *	the caller.  Used for bogus page replacement.
+ */
+vm_page_t
+vm_page_relookup(vm_object_t object, vm_pindex_t pindex)
+{
+	vm_page_t m;
+
+	m = vm_radix_lookup_unlocked(&object->rtree, pindex);
+	KASSERT(m != NULL && vm_page_busied(m) &&
+	    m->object == object && m->pindex == pindex,
+	    ("vm_page_relookup: Invalid page %p", m));
+	return (m);
+}
+
+/*
  * This should only be used by lockless functions for releasing transient
  * incorrect acquires.  The page may have been freed after we acquired a
  * busy lock.  In this case busy_lock == VPB_FREED and we have nothing

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Fri Feb 28 21:31:40 2020	(r358450)
+++ head/sys/vm/vm_page.h	Fri Feb 28 21:42:48 2020	(r358451)
@@ -653,6 +653,7 @@ void vm_page_reference(vm_page_t m);
 #define	VPR_NOREUSE	0x02
 void vm_page_release(vm_page_t m, int flags);
 void vm_page_release_locked(vm_page_t m, int flags);
+vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
 bool vm_page_remove(vm_page_t);
 bool vm_page_remove_xbusy(vm_page_t);
 int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);


More information about the svn-src-head mailing list