svn commit: r270011 - head/sys/vm

Konstantin Belousov kib at FreeBSD.org
Fri Aug 15 07:30:14 UTC 2014


Author: kib
Date: Fri Aug 15 07:30:14 2014
New Revision: 270011
URL: http://svnweb.freebsd.org/changeset/base/270011

Log:
  Implement 'fast path' for the vm page fault handler.  Or, it could be
  called a scalable path.  When several preconditions hold, the vm
  object lock for the object containing the faulted page is taken in
  read mode, instead of write, which allows parallel processing of
  faults in the region.
  
  Namely, the fast path is taken when the faulted page already exists
  and does not need copy on write, is already fully valid, and not busy.
  For technical reasons, fast path is avoided when the fault is the
  first write on the vnode object, or when the fault is for wiring or
  debugger read or write.
  
  On the fast path, pmap_enter(9) is passed the PMAP_ENTER_NOSLEEP flag,
  since the object lock is kept.  Pmap might fail to create the entry, in
  which case the fallback to slow path is performed.
  
  Reviewed by:	alc
  Tested by:	pho (previous version)
  Hardware provided and hosted by:	The FreeBSD Foundation and
  	 Sentex Data Communications
  Sponsored by:	The FreeBSD Foundation
  MFC after:	2 weeks

Modified:
  head/sys/vm/vm_fault.c

Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c	Fri Aug 15 04:35:34 2014	(r270010)
+++ head/sys/vm/vm_fault.c	Fri Aug 15 07:30:14 2014	(r270011)
@@ -237,6 +237,7 @@ vm_fault_hold(vm_map_t map, vm_offset_t 
 	int hardfault;
 	struct faultstate fs;
 	struct vnode *vp;
+	vm_page_t m;
 	int locked, error;
 
 	hardfault = 0;
@@ -290,6 +291,55 @@ RetryFault:;
 		goto RetryFault;
 	}
 
+	if (wired)
+		fault_type = prot | (fault_type & VM_PROT_COPY);
+
+	if (fs.vp == NULL /* avoid locked vnode leak */ &&
+	    (fault_flags & (VM_FAULT_CHANGE_WIRING | VM_FAULT_DIRTY)) == 0 &&
+	    /* avoid calling vm_object_set_writeable_dirty() */
+	    ((prot & VM_PROT_WRITE) == 0 ||
+	    fs.first_object->type != OBJT_VNODE ||
+	    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0)) {
+		VM_OBJECT_RLOCK(fs.first_object);
+		if ((prot & VM_PROT_WRITE) != 0 &&
+		    fs.first_object->type == OBJT_VNODE &&
+		    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) == 0)
+			goto fast_failed;
+		m = vm_page_lookup(fs.first_object, fs.first_pindex);
+		if (m == NULL || vm_page_busied(m) ||
+		    m->valid != VM_PAGE_BITS_ALL)
+			goto fast_failed;
+		result = pmap_enter(fs.map->pmap, vaddr, m, prot,
+		   fault_type | PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED :
+		   0), 0);
+		if (result != KERN_SUCCESS)
+			goto fast_failed;
+		if (m_hold != NULL) {
+			*m_hold = m;
+			vm_page_lock(m);
+			vm_page_hold(m);
+			vm_page_unlock(m);
+		}
+		if ((fault_type & VM_PROT_WRITE) != 0 &&
+		    (m->oflags & VPO_UNMANAGED) == 0) {
+			vm_page_dirty(m);
+			vm_pager_page_unswapped(m);
+		}
+		VM_OBJECT_RUNLOCK(fs.first_object);
+		if (!wired)
+			vm_fault_prefault(&fs, vaddr, 0, 0);
+		vm_map_lookup_done(fs.map, fs.entry);
+		curthread->td_ru.ru_minflt++;
+		return (KERN_SUCCESS);
+fast_failed:
+		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
+			VM_OBJECT_RUNLOCK(fs.first_object);
+			VM_OBJECT_WLOCK(fs.first_object);
+		}
+	} else {
+		VM_OBJECT_WLOCK(fs.first_object);
+	}
+
 	/*
 	 * Make a reference to this object to prevent its disposal while we
 	 * are messing with it.  Once we have the reference, the map is free
@@ -300,15 +350,11 @@ RetryFault:;
 	 * truncation operations) during I/O.  This must be done after
 	 * obtaining the vnode lock in order to avoid possible deadlocks.
 	 */
-	VM_OBJECT_WLOCK(fs.first_object);
 	vm_object_reference_locked(fs.first_object);
 	vm_object_pip_add(fs.first_object, 1);
 
 	fs.lookup_still_valid = TRUE;
 
-	if (wired)
-		fault_type = prot | (fault_type & VM_PROT_COPY);
-
 	fs.first_m = NULL;
 
 	/*


More information about the svn-src-head mailing list