svn commit: r251717 - in user/attilio/vmobj-readlock/sys: kern vm

Attilio Rao attilio at FreeBSD.org
Thu Jun 13 22:26:47 UTC 2013


Author: attilio
Date: Thu Jun 13 22:26:45 2013
New Revision: 251717
URL: http://svnweb.freebsd.org/changeset/base/251717

Log:
  Rename vm_fault_hold() to vm_fault_handle() and make it possible to
  perform other types of manipulation on the faulted-in page, such as
  soft busying it, via a specific flag.
  
  Use vm_fault_handle() with the busy operation in proc_rwmem().
  
  Sponsored by:	EMC / Isilon storage division

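[Editor's note] Illustrative sketch (not part of this commit) of how a caller
might use the new VM_FAULT_IOBUSY flag, modeled on the proc_rwmem() change in
the diff below; map, vaddr and error are placeholder names:

	vm_page_t m;
	int error;

	/* Fault the page in and leave it soft-busied instead of held. */
	error = vm_fault_handle(map, trunc_page(vaddr), VM_PROT_READ,
	    VM_FAULT_NORMAL | VM_FAULT_IOBUSY, &m);
	if (error == KERN_SUCCESS) {
		/* ... access the page, e.g. via a temporary mapping ... */

		/* Release the soft busy under the object write lock. */
		VM_OBJECT_WLOCK(m->object);
		vm_page_io_finish(m);
		VM_OBJECT_WUNLOCK(m->object);
	}
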
Modified:
  user/attilio/vmobj-readlock/sys/kern/sys_process.c
  user/attilio/vmobj-readlock/sys/vm/vm_extern.h
  user/attilio/vmobj-readlock/sys/vm/vm_fault.c
  user/attilio/vmobj-readlock/sys/vm/vm_map.h

Modified: user/attilio/vmobj-readlock/sys/kern/sys_process.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/kern/sys_process.c	Thu Jun 13 22:24:48 2013	(r251716)
+++ user/attilio/vmobj-readlock/sys/kern/sys_process.c	Thu Jun 13 22:26:45 2013	(r251717)
@@ -263,6 +263,7 @@ proc_rwmem(struct proc *p, struct uio *u
 	writing = uio->uio_rw == UIO_WRITE;
 	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
 	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;
+	fault_flags |= VM_FAULT_IOBUSY;
 
 	/*
 	 * Only map in one page at a time.  We don't have to, but it
@@ -270,6 +271,7 @@ proc_rwmem(struct proc *p, struct uio *u
 	 */
 	do {
 		vm_offset_t uva;
+		vm_object_t obj;
 		u_int len;
 		vm_page_t m;
 
@@ -287,9 +289,9 @@ proc_rwmem(struct proc *p, struct uio *u
 		len = min(PAGE_SIZE - page_offset, uio->uio_resid);
 
 		/*
-		 * Fault and hold the page on behalf of the process.
+		 * Fault and busy the page on behalf of the process.
 		 */
-		error = vm_fault_hold(map, pageno, reqprot, fault_flags, &m);
+		error = vm_fault_handle(map, pageno, reqprot, fault_flags, &m);
 		if (error != KERN_SUCCESS) {
 			if (error == KERN_RESOURCE_SHORTAGE)
 				error = ENOMEM;
@@ -315,9 +317,9 @@ proc_rwmem(struct proc *p, struct uio *u
 		/*
 		 * Release the page.
 		 */
-		vm_page_lock(m);
-		vm_page_unhold(m);
-		vm_page_unlock(m);
+		VM_OBJECT_WLOCK(m->object);
+		vm_page_io_finish(m);
+		VM_OBJECT_WUNLOCK(m->object);
 
 	} while (error == 0 && uio->uio_resid > 0);
 

Modified: user/attilio/vmobj-readlock/sys/vm/vm_extern.h
==============================================================================
--- user/attilio/vmobj-readlock/sys/vm/vm_extern.h	Thu Jun 13 22:24:48 2013	(r251716)
+++ user/attilio/vmobj-readlock/sys/vm/vm_extern.h	Thu Jun 13 22:26:45 2013	(r251717)
@@ -63,7 +63,7 @@ void vm_fault_copy_entry(vm_map_t, vm_ma
     vm_ooffset_t *);
 int vm_fault_disable_pagefaults(void);
 void vm_fault_enable_pagefaults(int save);
-int vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
+int vm_fault_handle(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
     int fault_flags, vm_page_t *m_hold);
 int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
     vm_prot_t prot, vm_page_t *ma, int max_count);

Modified: user/attilio/vmobj-readlock/sys/vm/vm_fault.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/vm/vm_fault.c	Thu Jun 13 22:24:48 2013	(r251716)
+++ user/attilio/vmobj-readlock/sys/vm/vm_fault.c	Thu Jun 13 22:26:45 2013	(r251717)
@@ -221,8 +221,8 @@ vm_fault(vm_map_t map, vm_offset_t vaddr
 	if (map != kernel_map && KTRPOINT(td, KTR_FAULT))
 		ktrfault(vaddr, fault_type);
 #endif
-	result = vm_fault_hold(map, trunc_page(vaddr), fault_type, fault_flags,
-	    NULL);
+	result = vm_fault_handle(map, trunc_page(vaddr), fault_type,
+	    fault_flags, NULL);
 #ifdef KTRACE
 	if (map != kernel_map && KTRPOINT(td, KTR_FAULTEND))
 		ktrfaultend(result);
@@ -231,7 +231,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr
 }
 
 int
-vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
+vm_fault_handle(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
     int fault_flags, vm_page_t *m_hold)
 {
 	vm_prot_t prot;
@@ -929,7 +929,10 @@ vnode_locked:
 		vm_page_activate(fs.m);
 	if (m_hold != NULL) {
 		*m_hold = fs.m;
-		vm_page_hold(fs.m);
+		if (fault_flags & VM_FAULT_IOBUSY)
+			vm_page_io_start(fs.m);
+		else
+			vm_page_hold(fs.m);
 	}
 	vm_page_wakeup_locked(fs.m);
 	vm_page_unlock(fs.m);
@@ -1131,7 +1134,7 @@ vm_fault_quick_hold_pages(vm_map_t map, 
 		 * and hold these pages.
 		 */
 		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
-			if (*mp == NULL && vm_fault_hold(map, va, prot,
+			if (*mp == NULL && vm_fault_handle(map, va, prot,
 			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
 				goto error;
 	}

Modified: user/attilio/vmobj-readlock/sys/vm/vm_map.h
==============================================================================
--- user/attilio/vmobj-readlock/sys/vm/vm_map.h	Thu Jun 13 22:24:48 2013	(r251716)
+++ user/attilio/vmobj-readlock/sys/vm/vm_map.h	Thu Jun 13 22:26:45 2013	(r251717)
@@ -328,6 +328,7 @@ long vmspace_resident_count(struct vmspa
 #define VM_FAULT_NORMAL 0		/* Nothing special */
 #define VM_FAULT_CHANGE_WIRING 1	/* Change the wiring as appropriate */
 #define	VM_FAULT_DIRTY 2		/* Dirty the page; use w/VM_PROT_COPY */
+#define	VM_FAULT_IOBUSY 4		/* Busy the faulted page */
 
 /*
  * Initially, mappings are slightly sequential.  The maximum window size must
