svn commit: r223677 - in head/sys: amd64/amd64 arm/arm fs/tmpfs i386/i386 i386/xen ia64/ia64 kern mips/mips ufs/ffs vm

Alan Cox alc at FreeBSD.org
Wed Jun 29 16:40:41 UTC 2011


Author: alc
Date: Wed Jun 29 16:40:41 2011
New Revision: 223677
URL: http://svn.freebsd.org/changeset/base/223677

Log:
  Add a new option, OBJPR_NOTMAPPED, to vm_object_page_remove().  Passing this
  option to vm_object_page_remove() asserts that the specified range of pages
  is not mapped, or more precisely that none of these pages have any managed
  mappings.  Thus, vm_object_page_remove() need not call pmap_remove_all() on
  the pages.
  
  This change not only saves time by eliminating pointless calls to
  pmap_remove_all(), but it also eliminates an inconsistency in the use of
  pmap_remove_all() versus related functions, like pmap_remove_write().  In
  particular, it eliminates harmless but pointless calls to pmap_remove_all()
  that were being performed on PG_UNMANAGED pages.
  
  Update all of the existing assertions on pmap_remove_all() to reflect this
  change.
  
  Reviewed by:	kib
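
A minimal usage sketch (not part of this commit; "object", "start", and
"end" are placeholder names) of how a caller that has already unmapped a
range of pages might use the new option, assuming the usual object locking:

	/*
	 * Every mapping of [start, end) was already removed, for example
	 * by an earlier pmap_remove(), so OBJPR_NOTMAPPED lets
	 * vm_object_page_remove() skip the per-page pmap_remove_all()
	 * calls.
	 */
	VM_OBJECT_LOCK(object);
	vm_object_page_remove(object, start, end, OBJPR_NOTMAPPED);
	VM_OBJECT_UNLOCK(object);

Passing OBJPR_CLEANONLY instead affects only the clean pages in the range,
while passing 0 preserves the old unmap-and-free behaviour of the former
clean_only == FALSE case.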

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm/arm/pmap.c
  head/sys/fs/tmpfs/tmpfs_subr.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/xen/pmap.c
  head/sys/ia64/ia64/pmap.c
  head/sys/kern/uipc_shm.c
  head/sys/kern/vfs_subr.c
  head/sys/mips/mips/pmap.c
  head/sys/ufs/ffs/ffs_inode.c
  head/sys/vm/vm_map.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_object.h
  head/sys/vm/vnode_pager.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/amd64/amd64/pmap.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -2847,8 +2847,8 @@ pmap_remove_all(vm_page_t m)
 	vm_offset_t va;
 	vm_page_t free;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
 	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));

Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/arm/arm/pmap.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -3120,8 +3120,8 @@ pmap_remove_all(vm_page_t m)
 	pmap_t curpm;
 	int flags = 0;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	if (TAILQ_EMPTY(&m->md.pv_list))
 		return;
 	vm_page_lock_queues();

Modified: head/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- head/sys/fs/tmpfs/tmpfs_subr.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/fs/tmpfs/tmpfs_subr.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -926,7 +926,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t
 		if (newpages < oldpages) {
 			swap_pager_freespace(uobj, newpages, oldpages -
 			    newpages);
-			vm_object_page_remove(uobj, newpages, 0, FALSE);
+			vm_object_page_remove(uobj, newpages, 0, 0);
 		}
 
 		/*

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/i386/i386/pmap.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -2927,8 +2927,8 @@ pmap_remove_all(vm_page_t m)
 	vm_offset_t va;
 	vm_page_t free;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
 	vm_page_lock_queues();
 	sched_pin();

Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/i386/xen/pmap.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -2421,8 +2421,8 @@ pmap_remove_all(vm_page_t m)
 	pt_entry_t *pte, tpte;
 	vm_page_t free;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	free = NULL;
 	vm_page_lock_queues();
 	sched_pin();

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/ia64/ia64/pmap.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -1424,8 +1424,8 @@ pmap_remove_all(vm_page_t m)
 	pmap_t oldpmap;
 	pv_entry_t pv;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	vm_page_lock_queues();
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		struct ia64_lpte *pte;

Modified: head/sys/kern/uipc_shm.c
==============================================================================
--- head/sys/kern/uipc_shm.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/kern/uipc_shm.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -264,7 +264,7 @@ shm_dotruncate(struct shmfd *shmfd, off_
 		/* Toss in memory pages. */
 		if (nobjsize < object->size)
 			vm_object_page_remove(object, nobjsize, object->size,
-			    FALSE);
+			    0);
 
 		/* Toss pages from swap. */
 		if (object->type == OBJT_SWAP)

Modified: head/sys/kern/vfs_subr.c
==============================================================================
--- head/sys/kern/vfs_subr.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/kern/vfs_subr.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -1190,8 +1190,8 @@ bufobj_invalbuf(struct bufobj *bo, int f
 	 */
 	if (bo->bo_object != NULL && (flags & (V_ALT | V_NORMAL)) == 0) {
 		VM_OBJECT_LOCK(bo->bo_object);
-		vm_object_page_remove(bo->bo_object, 0, 0,
-			(flags & V_SAVE) ? TRUE : FALSE);
+		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
+		    OBJPR_CLEANONLY : 0);
 		VM_OBJECT_UNLOCK(bo->bo_object);
 	}
 

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/mips/mips/pmap.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -1711,8 +1711,8 @@ pmap_remove_all(vm_page_t m)
 	pv_entry_t pv;
 	pt_entry_t *pte, tpte;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_remove_all: page %p is not managed", m));
 	vm_page_lock_queues();
 
 	if (m->md.pv_flags & PV_TABLE_REF)

Modified: head/sys/ufs/ffs/ffs_inode.c
==============================================================================
--- head/sys/ufs/ffs/ffs_inode.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/ufs/ffs/ffs_inode.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -128,7 +128,7 @@ ffs_pages_remove(struct vnode *vp, vm_pi
 	if ((object = vp->v_object) == NULL)
 		return;
 	VM_OBJECT_LOCK(object);
-	vm_object_page_remove(object, start, end, FALSE);
+	vm_object_page_remove(object, start, end, 0);
 	VM_OBJECT_UNLOCK(object);
 }
 

Modified: head/sys/vm/vm_map.c
==============================================================================
--- head/sys/vm/vm_map.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/vm/vm_map.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -2708,7 +2708,15 @@ vm_map_entry_delete(vm_map_t map, vm_map
 		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
 		    object == kernel_object || object == kmem_object)) {
 			vm_object_collapse(object);
-			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
+
+			/*
+			 * The option OBJPR_NOTMAPPED can be passed here
+			 * because vm_map_delete() already performed
+			 * pmap_remove() on the only mapping to this range
+			 * of pages. 
+			 */
+			vm_object_page_remove(object, offidxstart, offidxend,
+			    OBJPR_NOTMAPPED);
 			if (object->type == OBJT_SWAP)
 				swap_pager_freespace(object, offidxstart, count);
 			if (offidxend >= object->size &&

Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/vm/vm_object.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -923,6 +923,10 @@ vm_object_page_collect_flush(vm_object_t
  * We invalidate (remove) all pages from the address space
  * for semantic correctness.
  *
+ * If the backing object is a device object with unmanaged pages, then any
+ * mappings to the specified range of pages must be removed before this
+ * function is called.
+ *
  * Note: certain anonymous maps, such as MAP_NOSYNC maps,
  * may start out with a NULL object.
  */
@@ -978,12 +982,19 @@ vm_object_sync(vm_object_t object, vm_oo
 	}
 	if ((object->type == OBJT_VNODE ||
 	     object->type == OBJT_DEVICE) && invalidate) {
-		boolean_t purge;
-		purge = old_msync || (object->type == OBJT_DEVICE);
-		vm_object_page_remove(object,
-		    OFF_TO_IDX(offset),
-		    OFF_TO_IDX(offset + size + PAGE_MASK),
-		    purge ? FALSE : TRUE);
+		if (object->type == OBJT_DEVICE)
+			/*
+			 * The option OBJPR_NOTMAPPED must be passed here
+			 * because vm_object_page_remove() cannot remove
+			 * unmanaged mappings.
+			 */
+			flags = OBJPR_NOTMAPPED;
+		else if (old_msync)
+			flags = 0;
+		else
+			flags = OBJPR_CLEANONLY;
+		vm_object_page_remove(object, OFF_TO_IDX(offset),
+		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
 	}
 	VM_OBJECT_UNLOCK(object);
 }
@@ -1754,76 +1765,70 @@ vm_object_collapse(vm_object_t object)
  *	vm_object_page_remove:
  *
  *	For the given object, either frees or invalidates each of the
- *	specified pages.  In general, a page is freed.  However, if a
- *	page is wired for any reason other than the existence of a
- *	managed, wired mapping, then it may be invalidated but not
- *	removed from the object.  Pages are specified by the given
- *	range ["start", "end") and Boolean "clean_only".  As a
- *	special case, if "end" is zero, then the range extends from
- *	"start" to the end of the object.  If "clean_only" is TRUE,
- *	then only the non-dirty pages within the specified range are
- *	affected.
- *
- *	In general, this operation should only be performed on objects
- *	that contain managed pages.  There are two exceptions.  First,
- *	it may be performed on the kernel and kmem objects.  Second,
- *	it may be used by msync(..., MS_INVALIDATE) to invalidate
- *	device-backed pages.  In both of these cases, "clean_only"
- *	must be FALSE.
+ *	specified pages.  In general, a page is freed.  However, if a page is
+ *	wired for any reason other than the existence of a managed, wired
+ *	mapping, then it may be invalidated but not removed from the object.
+ *	Pages are specified by the given range ["start", "end") and the option
+ *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
+ *	extends from "start" to the end of the object.  If the option
+ *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
+ *	specified range are affected.  If the option OBJPR_NOTMAPPED is
+ *	specified, then the pages within the specified range must have no
+ *	mappings.  Otherwise, if this option is not specified, any mappings to
+ *	the specified pages are removed before the pages are freed or
+ *	invalidated.
+ *
+ *	In general, this operation should only be performed on objects that
+ *	contain managed pages.  There are, however, two exceptions.  First, it
+ *	is performed on the kernel and kmem objects by vm_map_entry_delete().
+ *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
+ *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
+ *	not be specified and the option OBJPR_NOTMAPPED must be specified.
  *
  *	The object must be locked.
  */
 void
 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
-    boolean_t clean_only)
+    int options)
 {
 	vm_page_t p, next;
 	int wirings;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	KASSERT((object->type != OBJT_DEVICE && object->type != OBJT_PHYS) ||
+	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
+	    ("vm_object_page_remove: illegal options for object %p", object));
 	if (object->resident_page_count == 0)
 		goto skipmemq;
-
-	/*
-	 * Since physically-backed objects do not use managed pages, we can't
-	 * remove pages from the object (we must instead remove the page
-	 * references, and then destroy the object).
-	 */
-	KASSERT(object->type != OBJT_PHYS || object == kernel_object ||
-	    object == kmem_object,
-	    ("attempt to remove pages from a physical object"));
-
 	vm_object_pip_add(object, 1);
 again:
 	p = vm_page_find_least(object, start);
 
 	/*
-	 * Assert: the variable p is either (1) the page with the
-	 * least pindex greater than or equal to the parameter pindex
-	 * or (2) NULL.
+	 * Here, the variable "p" is either (1) the page with the least pindex
+	 * greater than or equal to the parameter "start" or (2) NULL. 
 	 */
-	for (;
-	     p != NULL && (p->pindex < end || end == 0);
-	     p = next) {
+	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
 		next = TAILQ_NEXT(p, listq);
 
 		/*
-		 * If the page is wired for any reason besides the
-		 * existence of managed, wired mappings, then it cannot
-		 * be freed.  For example, fictitious pages, which
-		 * represent device memory, are inherently wired and
-		 * cannot be freed.  They can, however, be invalidated
-		 * if "clean_only" is FALSE.
+		 * If the page is wired for any reason besides the existence
+		 * of managed, wired mappings, then it cannot be freed.  For
+		 * example, fictitious pages, which represent device memory,
+		 * are inherently wired and cannot be freed.  They can,
+		 * however, be invalidated if the option OBJPR_CLEANONLY is
+		 * not specified.
 		 */
 		vm_page_lock(p);
 		if ((wirings = p->wire_count) != 0 &&
 		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
-			/* Fictitious pages do not have managed mappings. */
-			if ((p->flags & PG_FICTITIOUS) == 0)
+			if ((options & OBJPR_NOTMAPPED) == 0) {
 				pmap_remove_all(p);
-			/* Account for removal of managed, wired mappings. */
-			p->wire_count -= wirings;
-			if (!clean_only) {
+				/* Account for removal of wired mappings. */
+				if (wirings != 0)
+					p->wire_count -= wirings;
+			}
+			if ((options & OBJPR_CLEANONLY) == 0) {
 				p->valid = 0;
 				vm_page_undirty(p);
 			}
@@ -1834,17 +1839,20 @@ again:
 			goto again;
 		KASSERT((p->flags & PG_FICTITIOUS) == 0,
 		    ("vm_object_page_remove: page %p is fictitious", p));
-		if (clean_only && p->valid) {
-			pmap_remove_write(p);
+		if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) {
+			if ((options & OBJPR_NOTMAPPED) == 0)
+				pmap_remove_write(p);
 			if (p->dirty) {
 				vm_page_unlock(p);
 				continue;
 			}
 		}
-		pmap_remove_all(p);
-		/* Account for removal of managed, wired mappings. */
-		if (wirings != 0)
-			p->wire_count -= wirings;
+		if ((options & OBJPR_NOTMAPPED) == 0) {
+			pmap_remove_all(p);
+			/* Account for removal of wired mappings. */
+			if (wirings != 0)
+				p->wire_count -= wirings;
+		}
 		vm_page_free(p);
 		vm_page_unlock(p);
 	}
@@ -1991,9 +1999,8 @@ vm_object_coalesce(vm_object_t prev_obje
 	 * deallocation.
 	 */
 	if (next_pindex < prev_object->size) {
-		vm_object_page_remove(prev_object,
-				      next_pindex,
-				      next_pindex + next_size, FALSE);
+		vm_object_page_remove(prev_object, next_pindex, next_pindex +
+		    next_size, 0);
 		if (prev_object->type == OBJT_SWAP)
 			swap_pager_freespace(prev_object,
 					     next_pindex, next_size);

Modified: head/sys/vm/vm_object.h
==============================================================================
--- head/sys/vm/vm_object.h	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/vm/vm_object.h	Wed Jun 29 16:40:41 2011	(r223677)
@@ -168,6 +168,12 @@ struct vm_object {
 #define OBJPC_INVAL	0x2			/* invalidate */
 #define OBJPC_NOSYNC	0x4			/* skip if PG_NOSYNC */
 
+/*
+ * The following options are supported by vm_object_page_remove().
+ */
+#define	OBJPR_CLEANONLY	0x1		/* Don't remove dirty pages. */
+#define	OBJPR_NOTMAPPED	0x2		/* Don't unmap pages. */
+
 TAILQ_HEAD(object_q, vm_object);
 
 extern struct object_q vm_object_list;	/* list of allocated objects */
@@ -219,7 +225,8 @@ void vm_object_set_writeable_dirty (vm_o
 void vm_object_init (void);
 void vm_object_page_clean(vm_object_t object, vm_ooffset_t start,
     vm_ooffset_t end, int flags);
-void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
+void vm_object_page_remove(vm_object_t object, vm_pindex_t start,
+    vm_pindex_t end, int options);
 boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t);
 void vm_object_print(long addr, boolean_t have_addr, long count, char *modif);
 void vm_object_reference (vm_object_t);

Modified: head/sys/vm/vnode_pager.c
==============================================================================
--- head/sys/vm/vnode_pager.c	Wed Jun 29 16:20:52 2011	(r223676)
+++ head/sys/vm/vnode_pager.c	Wed Jun 29 16:40:41 2011	(r223677)
@@ -387,7 +387,7 @@ vnode_pager_setsize(vp, nsize)
 		 */
 		if (nobjsize < object->size)
 			vm_object_page_remove(object, nobjsize, object->size,
-			    FALSE);
+			    0);
 		/*
 		 * this gets rid of garbage at the end of a page that is now
 		 * only partially backed by the vnode.

