testing new pmap code

Alan Cox alc at rice.edu
Wed Jul 23 16:27:58 UTC 2014


Folks,

About a week ago I committed some new pmap code on all architectures,
including arm.  However, that code isn't "active" until the attached
patch is committed.  Before I commit this patch, I'd like to ask people
to test it on arm "classic" and especially v6.  (The v6 pmap changes
were more complicated.)

Here is how to test it.  Before you apply the kernel patch, compile and
run the attached program, mlockall2A.c, in a loop, something like this:

# while ( 1 )
while? ./mlockall2A
while? sysctl vm.stats.vm.v_wire_count
while? sleep 7
while? end
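
The above is csh(1) syntax; under sh(1), an equivalent one-liner would be:

# while :; do ./mlockall2A; sysctl vm.stats.vm.v_wire_count; sleep 7; done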

You should see the wire count steadily increase, because there is a
wired page leak.

Now, apply the kernel patch, and repeat the same test.  This time, the
wire count should stabilize.

If you're testing on arm v6, substitute

sysctl vm.stats.vm.v_wire_count vm.pmap.section

for

sysctl vm.stats.vm.v_wire_count

so that I can verify that we're handling superpages correctly.
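
That is, the test loop becomes:

# while ( 1 )
while? ./mlockall2A
while? sysctl vm.stats.vm.v_wire_count vm.pmap.section
while? sleep 7
while? end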

Thanks,
Alan

-------------- next part --------------
Index: vm/vm_extern.h
===================================================================
--- vm/vm_extern.h	(revision 268597)
+++ vm/vm_extern.h	(working copy)
@@ -81,7 +81,6 @@ int vm_fault_hold(vm_map_t map, vm_offset_t vaddr,
     int fault_flags, vm_page_t *m_hold);
 int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
     vm_prot_t prot, vm_page_t *ma, int max_count);
-void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
 int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
 int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
 void vm_waitproc(struct proc *);
Index: vm/vm_fault.c
===================================================================
--- vm/vm_fault.c	(revision 268597)
+++ vm/vm_fault.c	(working copy)
@@ -106,6 +106,7 @@ __FBSDID("$FreeBSD$");
 #define PFFOR 4
 
 static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
+static void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
 
 #define	VM_FAULT_READ_BEHIND	8
 #define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)
@@ -1186,7 +1187,7 @@ vm_fault_wire(vm_map_t map, vm_offset_t start, vm_
  *
  *	Unwire a range of virtual addresses in a map.
  */
-void
+static void
 vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
     boolean_t fictitious)
 {
Index: vm/vm_map.c
===================================================================
--- vm/vm_map.c	(revision 268597)
+++ vm/vm_map.c	(working copy)
@@ -132,6 +132,7 @@ static void _vm_map_init(vm_map_t map, pmap_t pmap
     vm_offset_t max);
 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
+static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
 #ifdef INVARIANTS
 static void vm_map_zdtor(void *mem, int size, void *arg);
 static void vmspace_zdtor(void *mem, int size, void *arg);
@@ -2393,16 +2394,10 @@ done:
 		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
 			if (user_unwire)
 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
-			entry->wired_count--;
-			if (entry->wired_count == 0) {
-				/*
-				 * Retain the map lock.
-				 */
-				vm_fault_unwire(map, entry->start, entry->end,
-				    entry->object.vm_object != NULL &&
-				    (entry->object.vm_object->flags &
-				    OBJ_FICTITIOUS) != 0);
-			}
+			if (entry->wired_count == 1)
+				vm_map_entry_unwire(map, entry);
+			else
+				entry->wired_count--;
 		}
 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
 		    ("vm_map_unwire: in-transition flag missing %p", entry));
@@ -2635,19 +2630,12 @@ done:
 			 * unnecessary.
 			 */
 			entry->wired_count = 0;
-		} else {
-			if (!user_wire ||
-			    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
+		} else if (!user_wire ||
+		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
+			if (entry->wired_count == 1)
+				vm_map_entry_unwire(map, entry);
+			else
 				entry->wired_count--;
-			if (entry->wired_count == 0) {
-				/*
-				 * Retain the map lock.
-				 */
-				vm_fault_unwire(map, entry->start, entry->end,
-				    entry->object.vm_object != NULL &&
-				    (entry->object.vm_object->flags &
-				    OBJ_FICTITIOUS) != 0);
-			}
 		}
 	next_entry_done:
 		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
@@ -2783,9 +2771,11 @@ vm_map_sync(
 static void
 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
 {
-	vm_fault_unwire(map, entry->start, entry->end,
-	    entry->object.vm_object != NULL &&
-	    (entry->object.vm_object->flags & OBJ_FICTITIOUS) != 0);
+
+	VM_MAP_ASSERT_LOCKED(map);
+	pmap_unwire(map->pmap, entry->start, entry->end);
+	vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
+	    entry->start, PQ_ACTIVE);
 	entry->wired_count = 0;
 }
 
Index: vm/vm_object.c
===================================================================
--- vm/vm_object.c	(revision 268597)
+++ vm/vm_object.c	(working copy)
@@ -2200,6 +2200,78 @@ vm_object_set_writeable_dirty(vm_object_t object)
 	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
 }
 
+/*
+ *	vm_object_unwire:
+ *
+ *	For each page offset within the specified range of the given object,
+ *	find the highest-level page in the shadow chain and unwire it.  A page
+ *	must exist at every page offset, and the highest-level page must be
+ *	wired.
+ */
+void
+vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
+    uint8_t queue)
+{
+	vm_object_t tobject;
+	vm_page_t m, tm;
+	vm_pindex_t end_pindex, pindex, tpindex;
+	int depth, locked_depth;
+
+	KASSERT((offset & PAGE_MASK) == 0,
+	    ("vm_object_unwire: offset is not page aligned"));
+	KASSERT((length & PAGE_MASK) == 0,
+	    ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
+	/* The wired count of a fictitious page never changes. */
+	if ((object->flags & OBJ_FICTITIOUS) != 0)
+		return;
+	pindex = OFF_TO_IDX(offset);
+	end_pindex = pindex + atop(length);
+	locked_depth = 1;
+	VM_OBJECT_RLOCK(object);
+	m = vm_page_find_least(object, pindex);
+	while (pindex < end_pindex) {
+		if (m == NULL || pindex < m->pindex) {
+			/*
+			 * The first object in the shadow chain doesn't
+			 * contain a page at the current index.  Therefore,
+			 * the page must exist in a backing object.
+			 */
+			tobject = object;
+			tpindex = pindex;
+			depth = 0;
+			do {
+				tpindex +=
+				    OFF_TO_IDX(tobject->backing_object_offset);
+				tobject = tobject->backing_object;
+				KASSERT(tobject != NULL,
+				    ("vm_object_unwire: missing page"));
+				if ((tobject->flags & OBJ_FICTITIOUS) != 0)
+					goto next_page;
+				depth++;
+				if (depth == locked_depth) {
+					locked_depth++;
+					VM_OBJECT_RLOCK(tobject);
+				}
+			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
+			    NULL);
+		} else {
+			tm = m;
+			m = TAILQ_NEXT(m, listq);
+		}
+		vm_page_lock(tm);
+		vm_page_unwire(tm, queue);
+		vm_page_unlock(tm);
+next_page:
+		pindex++;
+	}
+	/* Release the accumulated object locks. */
+	for (depth = 0; depth < locked_depth; depth++) {
+		tobject = object->backing_object;
+		VM_OBJECT_RUNLOCK(object);
+		object = tobject;
+	}
+}
+
 #include "opt_ddb.h"
 #ifdef DDB
 #include <sys/kernel.h>
Index: vm/vm_object.h
===================================================================
--- vm/vm_object.h	(revision 268597)
+++ vm/vm_object.h	(working copy)
@@ -290,6 +290,8 @@ void vm_object_shadow (vm_object_t *, vm_ooffset_t
 void vm_object_split(vm_map_entry_t);
 boolean_t vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
     boolean_t);
+void vm_object_unwire(vm_object_t object, vm_ooffset_t offset,
+    vm_size_t length, uint8_t queue);
 #endif				/* _KERNEL */
 
 #endif				/* _VM_OBJECT_ */
-------------- next part --------------
#include <sys/mman.h>

#include <err.h>
#include <stdlib.h>

int
main(void)
{
	size_t pagesize[2];
	void *p;
	int flags = MAP_ANON | MAP_PRIVATE, sizes;

	/* Ask the kernel for the supported page sizes. */
	if ((sizes = getpagesizes(pagesize, 2)) == -1)
		err(1, "getpagesizes");

	/* If no superpage size is reported, fake one. */
	if (sizes < 2) {
		pagesize[1] = 4 * 1024 * 1024;
		warnx("Faking pagesize[1] as %zu", pagesize[1]);
	}

	/* Wire all current and future mappings. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
		err(1, "mlockall");

	/* Create a mapping spanning several superpages; MCL_FUTURE wires it. */
	if ((p = mmap(NULL, 3 * pagesize[1], PROT_READ | PROT_WRITE, flags, -1,
	    0)) == MAP_FAILED)
		err(1, "mmap");

	/* Change protections on part of the region, splitting the wired entry. */
	if (mprotect(p, pagesize[1] + pagesize[0], PROT_NONE) == -1)
		err(1, "mprotect");

	return (0);
}
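
For reference, the program builds with no special flags; run it as root
(as the "#" prompts above suggest) so that mlockall() is permitted:

# cc -o mlockall2A mlockall2A.c
# ./mlockall2A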

