svn commit: r236763 - in user/attilio/vmc-playground/sys: kern vm

Attilio Rao attilio at FreeBSD.org
Fri Jun 8 18:44:55 UTC 2012


Author: attilio
Date: Fri Jun  8 18:44:54 2012
New Revision: 236763
URL: http://svn.freebsd.org/changeset/base/236763

Log:
  Revert r231027 and fix the prototype for vm_radix_remove().
  The goal is to get to the point where the recovery path can be
  completely removed, since we will be able to count on pre-allocation
  once the path-compressed trie is implemented.

Modified:
  user/attilio/vmc-playground/sys/kern/subr_uio.c
  user/attilio/vmc-playground/sys/vm/device_pager.c
  user/attilio/vmc-playground/sys/vm/sg_pager.c
  user/attilio/vmc-playground/sys/vm/vm_fault.c
  user/attilio/vmc-playground/sys/vm/vm_object.c
  user/attilio/vmc-playground/sys/vm/vm_page.c
  user/attilio/vmc-playground/sys/vm/vm_page.h
  user/attilio/vmc-playground/sys/vm/vm_radix.c

Modified: user/attilio/vmc-playground/sys/kern/subr_uio.c
==============================================================================
--- user/attilio/vmc-playground/sys/kern/subr_uio.c	Fri Jun  8 18:32:09 2012	(r236762)
+++ user/attilio/vmc-playground/sys/kern/subr_uio.c	Fri Jun  8 18:44:54 2012	(r236763)
@@ -85,7 +85,6 @@ vm_pgmoveco(vm_map_t mapa, vm_offset_t k
 	vm_map_entry_t entry;
 	vm_pindex_t upindex;
 	vm_prot_t prot;
-	vm_page_bits_t vbits;
 	boolean_t wired;
 
 	KASSERT((uaddr & PAGE_MASK) == 0,
@@ -96,7 +95,6 @@ vm_pgmoveco(vm_map_t mapa, vm_offset_t k
 	 * unwired in sf_buf_mext().
 	 */
 	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
-	vbits = kern_pg->valid;
 	kern_pg->valid = VM_PAGE_BITS_ALL;
 	KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
 	    ("vm_pgmoveco: kern_pg is not correctly wired"));
@@ -107,13 +105,6 @@ vm_pgmoveco(vm_map_t mapa, vm_offset_t k
 		return(EFAULT);
 	}
 	VM_OBJECT_LOCK(uobject);
-	if (vm_page_insert(kern_pg, uobject, upindex) != 0) {
-		kern_pg->valid = vbits;
-		VM_OBJECT_UNLOCK(uobject);
-		vm_map_lookup_done(map, entry);
-		return(ENOMEM);
-	}
-	vm_page_dirty(kern_pg);
 retry:
 	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
 		if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
@@ -131,6 +122,8 @@ retry:
 		if (uobject->backing_object != NULL)
 			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
 	}
+	vm_page_insert(kern_pg, uobject, upindex);
+	vm_page_dirty(kern_pg);
 	VM_OBJECT_UNLOCK(uobject);
 	vm_map_lookup_done(map, entry);
 	return(KERN_SUCCESS);

Modified: user/attilio/vmc-playground/sys/vm/device_pager.c
==============================================================================
--- user/attilio/vmc-playground/sys/vm/device_pager.c	Fri Jun  8 18:32:09 2012	(r236762)
+++ user/attilio/vmc-playground/sys/vm/device_pager.c	Fri Jun  8 18:44:54 2012	(r236763)
@@ -344,14 +344,11 @@ old_dev_pager_fault(vm_object_t object, 
 		 */
 		page = vm_page_getfake(paddr, memattr);
 		VM_OBJECT_LOCK(object);
-		if (vm_page_insert(page, object, offset) != 0) {
-			vm_page_putfake(page);
-			return (VM_PAGER_FAIL);
-		}
 		vm_page_lock(*mres);
 		vm_page_free(*mres);
 		vm_page_unlock(*mres);
 		*mres = page;
+		vm_page_insert(page, object, pidx);
 	}
 	page->valid = VM_PAGE_BITS_ALL;
 	return (VM_PAGER_OK);

Modified: user/attilio/vmc-playground/sys/vm/sg_pager.c
==============================================================================
--- user/attilio/vmc-playground/sys/vm/sg_pager.c	Fri Jun  8 18:32:09 2012	(r236762)
+++ user/attilio/vmc-playground/sys/vm/sg_pager.c	Fri Jun  8 18:44:54 2012	(r236763)
@@ -179,10 +179,6 @@ sg_pager_getpages(vm_object_t object, vm
 	/* Construct a new fake page. */
 	page = vm_page_getfake(paddr, memattr);
 	VM_OBJECT_LOCK(object);
-	if (vm_page_insert(page, object, offset) != 0) {
-		vm_page_putfake(page);
-		return (VM_PAGER_FAIL);
-	}
 	TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq);
 
 	/* Free the original pages and insert this fake page into the object. */
@@ -191,6 +187,7 @@ sg_pager_getpages(vm_object_t object, vm
 		vm_page_free(m[i]);
 		vm_page_unlock(m[i]);
 	}
+	vm_page_insert(page, object, offset);
 	m[reqpage] = page;
 	page->valid = VM_PAGE_BITS_ALL;
 

Modified: user/attilio/vmc-playground/sys/vm/vm_fault.c
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_fault.c	Fri Jun  8 18:32:09 2012	(r236762)
+++ user/attilio/vmc-playground/sys/vm/vm_fault.c	Fri Jun  8 18:44:54 2012	(r236763)
@@ -762,7 +762,9 @@ vnode_locked:
 				 * process'es object.  The page is 
 				 * automatically made dirty.
 				 */
+				vm_page_lock(fs.m);
 				vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
+				vm_page_unlock(fs.m);
 				vm_page_busy(fs.m);
 				fs.first_m = fs.m;
 				fs.m = NULL;

Modified: user/attilio/vmc-playground/sys/vm/vm_object.c
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_object.c	Fri Jun  8 18:32:09 2012	(r236762)
+++ user/attilio/vmc-playground/sys/vm/vm_object.c	Fri Jun  8 18:44:54 2012	(r236763)
@@ -1418,7 +1418,9 @@ retry:
 			vm_reserv_rename(m, new_object, orig_object,
 			    offidxstart);
 #endif
+			vm_page_lock(m);
 			vm_page_rename(m, new_object, idx);
+			vm_page_unlock(m);
 			/*
 			 * page automatically made dirty by rename and
 			 * cache handled
@@ -1673,7 +1675,9 @@ restart:
 			 * If the page was mapped to a process, it can remain 
 			 * mapped through the rename.
 			 */
+			vm_page_lock(p);
 			vm_page_rename(p, object, new_pindex);
+			vm_page_unlock(p);
 			/* page automatically made dirty by rename */
 		}
 	}

Modified: user/attilio/vmc-playground/sys/vm/vm_page.c
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_page.c	Fri Jun  8 18:32:09 2012	(r236762)
+++ user/attilio/vmc-playground/sys/vm/vm_page.c	Fri Jun  8 18:44:54 2012	(r236763)
@@ -109,7 +109,6 @@ __FBSDID("$FreeBSD$");
 #include <vm/uma.h>
 #include <vm/uma_int.h>
 
-#include <machine/cpu.h>
 #include <machine/md_var.h>
 
 /*
@@ -819,15 +818,13 @@ vm_page_dirty(vm_page_t m)
  *	The object and page must be locked.
  *	This routine may not block.
  */
-int
+void
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t neighbor;
-	vm_pindex_t cpindex;
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	if (m->object != NULL)
 		panic("vm_page_insert: page already inserted");
-	cpindex = m->pindex;
 
 	/*
 	 * Record the object/offset pair in this page
@@ -848,13 +845,8 @@ vm_page_insert(vm_page_t m, vm_object_t 
 		} else 
 			TAILQ_INSERT_TAIL(&object->memq, m, listq);
 	}
-
-	if (vm_radix_insert(&object->rtree, pindex, m) != 0) {
-		TAILQ_REMOVE(&object->memq, m, listq);
-		m->object = NULL;
-		m->pindex = cpindex;
-		return (ENOMEM);
-	}
+	if (vm_radix_insert(&object->rtree, pindex, m) != 0)
+		panic("vm_page_insert: unable to insert the new page");
 
 	/*
 	 * show that the object has one more resident page.
@@ -872,7 +864,6 @@ vm_page_insert(vm_page_t m, vm_object_t 
 	 */
 	if (m->aflags & PGA_WRITEABLE)
 		vm_object_set_writeable_dirty(object);
-	return (0);
 }
 
 /*
@@ -1017,20 +1008,9 @@ vm_page_prev(vm_page_t m)
 void
 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
 {
-	u_int i;
-
-	MPASS(m->object != NULL);
-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
-	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
 
-	vm_page_lock(m);
 	vm_page_remove(m);
-	vm_page_unlock(m);
-	while (vm_page_insert(m, new_object, new_pindex) != 0) {
-		pagedaemon_wakeup();
-		for (i = 0; i < 10000000; i++)
-			cpu_spinwait();
-	}
+	vm_page_insert(m, new_object, new_pindex);
 	vm_page_dirty(m);
 }
 
@@ -1311,19 +1291,7 @@ vm_page_alloc(vm_object_t object, vm_pin
 		if (object->memattr != VM_MEMATTR_DEFAULT &&
 		    object->type != OBJT_DEVICE && object->type != OBJT_SG)
 			pmap_page_set_memattr(m, object->memattr);
-		if (vm_page_insert(m, object, pindex) != 0) {
-
-			/* See the comment below about hold count handling. */
-			if (vp != NULL)
-				vdrop(vp);
-			vm_page_lock(m);
-			if (req & VM_ALLOC_WIRED)
-				vm_page_unwire(m, 0);
-			vm_page_free(m);
-			vm_page_unlock(m);
-			pagedaemon_wakeup();
-			return (NULL);
-		}
+		vm_page_insert(m, object, pindex);
 	} else
 		m->pindex = pindex;
 
@@ -1390,7 +1358,6 @@ vm_page_alloc_contig(vm_object_t object,
 {
 	struct vnode *drop;
 	vm_page_t deferred_vdrop_list, m, m_ret;
-	vm_pindex_t cpindex;
 	u_int flags, oflags;
 	int req_class;
 
@@ -1477,7 +1444,6 @@ retry:
 		    memattr == VM_MEMATTR_DEFAULT)
 			memattr = object->memattr;
 	}
-	cpindex = pindex;
 	for (m = m_ret; m < &m_ret[npages]; m++) {
 		m->aflags = 0;
 		m->flags &= flags;
@@ -1487,30 +1453,12 @@ retry:
 		m->oflags = oflags;
 		if (memattr != VM_MEMATTR_DEFAULT)
 			pmap_page_set_memattr(m, memattr);
+		if (object != NULL)
+			vm_page_insert(m, object, pindex);
+		else
+			m->pindex = pindex;
 		pindex++;
 	}
-	for (m = m_ret; m < &m_ret[npages]; m++) {
-		if (object != NULL) {
-			if (vm_page_insert(m, object, cpindex) != 0) {
-				while (deferred_vdrop_list != NULL) {
-		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
-					deferred_vdrop_list =
-					    deferred_vdrop_list->pageq.tqe_next;
-				}
-				for (m = m_ret; m < &m_ret[npages]; m++) {
-					vm_page_lock(m);
-					if (req & VM_ALLOC_WIRED)
-						vm_page_unwire(m, 0);
-					vm_page_free(m);
-					vm_page_unlock(m);
-				}
-				pagedaemon_wakeup();
-				return (NULL);
-			}
-		} else
-			m->pindex = cpindex;
-		cpindex++;
-	}
 	while (deferred_vdrop_list != NULL) {
 		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
 		deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
@@ -2732,8 +2680,11 @@ vm_page_cowfault(vm_page_t m)
 	pindex = m->pindex;
 
  retry_alloc:
+	pmap_remove_all(m);
+	vm_page_remove(m);
 	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
 	if (mnew == NULL) {
+		vm_page_insert(m, object, pindex);
 		vm_page_unlock(m);
 		VM_OBJECT_UNLOCK(object);
 		VM_WAIT;
@@ -2759,9 +2710,8 @@ vm_page_cowfault(vm_page_t m)
 		vm_page_lock(mnew);
 		vm_page_free(mnew);
 		vm_page_unlock(mnew);
+		vm_page_insert(m, object, pindex);
 	} else { /* clear COW & copy page */
-		pmap_remove_all(m);
-		vm_page_remove(m);
 		if (!so_zerocp_fullpage)
 			pmap_copy_page(m, mnew);
 		mnew->valid = VM_PAGE_BITS_ALL;

Modified: user/attilio/vmc-playground/sys/vm/vm_page.h
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_page.h	Fri Jun  8 18:32:09 2012	(r236762)
+++ user/attilio/vmc-playground/sys/vm/vm_page.h	Fri Jun  8 18:44:54 2012	(r236763)
@@ -378,7 +378,7 @@ void vm_page_deactivate (vm_page_t);
 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
-int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
+void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);

Modified: user/attilio/vmc-playground/sys/vm/vm_radix.c
==============================================================================
--- user/attilio/vmc-playground/sys/vm/vm_radix.c	Fri Jun  8 18:32:09 2012	(r236762)
+++ user/attilio/vmc-playground/sys/vm/vm_radix.c	Fri Jun  8 18:44:54 2012	(r236763)
@@ -100,7 +100,6 @@
 #endif
 
 CTASSERT(VM_RADIX_HEIGHT >= VM_RADIX_LIMIT);
-CTASSERT((sizeof(u_int) * NBBY) >= VM_RADIX_LIMIT);
 
 struct vm_radix_node {
 	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
@@ -293,24 +292,6 @@ vm_radix_setroot(struct vm_radix *rtree,
 	rtree->rt_root = root;
 }
 
-static inline void
-vm_radix_unwind_heightup(struct vm_radix *rtree, struct vm_radix_node *root,
-    struct vm_radix_node *iroot, int ilevel)
-{
-	struct vm_radix_node *rnode;
-
-	CTR4(KTR_VM, "unwind: tree %p, root %p, iroot %p, ilevel %d",
-	    rtree, root, iroot, ilevel);
-	while (iroot != root && root != NULL) {
-		rnode = root;
-		MPASS(rnode->rn_count == 0 || rnode->rn_count == 1);
-		rnode->rn_count = 0;
-		root = rnode->rn_child[0];
-		vm_radix_node_put(rnode);
-	}
-	vm_radix_setroot(rtree, iroot, ilevel);
-}
-
 static inline void *
 vm_radix_match(void *child, int color)
 {
@@ -362,9 +343,10 @@ vm_radix_reclaim_allnodes_internal(struc
 int
 vm_radix_insert(struct vm_radix *rtree, vm_pindex_t index, void *val)
 {
-	struct vm_radix_node *iroot, *rnode, *root;
-	u_int allocmsk;
-	int clev, ilevel, level, slot;
+	struct vm_radix_node *rnode;
+	struct vm_radix_node *root;
+	int level;
+	int slot;
 
 	CTR4(KTR_VM,
 	    "insert: tree %p, " KFRMT64(index) ", val %p", rtree,
@@ -376,8 +358,6 @@ vm_radix_insert(struct vm_radix *rtree, 
 	 * Increase the height by adding nodes at the root until
 	 * there is sufficient space.
 	 */
-	ilevel = level;
-	iroot = root;
 	while (level == 0 || index > VM_RADIX_MAX(level)) {
 		CTR5(KTR_VM,
 	    "insert: expanding " KFRMT64(index) ">" KFRMT64(mxl) ", height %d",
@@ -398,8 +378,6 @@ vm_radix_insert(struct vm_radix *rtree, 
 	    "insert: tree %p, root %p, " KFRMT64(index) ", level %d ENOMEM",
 				    rtree, root, KSPLT64L(index),
 				    KSPLT64H(index), level);
-				vm_radix_unwind_heightup(rtree, root, iroot,
-				    ilevel);
 				return (ENOMEM);
 			}
 			/*
@@ -417,8 +395,6 @@ vm_radix_insert(struct vm_radix *rtree, 
 	}
 
 	/* Now that the tree is tall enough, fill in the path to the index. */
-	allocmsk = 0;
-	clev = level;
 	rnode = root;
 	for (level = level - 1; level > 0; level--) {
 		slot = vm_radix_slot(index, level);
@@ -434,35 +410,9 @@ vm_radix_insert(struct vm_radix *rtree, 
 			"insert: tree %p, rnode %p, child %p, count %u ENOMEM",
 		    		    rtree, rnode, rnode->rn_child[slot],
 				    rnode->rn_count);
-				MPASS(level != clev || allocmsk == 0);
-				while (allocmsk != 0) {
-					rnode = root;
-					level = clev;
-					level--;
-					slot = vm_radix_slot(index, level);
-					CTR4(KTR_VM,
-		    "insert: unwind root %p, level %d, slot %d, allocmsk: 0x%x",
-					    root, level, slot, allocmsk);
-					MPASS(level >= (ffs(allocmsk) - 1));
-					while (level > (ffs(allocmsk) - 1)) {
-						MPASS(level > 0);
-						slot = vm_radix_slot(index,
-						    level);
-						rnode = rnode->rn_child[slot];
-						level--;
-					}
-					MPASS((allocmsk & (1 << level)) != 0);
-					allocmsk &= ~(1 << level);
-					rnode->rn_count--;
-				vm_radix_node_put(rnode->rn_child[slot]);
-					rnode->rn_child[slot] = NULL;
-				}
-				vm_radix_unwind_heightup(rtree, root, iroot,
-				    ilevel);
-		    		return (ENOMEM);
+				return (ENOMEM);
 			}
 			rnode->rn_count++;
-			allocmsk |= (1 << level);
 	    	}
 		CTR6(KTR_VM,
 	    "insert: tree %p, " KFRMT64(index) ", level %d, slot %d, rnode %p",
@@ -816,12 +766,11 @@ restart:
  * tree is adjusted after deletion.  The value stored at index is returned
  * panics if the key is not present.
  */
-void *
+void
 vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index, int color)
 {
 	struct vm_radix_node *stack[VM_RADIX_LIMIT];
 	struct vm_radix_node *rnode, *root;
-	void *val;
 	int level;
 	int slot;
 
@@ -830,7 +779,6 @@ vm_radix_remove(struct vm_radix *rtree, 
 	    ("vm_radix_remove: %p index out of range %jd.", rtree,
 	    VM_RADIX_MAX(level)));
 	rnode = root;
-	val = NULL;
 	level--;
 	/*
 	 * Find the node and record the path in stack.
@@ -850,8 +798,7 @@ vm_radix_remove(struct vm_radix *rtree, 
 	KASSERT(rnode != NULL,
 		("vm_radix_remove: index not present in the tree.\n"));
 	slot = vm_radix_slot(index, 0);
-	val = vm_radix_match(rnode->rn_child[slot], color);
-	KASSERT(val != NULL,
+	KASSERT(vm_radix_match(rnode->rn_child[slot], color) != NULL,
 		("vm_radix_remove: index not present in the tree.\n"));
 
 	for (;;) {
@@ -891,7 +838,6 @@ vm_radix_remove(struct vm_radix *rtree, 
 		slot = vm_radix_slot(index, level);
 			
 	}
-	return (val);
 }
 
 /*


More information about the svn-src-user mailing list