svn commit: r317621 - in user/markj/PQ_LAUNDRY_11/sys: cddl/compat/opensolaris/sys cddl/contrib/opensolaris/uts/common/fs/zfs fs/tmpfs kern vm

Mark Johnston markj at FreeBSD.org
Mon May 1 01:50:30 UTC 2017


Author: markj
Date: Mon May  1 01:50:27 2017
New Revision: 317621
URL: https://svnweb.freebsd.org/changeset/base/317621

Log:
  MFC r308691 (by alc):
  Remove most of the code for implementing PG_CACHED pages.
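
For context: PG_CACHED pages were clean pages that had been reclaimed
from an object's resident set but were kept in a per-object radix trie
(the "cache" field removed from struct vm_object below) so that a later
fault could reactivate them cheaply.  A minimal sketch of the
caller-visible effect of the removal; the flag names are taken from the
diff below, while the surrounding code is illustrative rather than from
the tree:

	/* Before: callers could make allocation cache-aware. */
	m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_IFCACHED);
	m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);

	/* After: both collapse to a plain allocation; a returned page is
	 * always invalid (m->valid == 0) and must be filled by the pager
	 * or zeroed by the caller. */
	m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);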

Modified:
  user/markj/PQ_LAUNDRY_11/sys/cddl/compat/opensolaris/sys/vnode.h
  user/markj/PQ_LAUNDRY_11/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
  user/markj/PQ_LAUNDRY_11/sys/fs/tmpfs/tmpfs_subr.c
  user/markj/PQ_LAUNDRY_11/sys/kern/kern_exec.c
  user/markj/PQ_LAUNDRY_11/sys/kern/uipc_shm.c
  user/markj/PQ_LAUNDRY_11/sys/vm/swap_pager.c
  user/markj/PQ_LAUNDRY_11/sys/vm/vm_fault.c
  user/markj/PQ_LAUNDRY_11/sys/vm/vm_mmap.c
  user/markj/PQ_LAUNDRY_11/sys/vm/vm_object.c
  user/markj/PQ_LAUNDRY_11/sys/vm/vm_object.h
  user/markj/PQ_LAUNDRY_11/sys/vm/vm_page.c
  user/markj/PQ_LAUNDRY_11/sys/vm/vm_page.h
  user/markj/PQ_LAUNDRY_11/sys/vm/vm_phys.c
  user/markj/PQ_LAUNDRY_11/sys/vm/vm_reserv.c
  user/markj/PQ_LAUNDRY_11/sys/vm/vm_reserv.h
  user/markj/PQ_LAUNDRY_11/sys/vm/vnode_pager.c
Directory Properties:
  user/markj/PQ_LAUNDRY_11/   (props changed)

Modified: user/markj/PQ_LAUNDRY_11/sys/cddl/compat/opensolaris/sys/vnode.h
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/cddl/compat/opensolaris/sys/vnode.h	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/cddl/compat/opensolaris/sys/vnode.h	Mon May  1 01:50:27 2017	(r317621)
@@ -75,8 +75,7 @@ vn_is_readonly(vnode_t *vp)
 #define	vn_mountedvfs(vp)	((vp)->v_mountedhere)
 #define	vn_has_cached_data(vp)	\
 	((vp)->v_object != NULL && \
-	 ((vp)->v_object->resident_page_count > 0 || \
-	  !vm_object_cache_is_empty((vp)->v_object)))
+	 (vp)->v_object->resident_page_count > 0)
 #define	vn_exists(vp)		do { } while (0)
 #define	vn_invalid(vp)		do { } while (0)
 #define	vn_renamepath(tdvp, svp, tnm, lentnm)	do { } while (0)
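
With the cache trie gone, "has cached data" reduces to a resident page
count check.  A sketch of how the ZFS read path consumes this macro
(the shape is assumed from zfs_vnops.c; error handling elided):

	/* Take the page-cache-coherent path only when the vnode's VM
	 * object actually has resident pages. */
	if (vn_has_cached_data(vp))
		error = mappedread(vp, nbytes, uio);
	else
		error = dmu_read_uio_dbuf(sdb, uio, nbytes);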

Modified: user/markj/PQ_LAUNDRY_11/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Mon May  1 01:50:27 2017	(r317621)
@@ -426,10 +426,6 @@ page_busy(vnode_t *vp, int64_t start, in
 				continue;
 			}
 			vm_page_sbusy(pp);
-		} else if (pp == NULL) {
-			pp = vm_page_alloc(obj, OFF_TO_IDX(start),
-			    VM_ALLOC_SYSTEM | VM_ALLOC_IFCACHED |
-			    VM_ALLOC_SBUSY);
 		} else {
 			ASSERT(pp != NULL && !pp->valid);
 			pp = NULL;

Modified: user/markj/PQ_LAUNDRY_11/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/fs/tmpfs/tmpfs_subr.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/fs/tmpfs/tmpfs_subr.c	Mon May  1 01:50:27 2017	(r317621)
@@ -1401,12 +1401,9 @@ retry:
 					VM_WAIT;
 					VM_OBJECT_WLOCK(uobj);
 					goto retry;
-				} else if (m->valid != VM_PAGE_BITS_ALL)
-					rv = vm_pager_get_pages(uobj, &m, 1,
-					    NULL, NULL);
-				else
-					/* A cached page was reactivated. */
-					rv = VM_PAGER_OK;
+				}
+				rv = vm_pager_get_pages(uobj, &m, 1, NULL,
+				    NULL);
 				vm_page_lock(m);
 				if (rv == VM_PAGER_OK) {
 					vm_page_deactivate(m);

Modified: user/markj/PQ_LAUNDRY_11/sys/kern/kern_exec.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/kern/kern_exec.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/kern/kern_exec.c	Mon May  1 01:50:27 2017	(r317621)
@@ -1006,7 +1006,7 @@ exec_map_first_page(imgp)
 					break;
 			} else {
 				ma[i] = vm_page_alloc(object, i,
-				    VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
+				    VM_ALLOC_NORMAL);
 				if (ma[i] == NULL)
 					break;
 			}

Modified: user/markj/PQ_LAUNDRY_11/sys/kern/uipc_shm.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/kern/uipc_shm.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/kern/uipc_shm.c	Mon May  1 01:50:27 2017	(r317621)
@@ -455,12 +455,9 @@ retry:
 					VM_WAIT;
 					VM_OBJECT_WLOCK(object);
 					goto retry;
-				} else if (m->valid != VM_PAGE_BITS_ALL)
-					rv = vm_pager_get_pages(object, &m, 1,
-					    NULL, NULL);
-				else
-					/* A cached page was reactivated. */
-					rv = VM_PAGER_OK;
+				}
+				rv = vm_pager_get_pages(object, &m, 1, NULL,
+				    NULL);
 				vm_page_lock(m);
 				if (rv == VM_PAGER_OK) {
 					vm_page_deactivate(m);
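
This hunk and the tmpfs_subr.c hunk above are the same simplification:
a page freshly returned by vm_page_alloc() could previously be a
reactivated cached page, already fully valid, so the pager call could
be skipped.  Compacted from the diff:

	/* Before: a valid page here meant a reactivated cached page. */
	if (m->valid != VM_PAGE_BITS_ALL)
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
	else
		rv = VM_PAGER_OK;	/* cached page reactivated */

	/* After: an allocated page is always invalid (vm_page_alloc()
	 * now asserts m->valid == 0), so the pager is always called. */
	rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);

The same reasoning removes the valid-page shortcuts in vm_fault.c and
in the retrylookup hunk of vm_page.c (vm_page_grab()) below.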

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/swap_pager.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/swap_pager.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/swap_pager.c	Mon May  1 01:50:27 2017	(r317621)
@@ -1126,7 +1126,7 @@ swap_pager_getpages(vm_object_t object, 
 	if (shift != 0) {
 		for (i = 1; i <= shift; i++) {
 			p = vm_page_alloc(object, m[0]->pindex - i,
-			    VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
+			    VM_ALLOC_NORMAL);
 			if (p == NULL) {
 				/* Shift allocated pages to the left. */
 				for (j = 0; j < i - 1; j++)
@@ -1144,8 +1144,7 @@ swap_pager_getpages(vm_object_t object, 
 	if (rahead != NULL) {
 		for (i = 0; i < *rahead; i++) {
 			p = vm_page_alloc(object,
-			    m[reqcount - 1]->pindex + i + 1,
-			    VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
+			    m[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
 			if (p == NULL)
 				break;
 			bp->b_pages[shift + reqcount + i] = p;

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/vm_fault.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/vm_fault.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/vm_fault.c	Mon May  1 01:50:27 2017	(r317621)
@@ -756,8 +756,7 @@ RetryFault:;
 				unlock_and_deallocate(&fs);
 				VM_WAITPFAULT;
 				goto RetryFault;
-			} else if (fs.m->valid == VM_PAGE_BITS_ALL)
-				break;
+			}
 		}
 
 readrest:

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/vm_mmap.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/vm_mmap.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/vm_mmap.c	Mon May  1 01:50:27 2017	(r317621)
@@ -849,9 +849,6 @@ RestartScan:
 					pindex = OFF_TO_IDX(current->offset +
 					    (addr - current->start));
 					m = vm_page_lookup(object, pindex);
-					if (m == NULL &&
-					    vm_page_is_cached(object, pindex))
-						mincoreinfo = MINCORE_INCORE;
 					if (m != NULL && m->valid == 0)
 						m = NULL;
 					if (m != NULL)

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/vm_object.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/vm_object.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/vm_object.c	Mon May  1 01:50:27 2017	(r317621)
@@ -178,9 +178,6 @@ vm_object_zdtor(void *mem, int size, voi
 	    ("object %p has reservations",
 	    object));
 #endif
-	KASSERT(vm_object_cache_is_empty(object),
-	    ("object %p has cached pages",
-	    object));
 	KASSERT(object->paging_in_progress == 0,
 	    ("object %p paging_in_progress = %d",
 	    object, object->paging_in_progress));
@@ -212,8 +209,6 @@ vm_object_zinit(void *mem, int size, int
 	object->paging_in_progress = 0;
 	object->resident_page_count = 0;
 	object->shadow_count = 0;
-	object->cache.rt_root = 0;
-	object->cache.rt_flags = 0;
 
 	mtx_lock(&vm_object_list_mtx);
 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
@@ -792,8 +787,6 @@ vm_object_terminate(vm_object_t object)
 	if (__predict_false(!LIST_EMPTY(&object->rvq)))
 		vm_reserv_break_all(object);
 #endif
-	if (__predict_false(!vm_object_cache_is_empty(object)))
-		vm_page_cache_free(object, 0, 0);
 
 	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
 	    object->type == OBJT_SWAP,
@@ -1135,13 +1128,6 @@ shadowlookup:
 		} else if ((tobject->flags & OBJ_UNMANAGED) != 0)
 			goto unlock_tobject;
 		m = vm_page_lookup(tobject, tpindex);
-		if (m == NULL && advise == MADV_WILLNEED) {
-			/*
-			 * If the page is cached, reactivate it.
-			 */
-			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
-			    VM_ALLOC_NOBUSY);
-		}
 		if (m == NULL) {
 			/*
 			 * There may be swap even if there is no backing page
@@ -1406,19 +1392,6 @@ retry:
 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
 		TAILQ_FOREACH(m, &new_object->memq, listq)
 			vm_page_xunbusy(m);
-
-		/*
-		 * Transfer any cached pages from orig_object to new_object.
-		 * If swap_pager_copy() found swapped out pages within the
-		 * specified range of orig_object, then it changed
-		 * new_object's type to OBJT_SWAP when it transferred those
-		 * pages to new_object.  Otherwise, new_object's type
-		 * should still be OBJT_DEFAULT and orig_object should not
-		 * contain any cached pages within the specified range.
-		 */
-		if (__predict_false(!vm_object_cache_is_empty(orig_object)))
-			vm_page_cache_transfer(orig_object, offidxstart,
-			    new_object);
 	}
 	VM_OBJECT_WUNLOCK(orig_object);
 	VM_OBJECT_WUNLOCK(new_object);
@@ -1758,13 +1731,6 @@ vm_object_collapse(vm_object_t object)
 				    backing_object,
 				    object,
 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
-
-				/*
-				 * Free any cached pages from backing_object.
-				 */
-				if (__predict_false(
-				    !vm_object_cache_is_empty(backing_object)))
-					vm_page_cache_free(backing_object, 0, 0);
 			}
 			/*
 			 * Object now shadows whatever backing_object did.
@@ -1893,7 +1859,7 @@ vm_object_page_remove(vm_object_t object
 	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
 	    ("vm_object_page_remove: illegal options for object %p", object));
 	if (object->resident_page_count == 0)
-		goto skipmemq;
+		return;
 	vm_object_pip_add(object, 1);
 again:
 	p = vm_page_find_least(object, start);
@@ -1950,9 +1916,6 @@ next:
 		vm_page_unlock(p);
 	}
 	vm_object_pip_wakeup(object);
-skipmemq:
-	if (__predict_false(!vm_object_cache_is_empty(object)))
-		vm_page_cache_free(object, start, end);
 }
 
 /*

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/vm_object.h
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/vm_object.h	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/vm_object.h	Mon May  1 01:50:27 2017	(r317621)
@@ -118,7 +118,6 @@ struct vm_object {
 	vm_ooffset_t backing_object_offset;/* Offset in backing object */
 	TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
 	LIST_HEAD(, vm_reserv) rvq;	/* list of reservations */
-	struct vm_radix cache;		/* (o + f) root of the cache page radix trie */
 	void *handle;
 	union {
 		/*
@@ -306,13 +305,6 @@ void vm_object_pip_wakeup(vm_object_t ob
 void vm_object_pip_wakeupn(vm_object_t object, short i);
 void vm_object_pip_wait(vm_object_t object, char *waitid);
 
-static __inline boolean_t
-vm_object_cache_is_empty(vm_object_t object)
-{
-
-	return (vm_radix_is_empty(&object->cache));
-}
-
 void umtx_shm_object_init(vm_object_t object);
 void umtx_shm_object_terminated(vm_object_t object);
 extern int umtx_shm_vnobj_persistent;

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/vm_page.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/vm_page.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/vm_page.c	Mon May  1 01:50:27 2017	(r317621)
@@ -155,8 +155,7 @@ static int vm_pageout_pages_needed;
 
 static uma_zone_t fakepg_zone;
 
-static struct vnode *vm_page_alloc_init(vm_page_t m);
-static void vm_page_cache_turn_free(vm_page_t m);
+static void vm_page_alloc_check(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(uint8_t queue, vm_page_t m);
 static void vm_page_free_wakeup(void);
@@ -1140,9 +1139,7 @@ void
 vm_page_dirty_KBI(vm_page_t m)
 {
 
-	/* These assertions refer to this operation by its public name. */
-	KASSERT((m->flags & PG_CACHED) == 0,
-	    ("vm_page_dirty: page in cache!"));
+	/* Refer to this operation by its public name. */
 	KASSERT(m->valid == VM_PAGE_BITS_ALL,
 	    ("vm_page_dirty: page is invalid!"));
 	m->dirty = VM_PAGE_BITS_ALL;
@@ -1485,142 +1482,6 @@ vm_page_rename(vm_page_t m, vm_object_t 
 }
 
 /*
- *	Convert all of the given object's cached pages that have a
- *	pindex within the given range into free pages.  If the value
- *	zero is given for "end", then the range's upper bound is
- *	infinity.  If the given object is backed by a vnode and it
- *	transitions from having one or more cached pages to none, the
- *	vnode's hold count is reduced.
- */
-void
-vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
-{
-	vm_page_t m;
-	boolean_t empty;
-
-	mtx_lock(&vm_page_queue_free_mtx);
-	if (__predict_false(vm_radix_is_empty(&object->cache))) {
-		mtx_unlock(&vm_page_queue_free_mtx);
-		return;
-	}
-	while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
-		if (end != 0 && m->pindex >= end)
-			break;
-		vm_radix_remove(&object->cache, m->pindex);
-		vm_page_cache_turn_free(m);
-	}
-	empty = vm_radix_is_empty(&object->cache);
-	mtx_unlock(&vm_page_queue_free_mtx);
-	if (object->type == OBJT_VNODE && empty)
-		vdrop(object->handle);
-}
-
-/*
- *	Returns the cached page that is associated with the given
- *	object and offset.  If, however, none exists, returns NULL.
- *
- *	The free page queue must be locked.
- */
-static inline vm_page_t
-vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	return (vm_radix_lookup(&object->cache, pindex));
-}
-
-/*
- *	Remove the given cached page from its containing object's
- *	collection of cached pages.
- *
- *	The free page queue must be locked.
- */
-static void
-vm_page_cache_remove(vm_page_t m)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	KASSERT((m->flags & PG_CACHED) != 0,
-	    ("vm_page_cache_remove: page %p is not cached", m));
-	vm_radix_remove(&m->object->cache, m->pindex);
-	m->object = NULL;
-	vm_cnt.v_cache_count--;
-}
-
-/*
- *	Transfer all of the cached pages with offset greater than or
- *	equal to 'offidxstart' from the original object's cache to the
- *	new object's cache.  However, any cached pages with offset
- *	greater than or equal to the new object's size are kept in the
- *	original object.  Initially, the new object's cache must be
- *	empty.  Offset 'offidxstart' in the original object must
- *	correspond to offset zero in the new object.
- *
- *	The new object must be locked.
- */
-void
-vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
-    vm_object_t new_object)
-{
-	vm_page_t m;
-
-	/*
-	 * Insertion into an object's collection of cached pages
-	 * requires the object to be locked.  In contrast, removal does
-	 * not.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(new_object);
-	KASSERT(vm_radix_is_empty(&new_object->cache),
-	    ("vm_page_cache_transfer: object %p has cached pages",
-	    new_object));
-	mtx_lock(&vm_page_queue_free_mtx);
-	while ((m = vm_radix_lookup_ge(&orig_object->cache,
-	    offidxstart)) != NULL) {
-		/*
-		 * Transfer all of the pages with offset greater than or
-		 * equal to 'offidxstart' from the original object's
-		 * cache to the new object's cache.
-		 */
-		if ((m->pindex - offidxstart) >= new_object->size)
-			break;
-		vm_radix_remove(&orig_object->cache, m->pindex);
-		/* Update the page's object and offset. */
-		m->object = new_object;
-		m->pindex -= offidxstart;
-		if (vm_radix_insert(&new_object->cache, m))
-			vm_page_cache_turn_free(m);
-	}
-	mtx_unlock(&vm_page_queue_free_mtx);
-}
-
-/*
- *	Returns TRUE if a cached page is associated with the given object and
- *	offset, and FALSE otherwise.
- *
- *	The object must be locked.
- */
-boolean_t
-vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
-{
-	vm_page_t m;
-
-	/*
-	 * Insertion into an object's collection of cached pages requires the
-	 * object to be locked.  Therefore, if the object is locked and the
-	 * object's collection is empty, there is no need to acquire the free
-	 * page queues lock in order to prove that the specified page doesn't
-	 * exist.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	if (__predict_true(vm_object_cache_is_empty(object)))
-		return (FALSE);
-	mtx_lock(&vm_page_queue_free_mtx);
-	m = vm_page_cache_lookup(object, pindex);
-	mtx_unlock(&vm_page_queue_free_mtx);
-	return (m != NULL);
-}
-
-/*
  *	vm_page_alloc:
  *
  *	Allocate and return a page that is associated with the specified
@@ -1636,9 +1497,6 @@ vm_page_is_cached(vm_object_t object, vm
  *	optional allocation flags:
  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
  *				intends to allocate
- *	VM_ALLOC_IFCACHED	return page only if it is cached
- *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
- *				is cached
  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
  *	VM_ALLOC_NOOBJ		page is not associated with an object and
@@ -1652,8 +1510,6 @@ vm_page_is_cached(vm_object_t object, vm
 vm_page_t
 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 {
-	struct vnode *vp = NULL;
-	vm_object_t m_object;
 	vm_page_t m, mpred;
 	int flags, req_class;
 
@@ -1696,31 +1552,12 @@ vm_page_alloc(vm_object_t object, vm_pin
 		 * Allocate from the free queue if the number of free pages
 		 * exceeds the minimum for the request class.
 		 */
-		if (object != NULL &&
-		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
-			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
-				mtx_unlock(&vm_page_queue_free_mtx);
-				return (NULL);
-			}
-			if (vm_phys_unfree_page(m))
-				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
-#if VM_NRESERVLEVEL > 0
-			else if (!vm_reserv_reactivate_page(m))
-#else
-			else
-#endif
-				panic("vm_page_alloc: cache page %p is missing"
-				    " from the free queue", m);
-		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
-			mtx_unlock(&vm_page_queue_free_mtx);
-			return (NULL);
 #if VM_NRESERVLEVEL > 0
-		} else if (object == NULL || (object->flags & (OBJ_COLORED |
+		if (object == NULL || (object->flags & (OBJ_COLORED |
 		    OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
-		    vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
-#else
-		} else {
+		    vm_reserv_alloc_page(object, pindex, mpred)) == NULL)
 #endif
+		{
 			m = vm_phys_alloc_pages(object != NULL ?
 			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
 #if VM_NRESERVLEVEL > 0
@@ -1746,35 +1583,11 @@ vm_page_alloc(vm_object_t object, vm_pin
 	 *  At this point we had better have found a good page.
 	 */
 	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
-	KASSERT(m->queue == PQ_NONE,
-	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
-	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
-	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
-	KASSERT(!vm_page_busied(m), ("vm_page_alloc: page %p is busy", m));
-	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
-	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
-	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
-	    pmap_page_get_memattr(m)));
-	if ((m->flags & PG_CACHED) != 0) {
-		KASSERT((m->flags & PG_ZERO) == 0,
-		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
-		KASSERT(m->valid != 0,
-		    ("vm_page_alloc: cached page %p is invalid", m));
-		if (m->object != object || m->pindex != pindex)
-			m->valid = 0;
-		m_object = m->object;
-		vm_page_cache_remove(m);
-		if (m_object->type == OBJT_VNODE &&
-		    vm_object_cache_is_empty(m_object))
-			vp = m_object->handle;
-	} else {
-		KASSERT(m->valid == 0,
-		    ("vm_page_alloc: free page %p is valid", m));
-		vm_phys_freecnt_adj(m, -1);
-		if ((m->flags & PG_ZERO) != 0)
-			vm_page_zero_count--;
-	}
+	vm_phys_freecnt_adj(m, -1);
+	if ((m->flags & PG_ZERO) != 0)
+		vm_page_zero_count--;
 	mtx_unlock(&vm_page_queue_free_mtx);
+	vm_page_alloc_check(m);
 
 	/*
 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
@@ -1806,9 +1619,6 @@ vm_page_alloc(vm_object_t object, vm_pin
 
 	if (object != NULL) {
 		if (vm_page_insert_after(m, object, pindex, mpred)) {
-			/* See the comment below about hold count. */
-			if (vp != NULL)
-				vdrop(vp);
 			pagedaemon_wakeup();
 			if (req & VM_ALLOC_WIRED) {
 				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
@@ -1829,15 +1639,6 @@ vm_page_alloc(vm_object_t object, vm_pin
 		m->pindex = pindex;
 
 	/*
-	 * The following call to vdrop() must come after the above call
-	 * to vm_page_insert() in case both affect the same object and
-	 * vnode.  Otherwise, the affected vnode's hold count could
-	 * temporarily become zero.
-	 */
-	if (vp != NULL)
-		vdrop(vp);
-
-	/*
 	 * Don't wakeup too often - wakeup the pageout daemon when
 	 * we would be nearly out of memory.
 	 */
@@ -1847,16 +1648,6 @@ vm_page_alloc(vm_object_t object, vm_pin
 	return (m);
 }
 
-static void
-vm_page_alloc_contig_vdrop(struct spglist *lst)
-{
-
-	while (!SLIST_EMPTY(lst)) {
-		vdrop((struct vnode *)SLIST_FIRST(lst)-> plinks.s.pv);
-		SLIST_REMOVE_HEAD(lst, plinks.s.ss);
-	}
-}
-
 /*
  *	vm_page_alloc_contig:
  *
@@ -1901,8 +1692,6 @@ vm_page_alloc_contig(vm_object_t object,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
-	struct vnode *drop;
-	struct spglist deferred_vdrop_list;
 	vm_page_t m, m_tmp, m_ret;
 	u_int flags;
 	int req_class;
@@ -1928,7 +1717,6 @@ vm_page_alloc_contig(vm_object_t object,
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 
-	SLIST_INIT(&deferred_vdrop_list);
 	mtx_lock(&vm_page_queue_free_mtx);
 	if (vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
 	    vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
@@ -1950,17 +1738,7 @@ retry:
 		return (NULL);
 	}
 	if (m_ret != NULL)
-		for (m = m_ret; m < &m_ret[npages]; m++) {
-			drop = vm_page_alloc_init(m);
-			if (drop != NULL) {
-				/*
-				 * Enqueue the vnode for deferred vdrop().
-				 */
-				m->plinks.s.pv = drop;
-				SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
-				    plinks.s.ss);
-			}
-		}
+		vm_phys_freecnt_adj(m_ret, -npages);
 	else {
 #if VM_NRESERVLEVEL > 0
 		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
@@ -1968,9 +1746,14 @@ retry:
 			goto retry;
 #endif
 	}
+	for (m = m_ret; m < &m_ret[npages]; m++)
+		if ((m->flags & PG_ZERO) != 0)
+			vm_page_zero_count--;
 	mtx_unlock(&vm_page_queue_free_mtx);
 	if (m_ret == NULL)
 		return (NULL);
+	for (m = m_ret; m < &m_ret[npages]; m++)
+		vm_page_alloc_check(m);
 
 	/*
 	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
@@ -2003,8 +1786,6 @@ retry:
 		m->oflags = VPO_UNMANAGED;
 		if (object != NULL) {
 			if (vm_page_insert(m, object, pindex)) {
-				vm_page_alloc_contig_vdrop(
-				    &deferred_vdrop_list);
 				if (vm_paging_needed())
 					pagedaemon_wakeup();
 				if ((req & VM_ALLOC_WIRED) != 0)
@@ -2029,59 +1810,28 @@ retry:
 			pmap_page_set_memattr(m, memattr);
 		pindex++;
 	}
-	vm_page_alloc_contig_vdrop(&deferred_vdrop_list);
 	if (vm_paging_needed())
 		pagedaemon_wakeup();
 	return (m_ret);
 }
 
 /*
- * Initialize a page that has been freshly dequeued from a freelist.
- * The caller has to drop the vnode returned, if it is not NULL.
- *
- * This function may only be used to initialize unmanaged pages.
- *
- * To be called with vm_page_queue_free_mtx held.
+ * Check a page that has been freshly dequeued from a freelist.
  */
-static struct vnode *
-vm_page_alloc_init(vm_page_t m)
+static void
+vm_page_alloc_check(vm_page_t m)
 {
-	struct vnode *drop;
-	vm_object_t m_object;
 
 	KASSERT(m->queue == PQ_NONE,
-	    ("vm_page_alloc_init: page %p has unexpected queue %d",
-	    m, m->queue));
-	KASSERT(m->wire_count == 0,
-	    ("vm_page_alloc_init: page %p is wired", m));
-	KASSERT(m->hold_count == 0,
-	    ("vm_page_alloc_init: page %p is held", m));
-	KASSERT(!vm_page_busied(m),
-	    ("vm_page_alloc_init: page %p is busy", m));
-	KASSERT(m->dirty == 0,
-	    ("vm_page_alloc_init: page %p is dirty", m));
+	    ("page %p has unexpected queue %d", m, m->queue));
+	KASSERT(m->wire_count == 0, ("page %p is wired", m));
+	KASSERT(m->hold_count == 0, ("page %p is held", m));
+	KASSERT(!vm_page_busied(m), ("page %p is busy", m));
+	KASSERT(m->dirty == 0, ("page %p is dirty", m));
 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
-	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
+	    ("page %p has unexpected memattr %d",
 	    m, pmap_page_get_memattr(m)));
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	drop = NULL;
-	if ((m->flags & PG_CACHED) != 0) {
-		KASSERT((m->flags & PG_ZERO) == 0,
-		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
-		m->valid = 0;
-		m_object = m->object;
-		vm_page_cache_remove(m);
-		if (m_object->type == OBJT_VNODE &&
-		    vm_object_cache_is_empty(m_object))
-			drop = m_object->handle;
-	} else {
-		KASSERT(m->valid == 0,
-		    ("vm_page_alloc_init: free page %p is valid", m));
-		vm_phys_freecnt_adj(m, -1);
-		if ((m->flags & PG_ZERO) != 0)
-			vm_page_zero_count--;
-	}
-	return (drop);
+	KASSERT(m->valid == 0, ("free page %p is valid", m));
 }
 
 /*
@@ -2107,7 +1857,6 @@ vm_page_alloc_init(vm_page_t m)
 vm_page_t
 vm_page_alloc_freelist(int flind, int req)
 {
-	struct vnode *drop;
 	vm_page_t m;
 	u_int flags;
 	int req_class;
@@ -2141,8 +1890,11 @@ vm_page_alloc_freelist(int flind, int re
 		mtx_unlock(&vm_page_queue_free_mtx);
 		return (NULL);
 	}
-	drop = vm_page_alloc_init(m);
+	vm_phys_freecnt_adj(m, -1);
+	if ((m->flags & PG_ZERO) != 0)
+		vm_page_zero_count--;
 	mtx_unlock(&vm_page_queue_free_mtx);
+	vm_page_alloc_check(m);
 
 	/*
 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
@@ -2162,8 +1914,6 @@ vm_page_alloc_freelist(int flind, int re
 	}
 	/* Unmanaged pages don't use "act_count". */
 	m->oflags = VPO_UNMANAGED;
-	if (drop != NULL)
-		vdrop(drop);
 	if (vm_paging_needed())
 		pagedaemon_wakeup();
 	return (m);
@@ -2289,38 +2039,8 @@ retry:
 			/* Don't care: PG_NODUMP, PG_ZERO. */
 			if (object->type != OBJT_DEFAULT &&
 			    object->type != OBJT_SWAP &&
-			    object->type != OBJT_VNODE)
+			    object->type != OBJT_VNODE) {
 				run_ext = 0;
-			else if ((m->flags & PG_CACHED) != 0 ||
-			    m != vm_page_lookup(object, m->pindex)) {
-				/*
-				 * The page is cached or recently converted
-				 * from cached to free.
-				 */
-#if VM_NRESERVLEVEL > 0
-				if (level >= 0) {
-					/*
-					 * The page is reserved.  Extend the
-					 * current run by one page.
-					 */
-					run_ext = 1;
-				} else
-#endif
-				if ((order = m->order) < VM_NFREEORDER) {
-					/*
-					 * The page is enqueued in the
-					 * physical memory allocator's cache/
-					 * free page queues.  Moreover, it is
-					 * the first page in a power-of-two-
-					 * sized run of contiguous cache/free
-					 * pages.  Add these pages to the end
-					 * of the current run, and jump
-					 * ahead.
-					 */
-					run_ext = 1 << order;
-					m_inc = 1 << order;
-				} else
-					run_ext = 0;
 #if VM_NRESERVLEVEL > 0
 			} else if ((options & VPSC_NOSUPER) != 0 &&
 			    (level = vm_reserv_level_iffullpop(m)) >= 0) {
@@ -2487,15 +2207,7 @@ retry:
 			    object->type != OBJT_SWAP &&
 			    object->type != OBJT_VNODE)
 				error = EINVAL;
-			else if ((m->flags & PG_CACHED) != 0 ||
-			    m != vm_page_lookup(object, m->pindex)) {
-				/*
-				 * The page is cached or recently converted
-				 * from cached to free.
-				 */
-				VM_OBJECT_WUNLOCK(object);
-				goto cached;
-			} else if (object->memattr != VM_MEMATTR_DEFAULT)
+			else if (object->memattr != VM_MEMATTR_DEFAULT)
 				error = EINVAL;
 			else if (m->queue != PQ_NONE && !vm_page_busied(m)) {
 				KASSERT(pmap_page_get_memattr(m) ==
@@ -2596,7 +2308,6 @@ retry:
 unlock:
 			VM_OBJECT_WUNLOCK(object);
 		} else {
-cached:
 			mtx_lock(&vm_page_queue_free_mtx);
 			order = m->order;
 			if (order < VM_NFREEORDER) {
@@ -2995,27 +2706,6 @@ vm_page_free_wakeup(void)
 }
 
 /*
- *	Turn a cached page into a free page, by changing its attributes.
- *	Keep the statistics up-to-date.
- *
- *	The free page queue must be locked.
- */
-static void
-vm_page_cache_turn_free(vm_page_t m)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-
-	m->object = NULL;
-	m->valid = 0;
-	KASSERT((m->flags & PG_CACHED) != 0,
-	    ("vm_page_cache_turn_free: page %p is not cached", m));
-	m->flags &= ~PG_CACHED;
-	vm_cnt.v_cache_count--;
-	vm_phys_freecnt_adj(m, 1);
-}
-
-/*
  *	vm_page_free_toq:
  *
  *	Returns the given page to the free list,
@@ -3418,8 +3108,7 @@ retrylookup:
 		VM_WAIT;
 		VM_OBJECT_WLOCK(object);
 		goto retrylookup;
-	} else if (m->valid != 0)
-		return (m);
+	}
 	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
 	return (m);
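
A note on the vm_page.c hunks above: vm_page_alloc_init() could return
a vnode for the caller to vdrop() because a vnode was held while it had
cached pages, and freeing the last of them had to release that hold
(hence the deferred-vdrop list in vm_page_alloc_contig()).  With cached
pages gone, every allocation path dequeues a page with essentially the
same few lines, and vm_page_alloc_check() is pure assertions; copied
from the diff:

	vm_phys_freecnt_adj(m, -1);
	if ((m->flags & PG_ZERO) != 0)
		vm_page_zero_count--;
	mtx_unlock(&vm_page_queue_free_mtx);
	vm_page_alloc_check(m);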

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/vm_page.h
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/vm_page.h	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/vm_page.h	Mon May  1 01:50:27 2017	(r317621)
@@ -326,7 +326,6 @@ extern struct mtx_padalign pa_lock[];
  * Page flags.  If changed at any other time than page allocation or
  * freeing, the modification must be protected by the vm_page lock.
  */
-#define	PG_CACHED	0x0001		/* page is cached */
 #define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
 #define	PG_ZERO		0x0008		/* page is zeroed */
 #define	PG_MARKER	0x0010		/* special queue marker page */
@@ -409,8 +408,6 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa)
 #define	VM_ALLOC_ZERO		0x0040	/* (acfg) Try to obtain a zeroed page */
 #define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
 #define	VM_ALLOC_NOBUSY		0x0200	/* (acg) Do not busy the page */
-#define	VM_ALLOC_IFCACHED	0x0400	/* (ag) Fail if page is not cached */
-#define	VM_ALLOC_IFNOTCACHED	0x0800	/* (ag) Fail if page is cached */
 #define	VM_ALLOC_IGN_SBUSY	0x1000	/* (g) Ignore shared busy flag */
 #define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
 #define	VM_ALLOC_SBUSY		0x4000	/* (acg) Shared busy the page */
@@ -453,8 +450,6 @@ vm_page_t vm_page_alloc_contig(vm_object
     vm_paddr_t boundary, vm_memattr_t memattr);
 vm_page_t vm_page_alloc_freelist(int, int);
 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
-void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
-void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
 int vm_page_try_to_free (vm_page_t);
 void vm_page_deactivate (vm_page_t);
 void vm_page_deactivate_noreuse(vm_page_t);
@@ -464,7 +459,6 @@ vm_page_t vm_page_find_least(vm_object_t
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
-boolean_t vm_page_is_cached(vm_object_t object, vm_pindex_t pindex);
 void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/vm_phys.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/vm_phys.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/vm_phys.c	Mon May  1 01:50:27 2017	(r317621)
@@ -1314,7 +1314,7 @@ vm_phys_zero_pages_idle(void)
 	for (;;) {
 		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
 			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
-				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
+				if ((m_tmp->flags & PG_ZERO) == 0) {
 					vm_phys_unfree_page(m_tmp);
 					vm_phys_freecnt_adj(m, -1);
 					mtx_unlock(&vm_page_queue_free_mtx);

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/vm_reserv.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/vm_reserv.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/vm_reserv.c	Mon May  1 01:50:27 2017	(r317621)
@@ -908,45 +908,6 @@ vm_reserv_level_iffullpop(vm_page_t m)
 }
 
 /*
- * Prepare for the reactivation of a cached page.
- *
- * First, suppose that the given page "m" was allocated individually, i.e., not
- * as part of a reservation, and cached.  Then, suppose a reservation
- * containing "m" is allocated by the same object.  Although "m" and the
- * reservation belong to the same object, "m"'s pindex may not match the
- * reservation's.
- *
- * The free page queue must be locked.
- */
-boolean_t
-vm_reserv_reactivate_page(vm_page_t m)
-{
-	vm_reserv_t rv;
-	int index;
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	rv = vm_reserv_from_page(m);
-	if (rv->object == NULL)
-		return (FALSE);
-	KASSERT((m->flags & PG_CACHED) != 0,
-	    ("vm_reserv_reactivate_page: page %p is not cached", m));
-	if (m->object == rv->object &&
-	    m->pindex - rv->pindex == (index = VM_RESERV_INDEX(m->object,
-	    m->pindex)))
-		vm_reserv_populate(rv, index);
-	else {
-		KASSERT(rv->inpartpopq,
-	    ("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE",
-		    rv));
-		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
-		rv->inpartpopq = FALSE;
-		/* Don't release "m" to the physical memory allocator. */
-		vm_reserv_break(rv, m);
-	}
-	return (TRUE);
-}
-
-/*
  * Breaks the given partially-populated reservation, releasing its cached and
  * free pages to the physical memory allocator.
  *

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/vm_reserv.h
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/vm_reserv.h	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/vm_reserv.h	Mon May  1 01:50:27 2017	(r317621)
@@ -56,7 +56,6 @@ void		vm_reserv_init(void);
 bool		vm_reserv_is_page_free(vm_page_t m);
 int		vm_reserv_level(vm_page_t m);
 int		vm_reserv_level_iffullpop(vm_page_t m);
-boolean_t	vm_reserv_reactivate_page(vm_page_t m);
 boolean_t	vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low,
 		    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 boolean_t	vm_reserv_reclaim_inactive(void);

Modified: user/markj/PQ_LAUNDRY_11/sys/vm/vnode_pager.c
==============================================================================
--- user/markj/PQ_LAUNDRY_11/sys/vm/vnode_pager.c	Mon May  1 01:42:26 2017	(r317620)
+++ user/markj/PQ_LAUNDRY_11/sys/vm/vnode_pager.c	Mon May  1 01:50:27 2017	(r317621)
@@ -466,10 +466,6 @@ vnode_pager_setsize(struct vnode *vp, vm
 			 * replacement from working properly.
 			 */
 			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
-		} else if ((nsize & PAGE_MASK) &&
-		    vm_page_is_cached(object, OFF_TO_IDX(nsize))) {
-			vm_page_cache_free(object, OFF_TO_IDX(nsize),
-			    nobjsize);
 		}
 	}
 	object->un_pager.vnp.vnp_size = nsize;
@@ -894,8 +890,7 @@ vnode_pager_generic_getpages(struct vnod
 		for (tpindex = m[0]->pindex - 1;
 		    tpindex >= startpindex && tpindex < m[0]->pindex;
 		    tpindex--, i++) {
-			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
-			    VM_ALLOC_IFNOTCACHED);
+			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
 			if (p == NULL) {
 				/* Shift the array. */
 				for (int j = 0; j < i; j++)
@@ -932,8 +927,7 @@ vnode_pager_generic_getpages(struct vnod
 
 		for (tpindex = m[count - 1]->pindex + 1;
 		    tpindex < endpindex; i++, tpindex++) {
-			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
-			    VM_ALLOC_IFNOTCACHED);
+			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
 			if (p == NULL)
 				break;
 			bp->b_pages[i] = p;

