svn commit: r284310 - in head/sys: fs/tmpfs kern vm

Gleb Smirnoff glebius at FreeBSD.org
Fri Jun 12 11:32:23 UTC 2015


Author: glebius
Date: Fri Jun 12 11:32:20 2015
New Revision: 284310
URL: https://svnweb.freebsd.org/changeset/base/284310

Log:
  Make KPI of vm_pager_get_pages() more strict: if a pager changes a page
  in the requested array, then it is responsible for disposition of previous
  page and is responsible for updating the entry in the requested array.
  Now consumers of the KPI do not need to re-look up the pages after calling
  vm_pager_get_pages().
  
  Reviewed by:	kib
  Sponsored by:	Netflix
  Sponsored by:	Nginx, Inc.

Modified:
  head/sys/fs/tmpfs/tmpfs_subr.c
  head/sys/kern/kern_exec.c
  head/sys/kern/uipc_shm.c
  head/sys/kern/uipc_syscalls.c
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_glue.c
  head/sys/vm/vm_object.c

Modified: head/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- head/sys/fs/tmpfs/tmpfs_subr.c	Fri Jun 12 11:21:35 2015	(r284309)
+++ head/sys/fs/tmpfs/tmpfs_subr.c	Fri Jun 12 11:32:20 2015	(r284310)
@@ -1320,7 +1320,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t
 	struct tmpfs_mount *tmp;
 	struct tmpfs_node *node;
 	vm_object_t uobj;
-	vm_page_t m, ma[1];
+	vm_page_t m;
 	vm_pindex_t idx, newpages, oldpages;
 	off_t oldsize;
 	int base, rv;
@@ -1367,11 +1367,9 @@ retry:
 					VM_WAIT;
 					VM_OBJECT_WLOCK(uobj);
 					goto retry;
-				} else if (m->valid != VM_PAGE_BITS_ALL) {
-					ma[0] = m;
-					rv = vm_pager_get_pages(uobj, ma, 1, 0);
-					m = vm_page_lookup(uobj, idx);
-				} else
+				} else if (m->valid != VM_PAGE_BITS_ALL)
+					rv = vm_pager_get_pages(uobj, &m, 1, 0);
+				else
 					/* A cached page was reactivated. */
 					rv = VM_PAGER_OK;
 				vm_page_lock(m);

Modified: head/sys/kern/kern_exec.c
==============================================================================
--- head/sys/kern/kern_exec.c	Fri Jun 12 11:21:35 2015	(r284309)
+++ head/sys/kern/kern_exec.c	Fri Jun 12 11:32:20 2015	(r284310)
@@ -966,13 +966,10 @@ exec_map_first_page(imgp)
 		}
 		initial_pagein = i;
 		rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
-		ma[0] = vm_page_lookup(object, 0);
-		if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) {
-			if (ma[0] != NULL) {
-				vm_page_lock(ma[0]);
-				vm_page_free(ma[0]);
-				vm_page_unlock(ma[0]);
-			}
+		if (rv != VM_PAGER_OK) {
+			vm_page_lock(ma[0]);
+			vm_page_free(ma[0]);
+			vm_page_unlock(ma[0]);
 			VM_OBJECT_WUNLOCK(object);
 			return (EIO);
 		}

Modified: head/sys/kern/uipc_shm.c
==============================================================================
--- head/sys/kern/uipc_shm.c	Fri Jun 12 11:21:35 2015	(r284309)
+++ head/sys/kern/uipc_shm.c	Fri Jun 12 11:32:20 2015	(r284310)
@@ -189,14 +189,6 @@ uiomove_object_page(vm_object_t obj, siz
 	if (m->valid != VM_PAGE_BITS_ALL) {
 		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(obj, &m, 1, 0);
-			m = vm_page_lookup(obj, idx);
-			if (m == NULL) {
-				printf(
-		    "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
-				    obj, idx, rv);
-				VM_OBJECT_WUNLOCK(obj);
-				return (EIO);
-			}
 			if (rv != VM_PAGER_OK) {
 				printf(
 	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
@@ -423,7 +415,7 @@ static int
 shm_dotruncate(struct shmfd *shmfd, off_t length)
 {
 	vm_object_t object;
-	vm_page_t m, ma[1];
+	vm_page_t m;
 	vm_pindex_t idx, nobjsize;
 	vm_ooffset_t delta;
 	int base, rv;
@@ -465,12 +457,10 @@ retry:
 					VM_WAIT;
 					VM_OBJECT_WLOCK(object);
 					goto retry;
-				} else if (m->valid != VM_PAGE_BITS_ALL) {
-					ma[0] = m;
-					rv = vm_pager_get_pages(object, ma, 1,
+				} else if (m->valid != VM_PAGE_BITS_ALL)
+					rv = vm_pager_get_pages(object, &m, 1,
 					    0);
-					m = vm_page_lookup(object, idx);
-				} else
+				else
 					/* A cached page was reactivated. */
 					rv = VM_PAGER_OK;
 				vm_page_lock(m);

Modified: head/sys/kern/uipc_syscalls.c
==============================================================================
--- head/sys/kern/uipc_syscalls.c	Fri Jun 12 11:21:35 2015	(r284309)
+++ head/sys/kern/uipc_syscalls.c	Fri Jun 12 11:32:20 2015	(r284310)
@@ -2026,10 +2026,7 @@ sendfile_readpage(vm_object_t obj, struc
 		if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
 			rv = vm_pager_get_pages(obj, &m, 1, 0);
 			SFSTAT_INC(sf_iocnt);
-			m = vm_page_lookup(obj, pindex);
-			if (m == NULL)
-				error = EIO;
-			else if (rv != VM_PAGER_OK) {
+			if (rv != VM_PAGER_OK) {
 				vm_page_lock(m);
 				vm_page_free(m);
 				vm_page_unlock(m);

Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c	Fri Jun 12 11:21:35 2015	(r284309)
+++ head/sys/vm/vm_fault.c	Fri Jun 12 11:32:20 2015	(r284310)
@@ -679,19 +679,12 @@ vnode_locked:
 				/*
 				 * Found the page. Leave it busy while we play
 				 * with it.
-				 */
-
-				/*
-				 * Relookup in case pager changed page. Pager
+				 *
+				 * Pager could have changed the page.  Pager
 				 * is responsible for disposition of old page
 				 * if moved.
 				 */
-				fs.m = vm_page_lookup(fs.object, fs.pindex);
-				if (!fs.m) {
-					unlock_and_deallocate(&fs);
-					goto RetryFault;
-				}
-
+				fs.m = marray[reqpage];
 				hardfault++;
 				break; /* break to PAGE HAS BEEN FOUND */
 			}

Modified: head/sys/vm/vm_glue.c
==============================================================================
--- head/sys/vm/vm_glue.c	Fri Jun 12 11:21:35 2015	(r284309)
+++ head/sys/vm/vm_glue.c	Fri Jun 12 11:32:20 2015	(r284310)
@@ -230,7 +230,7 @@ vsunlock(void *addr, size_t len)
 static vm_page_t
 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
 {
-	vm_page_t m, ma[1];
+	vm_page_t m;
 	vm_pindex_t pindex;
 	int rv;
 
@@ -238,11 +238,7 @@ vm_imgact_hold_page(vm_object_t object, 
 	pindex = OFF_TO_IDX(offset);
 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 	if (m->valid != VM_PAGE_BITS_ALL) {
-		ma[0] = m;
-		rv = vm_pager_get_pages(object, ma, 1, 0);
-		m = vm_page_lookup(object, pindex);
-		if (m == NULL)
-			goto out;
+		rv = vm_pager_get_pages(object, &m, 1, 0);
 		if (rv != VM_PAGER_OK) {
 			vm_page_lock(m);
 			vm_page_free(m);
@@ -571,7 +567,7 @@ vm_thread_swapin(struct thread *td)
 {
 	vm_object_t ksobj;
 	vm_page_t ma[KSTACK_MAX_PAGES];
-	int i, j, k, pages, rv;
+	int i, j, pages, rv;
 
 	pages = td->td_kstack_pages;
 	ksobj = td->td_kstack_obj;
@@ -593,9 +589,12 @@ vm_thread_swapin(struct thread *td)
 			if (rv != VM_PAGER_OK)
 	panic("vm_thread_swapin: cannot get kstack for proc: %d",
 				    td->td_proc->p_pid);
+			/*
+			 * All pages in the array are in place, because the
+			 * pager is always the swap pager, which doesn't
+			 * free or remove wired non-req pages from the object.
+			 */
 			vm_object_pip_wakeup(ksobj);
-			for (k = i; k < j; k++)
-				ma[k] = vm_page_lookup(ksobj, k);
 			vm_page_xunbusy(ma[i]);
 		} else if (vm_page_xbusied(ma[i]))
 			vm_page_xunbusy(ma[i]);

Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c	Fri Jun 12 11:21:35 2015	(r284309)
+++ head/sys/vm/vm_object.c	Fri Jun 12 11:32:20 2015	(r284310)
@@ -2046,7 +2046,7 @@ vm_object_page_cache(vm_object_t object,
 boolean_t
 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
-	vm_page_t m, ma[1];
+	vm_page_t m;
 	vm_pindex_t pindex;
 	int rv;
 
@@ -2054,11 +2054,7 @@ vm_object_populate(vm_object_t object, v
 	for (pindex = start; pindex < end; pindex++) {
 		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 		if (m->valid != VM_PAGE_BITS_ALL) {
-			ma[0] = m;
-			rv = vm_pager_get_pages(object, ma, 1, 0);
-			m = vm_page_lookup(object, pindex);
-			if (m == NULL)
-				break;
+			rv = vm_pager_get_pages(object, &m, 1, 0);
 			if (rv != VM_PAGER_OK) {
 				vm_page_lock(m);
 				vm_page_free(m);


More information about the svn-src-head mailing list