Tmpfs elimination of double-copy

Kostik Belousov kostikbel at gmail.com
Mon Jun 21 13:26:52 UTC 2010


Hi,
Below is a patch that eliminates the second copy of the data kept by tmpfs
when a file is mapped. It also removes potential deadlocks caused by
tmpfs doing copyin/out while a page is busy. It is possible that the patch
also fixes the known issue with sendfile(2) on a tmpfs file, but I have
not verified this.

Patch essentially consists of three parts:
- moving the vm_object's vnp_size field from the type-discriminated union
  to the vm_object proper;
- making the VM not choke when the vm object held in a struct vnode's
  v_object is a default or swap object instead of a vnode object;
- using the swap object that keeps the data for a tmpfs VREG file also as
  the v_object.

Peter Holm helped me with the patch; apparently we survive fsx and stress2.

diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index adeabfb..0cfe0d9 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -339,7 +339,7 @@ again:
 
 			if (vm_page_sleep_if_busy(m, FALSE, "zfsmwb"))
 				goto again;
-			fsize = obj->un_pager.vnp.vnp_size;
+			fsize = obj->vnp_size;
 			vm_page_busy(m);
 			vm_page_lock_queues();
 			vm_page_undirty(m);
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index b6c5cfe..7297f5a 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -379,13 +379,17 @@ loop:
 		/* FALLTHROUGH */
 	case VLNK:
 		/* FALLTHROUGH */
-	case VREG:
-		/* FALLTHROUGH */
 	case VSOCK:
 		break;
 	case VFIFO:
 		vp->v_op = &tmpfs_fifoop_entries;
 		break;
+	case VREG:
+		VI_LOCK(vp);
+		KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
+		vp->v_object = node->tn_reg.tn_aobj;
+		VI_UNLOCK(vp);
+		break;
 	case VDIR:
 		MPASS(node->tn_dir.tn_parent != NULL);
 		if (node->tn_dir.tn_parent == node)
@@ -396,7 +400,6 @@ loop:
 		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
 	}
 
-	vnode_pager_setsize(vp, node->tn_size);
 	error = insmntque(vp, mp);
 	if (error)
 		vp = NULL;
@@ -849,11 +852,13 @@ tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
 int
 tmpfs_reg_resize(struct vnode *vp, off_t newsize)
 {
-	int error;
-	size_t newpages, oldpages;
 	struct tmpfs_mount *tmp;
 	struct tmpfs_node *node;
+	vm_object_t uobj;
+	vm_page_t m;
 	off_t oldsize;
+	size_t newpages, oldpages, zerolen;
+	int error;
 
 	MPASS(vp->v_type == VREG);
 	MPASS(newsize >= 0);
@@ -883,41 +888,38 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize)
 	TMPFS_UNLOCK(tmp);
 
 	node->tn_size = newsize;
-	vnode_pager_setsize(vp, newsize);
+	uobj = node->tn_reg.tn_aobj;
+	VM_OBJECT_LOCK(uobj);
 	if (newsize < oldsize) {
-		size_t zerolen = round_page(newsize) - newsize;
-		vm_object_t uobj = node->tn_reg.tn_aobj;
-		vm_page_t m;
-
 		/*
 		 * free "backing store"
 		 */
-		VM_OBJECT_LOCK(uobj);
 		if (newpages < oldpages) {
-			swap_pager_freespace(uobj,
-						newpages, oldpages - newpages);
-			vm_object_page_remove(uobj,
-				OFF_TO_IDX(newsize + PAGE_MASK), 0, FALSE);
+			swap_pager_freespace(uobj, newpages, oldpages -
+			    newpages);
+			vm_object_page_remove(uobj, OFF_TO_IDX(newsize +
+			    PAGE_MASK), 0, FALSE);
 		}
 
 		/*
 		 * zero out the truncated part of the last page.
 		 */
-
+		zerolen = round_page(newsize) - newsize;
 		if (zerolen > 0) {
 			m = vm_page_grab(uobj, OFF_TO_IDX(newsize),
 			    VM_ALLOC_NOBUSY | VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 			pmap_zero_page_area(m, PAGE_SIZE - zerolen,
 				zerolen);
 		}
-		VM_OBJECT_UNLOCK(uobj);
-
 	}
+	uobj->size = newpages;
+	uobj->vnp_size = newsize;
+	VM_OBJECT_UNLOCK(uobj);
 
 	error = 0;
 
 out:
-	return error;
+	return (error);
 }
 
 /* --------------------------------------------------------------------- */
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index 88e0939..97d3cc7 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -433,7 +433,6 @@ tmpfs_setattr(struct vop_setattr_args *v)
 	return error;
 }
 
-/* --------------------------------------------------------------------- */
 static int
 tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
     vm_offset_t offset, size_t tlen, struct uio *uio)
@@ -449,12 +448,14 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			error = vm_pager_get_pages(tobj, &m, 1, 0);
 			if (error != 0) {
+				vm_page_wakeup(m);
 				printf("tmpfs get pages from pager error [read]\n");
 				goto out;
 			}
 		} else
 			vm_page_zero_invalid(m, TRUE);
 	}
+	vm_page_wakeup(m);
 	VM_OBJECT_UNLOCK(tobj);
 	error = uiomove_fromphys(&m, offset, tlen, uio);
 	VM_OBJECT_LOCK(tobj);
@@ -462,124 +463,26 @@ out:
 	vm_page_lock(m);
 	vm_page_unwire(m, TRUE);
 	vm_page_unlock(m);
-	vm_page_wakeup(m);
 	vm_object_pip_subtract(tobj, 1);
 	VM_OBJECT_UNLOCK(tobj);
 
 	return (error);
 }
 
-static __inline int
-tmpfs_nocacheread_buf(vm_object_t tobj, vm_pindex_t idx,
-    vm_offset_t offset, size_t tlen, void *buf)
-{
-	struct uio uio;
-	struct iovec iov;
-
-	uio.uio_iovcnt = 1;
-	uio.uio_iov = &iov;
-	iov.iov_base = buf;
-	iov.iov_len = tlen;
-
-	uio.uio_offset = 0;
-	uio.uio_resid = tlen;
-	uio.uio_rw = UIO_READ;
-	uio.uio_segflg = UIO_SYSSPACE;
-	uio.uio_td = curthread;
-
-	return (tmpfs_nocacheread(tobj, idx, offset, tlen, &uio));
-}
-
-static int
-tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
-{
-	struct sf_buf	*sf;
-	vm_pindex_t	idx;
-	vm_page_t	m;
-	vm_offset_t	offset;
-	off_t		addr;
-	size_t		tlen;
-	char		*ma;
-	int		error;
-
-	addr = uio->uio_offset;
-	idx = OFF_TO_IDX(addr);
-	offset = addr & PAGE_MASK;
-	tlen = MIN(PAGE_SIZE - offset, len);
-
-	if ((vobj == NULL) ||
-	    (vobj->resident_page_count == 0 && vobj->cache == NULL))
-		goto nocache;
-
-	VM_OBJECT_LOCK(vobj);
-lookupvpg:
-	if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
-	    vm_page_is_valid(m, offset, tlen)) {
-		if ((m->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.  
-			 */
-			vm_page_lock_queues();
-			vm_page_flag_set(m, PG_REFERENCED);
-			vm_page_sleep(m, "tmfsmr");
-			goto lookupvpg;
-		}
-		vm_page_busy(m);
-		VM_OBJECT_UNLOCK(vobj);
-		error = uiomove_fromphys(&m, offset, tlen, uio);
-		VM_OBJECT_LOCK(vobj);
-		vm_page_wakeup(m);
-		VM_OBJECT_UNLOCK(vobj);
-		return	(error);
-	} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
-		if ((m->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.  
-			 */
-			vm_page_lock_queues();
-			vm_page_flag_set(m, PG_REFERENCED);
-			vm_page_sleep(m, "tmfsmr");
-			goto lookupvpg;
-		}
-		vm_page_busy(m);
-		VM_OBJECT_UNLOCK(vobj);
-		sched_pin();
-		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
-		ma = (char *)sf_buf_kva(sf);
-		error = tmpfs_nocacheread_buf(tobj, idx, offset, tlen,
-		    ma + offset);
-		if (error == 0) {
-			uio->uio_offset += tlen;
-			uio->uio_resid -= tlen;
-		}
-		sf_buf_free(sf);
-		sched_unpin();
-		VM_OBJECT_LOCK(vobj);
-		vm_page_wakeup(m);
-		VM_OBJECT_UNLOCK(vobj);
-		return	(error);
-	}
-	VM_OBJECT_UNLOCK(vobj);
-nocache:
-	error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);
-
-	return	(error);
-}
-
 static int
 tmpfs_read(struct vop_read_args *v)
 {
 	struct vnode *vp = v->a_vp;
 	struct uio *uio = v->a_uio;
-
 	struct tmpfs_node *node;
 	vm_object_t uobj;
 	size_t len;
 	int resid;
-
 	int error = 0;
+	vm_pindex_t	idx;
+	vm_offset_t	offset;
+	off_t		addr;
+	size_t		tlen;
 
 	node = VP_TO_TMPFS_NODE(vp);
 
@@ -603,7 +506,11 @@ tmpfs_read(struct vop_read_args *v)
 		len = MIN(node->tn_size - uio->uio_offset, resid);
 		if (len == 0)
 			break;
-		error = tmpfs_mappedread(vp->v_object, uobj, len, uio);
+		addr = uio->uio_offset;
+		idx = OFF_TO_IDX(addr);
+		offset = addr & PAGE_MASK;
+		tlen = MIN(PAGE_SIZE - offset, len);
+		error = tmpfs_nocacheread(uobj, idx, offset, tlen, uio);
 		if ((error != 0) || (resid == uio->uio_resid))
 			break;
 	}
@@ -616,10 +523,10 @@ out:
 /* --------------------------------------------------------------------- */
 
 static int
-tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
+tmpfs_mappedwrite(vm_object_t tobj, size_t len, struct uio *uio)
 {
 	vm_pindex_t	idx;
-	vm_page_t	vpg, tpg;
+	vm_page_t	tpg;
 	vm_offset_t	offset;
 	off_t		addr;
 	size_t		tlen;
@@ -632,37 +539,6 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
 	offset = addr & PAGE_MASK;
 	tlen = MIN(PAGE_SIZE - offset, len);
 
-	if ((vobj == NULL) ||
-	    (vobj->resident_page_count == 0 && vobj->cache == NULL)) {
-		vpg = NULL;
-		goto nocache;
-	}
-
-	VM_OBJECT_LOCK(vobj);
-lookupvpg:
-	if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
-	    vm_page_is_valid(vpg, offset, tlen)) {
-		if ((vpg->oflags & VPO_BUSY) != 0) {
-			/*
-			 * Reference the page before unlocking and sleeping so
-			 * that the page daemon is less likely to reclaim it.  
-			 */
-			vm_page_lock_queues();
-			vm_page_flag_set(vpg, PG_REFERENCED);
-			vm_page_sleep(vpg, "tmfsmw");
-			goto lookupvpg;
-		}
-		vm_page_busy(vpg);
-		vm_page_undirty(vpg);
-		VM_OBJECT_UNLOCK(vobj);
-		error = uiomove_fromphys(&vpg, offset, tlen, uio);
-	} else {
-		if (__predict_false(vobj->cache != NULL))
-			vm_page_cache_free(vobj, idx, idx + 1);
-		VM_OBJECT_UNLOCK(vobj);
-		vpg = NULL;
-	}
-nocache:
 	VM_OBJECT_LOCK(tobj);
 	vm_object_pip_add(tobj, 1);
 	tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
@@ -671,23 +547,18 @@ nocache:
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			error = vm_pager_get_pages(tobj, &tpg, 1, 0);
 			if (error != 0) {
+				vm_page_wakeup(tpg);
 				printf("tmpfs get pages from pager error [write]\n");
 				goto out;
 			}
 		} else
 			vm_page_zero_invalid(tpg, TRUE);
 	}
+	vm_page_wakeup(tpg);
 	VM_OBJECT_UNLOCK(tobj);
-	if (vpg == NULL)
-		error = uiomove_fromphys(&tpg, offset, tlen, uio);
-	else {
-		KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
-		pmap_copy_page(vpg, tpg);
-	}
+	error = uiomove_fromphys(&tpg, offset, tlen, uio);
 	VM_OBJECT_LOCK(tobj);
 out:
-	if (vobj != NULL)
-		VM_OBJECT_LOCK(vobj);
 	if (error == 0) {
 		KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
 		    ("parts of tpg invalid"));
@@ -696,11 +567,6 @@ out:
 	vm_page_lock(tpg);
 	vm_page_unwire(tpg, TRUE);
 	vm_page_unlock(tpg);
-	vm_page_wakeup(tpg);
-	if (vpg != NULL)
-		vm_page_wakeup(vpg);
-	if (vobj != NULL)
-		VM_OBJECT_UNLOCK(vobj);
 	vm_object_pip_subtract(tobj, 1);
 	VM_OBJECT_UNLOCK(tobj);
 
@@ -759,7 +625,7 @@ tmpfs_write(struct vop_write_args *v)
 		len = MIN(node->tn_size - uio->uio_offset, resid);
 		if (len == 0)
 			break;
-		error = tmpfs_mappedwrite(vp->v_object, uobj, len, uio);
+		error = tmpfs_mappedwrite(uobj, len, uio);
 		if ((error != 0) || (resid == uio->uio_resid))
 			break;
 	}
@@ -1425,7 +1291,7 @@ tmpfs_reclaim(struct vop_reclaim_args *v)
 	node = VP_TO_TMPFS_NODE(vp);
 	tmp = VFS_TO_TMPFS(vp->v_mount);
 
-	vnode_destroy_vobject(vp);
+	vp->v_object = NULL;
 	cache_purge(vp);
 
 	TMPFS_NODE_LOCK(node);
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index c48e0f5..754092f 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -447,7 +447,7 @@ __elfN(load_section)(struct vmspace *vmspace,
 	 * While I'm here, might as well check for something else that
 	 * is invalid: filsz cannot be greater than memsz.
 	 */
-	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
+	if ((off_t)filsz + offset > object->vnp_size ||
 	    filsz > memsz) {
 		uprintf("elf_load_section: truncated ELF file\n");
 		return (ENOEXEC);
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index adcb852..ee80b3e 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -2033,12 +2033,12 @@ retry_space:
 			 */
 			pgoff = (vm_offset_t)(off & PAGE_MASK);
 			xfsize = omin(PAGE_SIZE - pgoff,
-			    obj->un_pager.vnp.vnp_size - uap->offset -
+			    obj->vnp_size - uap->offset -
 			    fsbytes - loopbytes);
 			if (uap->nbytes)
 				rem = (uap->nbytes - fsbytes - loopbytes);
 			else
-				rem = obj->un_pager.vnp.vnp_size -
+				rem = obj->vnp_size -
 				    uap->offset - fsbytes - loopbytes;
 			xfsize = omin(rem, xfsize);
 			xfsize = omin(space - loopbytes, xfsize);
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 3d72123..ff06892 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1222,7 +1222,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 			error = EINVAL;
 			goto done;
 		}
-		if (obj->handle != vp) {
+		if (obj->type == OBJT_VNODE && obj->handle != vp) {
 			vput(vp);
 			vp = (struct vnode*)obj->handle;
 			vget(vp, LK_SHARED, td);
@@ -1261,7 +1261,14 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 	objsize = round_page(va.va_size);
 	if (va.va_nlink == 0)
 		flags |= MAP_NOSYNC;
-	obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, td->td_ucred);
+	if (obj->type == OBJT_VNODE)
+		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
+		    td->td_ucred);
+	else {
+		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
+		    ("wrong object type"));
+		vm_object_reference(obj);
+	}
 	if (obj == NULL) {
 		error = ENOMEM;
 		goto done;
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 6a9f129..0120d32 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -106,15 +106,6 @@ struct vm_object {
 	void *handle;
 	union {
 		/*
-		 * VNode pager
-		 *
-		 *	vnp_size - current size of file
-		 */
-		struct {
-			off_t vnp_size;
-		} vnp;
-
-		/*
 		 * Device pager
 		 *
 		 *	devp_pglist - list of allocated pages
@@ -145,6 +136,7 @@ struct vm_object {
 	} un_pager;
 	struct uidinfo *uip;
 	vm_ooffset_t charge;
+	off_t vnp_size; /* current size of file for vnode pager */
 };
 
 /*
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index f497d41..a1cfc01 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -212,8 +212,7 @@ retry:
 		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
 	}
 
-	if (vp->v_usecount == 0)
-		panic("vnode_pager_alloc: no vnode reference");
+	KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));
 
 	if (object == NULL) {
 		/*
@@ -221,7 +220,7 @@ retry:
 		 */
 		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));
 
-		object->un_pager.vnp.vnp_size = size;
+		object->vnp_size = size;
 
 		object->handle = handle;
 		VI_LOCK(vp);
@@ -301,7 +300,7 @@ vnode_pager_haspage(object, pindex, before, after)
 	 * If the offset is beyond end of file we do
 	 * not have the page.
 	 */
-	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
+	if (IDX_TO_OFF(pindex) >= object->vnp_size)
 		return FALSE;
 
 	bsize = vp->v_mount->mnt_stat.f_iosize;
@@ -333,9 +332,8 @@ vnode_pager_haspage(object, pindex, before, after)
 			*after *= pagesperblock;
 			numafter = pagesperblock - (poff + 1);
 			if (IDX_TO_OFF(pindex + numafter) >
-			    object->un_pager.vnp.vnp_size) {
-				numafter =
-		    		    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
+			    object->vnp_size) {
+				numafter = OFF_TO_IDX(object->vnp_size) -
 				    pindex;
 			}
 			*after += numafter;
@@ -369,11 +367,11 @@ vnode_pager_setsize(vp, nsize)
 	vm_page_t m;
 	vm_pindex_t nobjsize;
 
-	if ((object = vp->v_object) == NULL)
+	if ((object = vp->v_object) == NULL || object->type != OBJT_VNODE)
 		return;
 /* 	ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
 	VM_OBJECT_LOCK(object);
-	if (nsize == object->un_pager.vnp.vnp_size) {
+	if (nsize == object->vnp_size) {
 		/*
 		 * Hasn't changed size
 		 */
@@ -381,7 +379,7 @@ vnode_pager_setsize(vp, nsize)
 		return;
 	}
 	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
-	if (nsize < object->un_pager.vnp.vnp_size) {
+	if (nsize < object->vnp_size) {
 		/*
 		 * File has shrunk. Toss any cached pages beyond the new EOF.
 		 */
@@ -436,7 +434,7 @@ vnode_pager_setsize(vp, nsize)
 			    nobjsize);
 		}
 	}
-	object->un_pager.vnp.vnp_size = nsize;
+	object->vnp_size = nsize;
 	object->size = nobjsize;
 	VM_OBJECT_UNLOCK(object);
 }
@@ -513,7 +511,7 @@ vnode_pager_input_smlfs(object, m)
 			continue;
 
 		address = IDX_TO_OFF(m->pindex) + i * bsize;
-		if (address >= object->un_pager.vnp.vnp_size) {
+		if (address >= object->vnp_size) {
 			fileaddr = -1;
 		} else {
 			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
@@ -590,12 +588,12 @@ vnode_pager_input_old(object, m)
 	/*
 	 * Return failure if beyond current EOF
 	 */
-	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
+	if (IDX_TO_OFF(m->pindex) >= object->vnp_size) {
 		return VM_PAGER_BAD;
 	} else {
 		size = PAGE_SIZE;
-		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
-			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
+		if (IDX_TO_OFF(m->pindex) + size > object->vnp_size)
+			size = object->vnp_size - IDX_TO_OFF(m->pindex);
 		vp = object->handle;
 		VM_OBJECT_UNLOCK(object);
 
@@ -815,13 +813,13 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 		}
 		if (firstaddr == -1) {
 			VM_OBJECT_LOCK(object);
-			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
+			if (i == reqpage && foff < object->vnp_size) {
 				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
 				    (intmax_t)firstaddr, (uintmax_t)(foff >> 32),
 				    (uintmax_t)foff,
 				    (uintmax_t)
-				    (object->un_pager.vnp.vnp_size >> 32),
-				    (uintmax_t)object->un_pager.vnp.vnp_size);
+				    (object->vnp_size >> 32),
+				    (uintmax_t)object->vnp_size);
 			}
 			vm_page_lock(m[i]);
 			vm_page_free(m[i]);
@@ -876,8 +874,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 	 */
 	size = count * PAGE_SIZE;
 	KASSERT(count > 0, ("zero count"));
-	if ((foff + size) > object->un_pager.vnp.vnp_size)
-		size = object->un_pager.vnp.vnp_size - foff;
+	if ((foff + size) > object->vnp_size)
+		size = object->vnp_size - foff;
 	KASSERT(size > 0, ("zero size"));
 
 	/*
@@ -944,7 +942,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 		nextoff = tfoff + PAGE_SIZE;
 		mt = m[i];
 
-		if (nextoff <= object->un_pager.vnp.vnp_size) {
+		if (nextoff <= object->vnp_size) {
 			/*
 			 * Read filled up entire page.
 			 */
@@ -964,9 +962,9 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 			 * read.
 			 */
 			vm_page_set_valid(mt, 0,
-			    object->un_pager.vnp.vnp_size - tfoff);
+			    object->vnp_size - tfoff);
 			KASSERT((mt->dirty & vm_page_bits(0,
-			    object->un_pager.vnp.vnp_size - tfoff)) == 0,
+			    object->vnp_size - tfoff)) == 0,
 			    ("vnode_pager_generic_getpages: page %p is dirty",
 			    mt));
 		}
@@ -1116,11 +1114,11 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
 	 * this will screw up bogus page replacement.
 	 */
 	VM_OBJECT_LOCK(object);
-	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
-		if (object->un_pager.vnp.vnp_size > poffset) {
+	if (maxsize + poffset > object->vnp_size) {
+		if (object->vnp_size > poffset) {
 			int pgoff;
 
-			maxsize = object->un_pager.vnp.vnp_size - poffset;
+			maxsize = object->vnp_size - poffset;
 			ncount = btoc(maxsize);
 			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
 				/*
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 196 bytes
Desc: not available
Url : http://lists.freebsd.org/pipermail/freebsd-fs/attachments/20100621/c50c1818/attachment.pgp


More information about the freebsd-fs mailing list