svn commit: r346286 - stable/11/sys/fs/tmpfs

Konstantin Belousov kib at FreeBSD.org
Tue Apr 16 17:43:17 UTC 2019


Author: kib
Date: Tue Apr 16 17:43:14 2019
New Revision: 346286
URL: https://svnweb.freebsd.org/changeset/base/346286

Log:
  MFC r345425, r345514, r345799, r345800, r345803, r346157:
  Enable tmpfs rw->ro remounts.

Modified:
  stable/11/sys/fs/tmpfs/tmpfs.h
  stable/11/sys/fs/tmpfs/tmpfs_fifoops.c
  stable/11/sys/fs/tmpfs/tmpfs_subr.c
  stable/11/sys/fs/tmpfs/tmpfs_vfsops.c
  stable/11/sys/fs/tmpfs/tmpfs_vnops.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/fs/tmpfs/tmpfs.h
==============================================================================
--- stable/11/sys/fs/tmpfs/tmpfs.h	Tue Apr 16 17:33:08 2019	(r346285)
+++ stable/11/sys/fs/tmpfs/tmpfs.h	Tue Apr 16 17:43:14 2019	(r346286)
@@ -330,6 +330,11 @@ LIST_HEAD(tmpfs_node_list, tmpfs_node);
  */
 struct tmpfs_mount {
 	/*
+	 * Original value of the "size" parameter, mostly for reference
+	 * purposes.
+	 */
+	off_t			tm_size_max;
+	/*
 	 * Maximum number of memory pages available for use by the file
 	 * system, set during mount time.  This variable must never be
 	 * used directly as it may be bigger than the current amount of
@@ -437,8 +442,8 @@ void	tmpfs_dir_destroy(struct tmpfs_mount *, struct tm
 struct tmpfs_dirent *	tmpfs_dir_lookup(struct tmpfs_node *node,
 			    struct tmpfs_node *f,
 			    struct componentname *cnp);
-int	tmpfs_dir_getdents(struct tmpfs_node *, struct uio *, int,
-	    u_long *, int *);
+int	tmpfs_dir_getdents(struct tmpfs_mount *, struct tmpfs_node *,
+	    struct uio *, int, u_long *, int *);
 int	tmpfs_dir_whiteout_add(struct vnode *, struct componentname *);
 void	tmpfs_dir_whiteout_remove(struct vnode *, struct componentname *);
 int	tmpfs_reg_resize(struct vnode *, off_t, boolean_t);
@@ -452,7 +457,8 @@ int	tmpfs_chtimes(struct vnode *, struct vattr *, stru
 void	tmpfs_itimes(struct vnode *, const struct timespec *,
 	    const struct timespec *);
 
-void	tmpfs_set_status(struct tmpfs_node *node, int status);
+void	tmpfs_set_status(struct tmpfs_mount *tm, struct tmpfs_node *node,
+	    int status);
 void	tmpfs_update(struct vnode *);
 int	tmpfs_truncate(struct vnode *, off_t);
 struct tmpfs_dirent *tmpfs_dir_first(struct tmpfs_node *dnode,

Modified: stable/11/sys/fs/tmpfs/tmpfs_fifoops.c
==============================================================================
--- stable/11/sys/fs/tmpfs/tmpfs_fifoops.c	Tue Apr 16 17:33:08 2019	(r346285)
+++ stable/11/sys/fs/tmpfs/tmpfs_fifoops.c	Tue Apr 16 17:43:14 2019	(r346286)
@@ -54,7 +54,8 @@ tmpfs_fifo_close(struct vop_close_args *v)
 	struct tmpfs_node *node;
 
 	node = VP_TO_TMPFS_NODE(v->a_vp);
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(VFS_TO_TMPFS(v->a_vp->v_mount), node,
+	    TMPFS_NODE_ACCESSED);
 	tmpfs_update(v->a_vp);
 	return (fifo_specops.vop_close(v));
 }

Modified: stable/11/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- stable/11/sys/fs/tmpfs/tmpfs_subr.c	Tue Apr 16 17:33:08 2019	(r346285)
+++ stable/11/sys/fs/tmpfs/tmpfs_subr.c	Tue Apr 16 17:43:14 2019	(r346286)
@@ -213,6 +213,8 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount 
 		 */
 		return (EBUSY);
 	}
+	if ((mp->mnt_kern_flag & MNT_RDONLY) != 0)
+		return (EROFS);
 
 	nnode = (struct tmpfs_node *)uma_zalloc_arg(tmp->tm_node_pool, tmp,
 	    M_WAITOK);
@@ -1104,7 +1106,8 @@ tmpfs_dir_destroy(struct tmpfs_mount *tmp, struct tmpf
  * error happens.
  */
 static int
-tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
+tmpfs_dir_getdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
+    struct uio *uio)
 {
 	int error;
 	struct dirent dent;
@@ -1124,7 +1127,7 @@ tmpfs_dir_getdotdent(struct tmpfs_node *node, struct u
 	else
 		error = uiomove(&dent, dent.d_reclen, uio);
 
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(tm, node, TMPFS_NODE_ACCESSED);
 
 	return (error);
 }
@@ -1137,7 +1140,8 @@ tmpfs_dir_getdotdent(struct tmpfs_node *node, struct u
  * error happens.
  */
 static int
-tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
+tmpfs_dir_getdotdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
+    struct uio *uio)
 {
 	int error;
 	struct dirent dent;
@@ -1168,7 +1172,7 @@ tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struc
 	else
 		error = uiomove(&dent, dent.d_reclen, uio);
 
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(tm, node, TMPFS_NODE_ACCESSED);
 
 	return (error);
 }
@@ -1181,8 +1185,8 @@ tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struc
  * error code if another error happens.
  */
 int
-tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, int maxcookies,
-    u_long *cookies, int *ncookies)
+tmpfs_dir_getdents(struct tmpfs_mount *tm, struct tmpfs_node *node,
+    struct uio *uio, int maxcookies, u_long *cookies, int *ncookies)
 {
 	struct tmpfs_dir_cursor dc;
 	struct tmpfs_dirent *de;
@@ -1203,7 +1207,7 @@ tmpfs_dir_getdents(struct tmpfs_node *node, struct uio
 	 */
 	switch (uio->uio_offset) {
 	case TMPFS_DIRCOOKIE_DOT:
-		error = tmpfs_dir_getdotdent(node, uio);
+		error = tmpfs_dir_getdotdent(tm, node, uio);
 		if (error != 0)
 			return (error);
 		uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
@@ -1211,7 +1215,7 @@ tmpfs_dir_getdents(struct tmpfs_node *node, struct uio
 			cookies[(*ncookies)++] = off = uio->uio_offset;
 		/* FALLTHROUGH */
 	case TMPFS_DIRCOOKIE_DOTDOT:
-		error = tmpfs_dir_getdotdotdent(node, uio);
+		error = tmpfs_dir_getdotdotdent(tm, node, uio);
 		if (error != 0)
 			return (error);
 		de = tmpfs_dir_first(node, &dc);
@@ -1313,7 +1317,7 @@ tmpfs_dir_getdents(struct tmpfs_node *node, struct uio
 	node->tn_dir.tn_readdir_lastn = off;
 	node->tn_dir.tn_readdir_lastp = de;
 
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(tm, node, TMPFS_NODE_ACCESSED);
 	return error;
 }
 
@@ -1760,10 +1764,10 @@ tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
 }
 
 void
-tmpfs_set_status(struct tmpfs_node *node, int status)
+tmpfs_set_status(struct tmpfs_mount *tm, struct tmpfs_node *node, int status)
 {
 
-	if ((node->tn_status & status) == status)
+	if ((node->tn_status & status) == status || tm->tm_ronly)
 		return;
 	TMPFS_NODE_LOCK(node);
 	node->tn_status |= status;

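With tm->tm_ronly set, tmpfs_set_status() now returns early, so reads
on a downgraded mount no longer dirty node metadata such as the access
time.  A small demonstration sketch, not part of the commit; it assumes
/tmp is a tmpfs mount and that /tmp/somefile already exists there:

#include <sys/stat.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct stat before, after;
	char buf[16];
	int fd;

	fd = open("/tmp/somefile", O_RDONLY);
	if (fd == -1)
		err(1, "open");
	if (fstat(fd, &before) == -1)
		err(1, "fstat");
	sleep(1);
	if (read(fd, buf, sizeof(buf)) == -1)
		err(1, "read");
	if (fstat(fd, &after) == -1)
		err(1, "fstat");
	/* On a ro tmpfs the read above must not have touched atime. */
	printf("atime %s\n", before.st_atime == after.st_atime ?
	    "unchanged" : "updated");
	return (0);
}
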
Modified: stable/11/sys/fs/tmpfs/tmpfs_vfsops.c
==============================================================================
--- stable/11/sys/fs/tmpfs/tmpfs_vfsops.c	Tue Apr 16 17:33:08 2019	(r346285)
+++ stable/11/sys/fs/tmpfs/tmpfs_vfsops.c	Tue Apr 16 17:43:14 2019	(r346286)
@@ -52,9 +52,13 @@ __FBSDID("$FreeBSD$");
 #include <sys/kernel.h>
 #include <sys/rwlock.h>
 #include <sys/stat.h>
+#include <sys/sx.h>
 #include <sys/sysctl.h>
 
 #include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
 #include <vm/vm_object.h>
-#include <vm/vm_param.h>
 
@@ -82,7 +87,7 @@ static const char *tmpfs_opts[] = {
 };
 
 static const char *tmpfs_updateopts[] = {
-	"from", "export", NULL
+	"from", "export", "size", NULL
 };
 
 static int
@@ -128,7 +133,228 @@ tmpfs_node_fini(void *mem, int size)
 	mtx_destroy(&node->tn_interlock);
 }
 
+/*
+ * Handle updates of time from writes to mmapped regions.  Use
+ * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since an
+ * unmap of the tmpfs-backed vnode does not call vinactive(), because
+ * the vm object type is OBJT_SWAP.
+ * If lazy, only handle the delayed mtime updates caused by writes to
+ * mapped files.
+ */
+static void
+tmpfs_update_mtime(struct mount *mp, bool lazy)
+{
+	struct vnode *vp, *mvp;
+	struct vm_object *obj;
+
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
+		if (vp->v_type != VREG) {
+			VI_UNLOCK(vp);
+			continue;
+		}
+		obj = vp->v_object;
+		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
+		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
+
+		/*
+		 * In the lazy case, do an unlocked read and avoid
+		 * taking the vnode lock if not needed.  A lost update
+		 * will be handled on the next call.
+		 * In the non-lazy case, we must flush all pending
+		 * metadata changes now.
+		 */
+		if (!lazy || (obj->flags & OBJ_TMPFS_DIRTY) != 0) {
+			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
+			    curthread) != 0)
+				continue;
+			tmpfs_check_mtime(vp);
+			if (!lazy)
+				tmpfs_update(vp);
+			vput(vp);
+		} else {
+			VI_UNLOCK(vp);
+			continue;
+		}
+	}
+}
+
+struct tmpfs_check_rw_maps_arg {
+	bool found;
+};
+
+static bool
+tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
+    vm_map_entry_t entry __unused, void *arg)
+{
+	struct tmpfs_check_rw_maps_arg *a;
+
+	a = arg;
+	a->found = true;
+	return (true);
+}
+
+/*
+ * Revoke write permissions from all mappings of regular files
+ * belonging to the specified tmpfs mount.
+ */
+static bool
+tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
+    vm_map_entry_t entry, void *arg __unused)
+{
+
+	/*
+	 * XXXKIB: might it be better to invalidate the mapping
+	 * instead?  The process is not going to be happy in
+	 * any case.
+	 */
+	entry->max_protection &= ~VM_PROT_WRITE;
+	if ((entry->protection & VM_PROT_WRITE) != 0) {
+		entry->protection &= ~VM_PROT_WRITE;
+		pmap_protect(map->pmap, entry->start, entry->end,
+		    entry->protection);
+	}
+	return (false);
+}
+
+static void
+tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
+    vm_map_entry_t, void *), void *cb_arg)
+{
+	struct proc *p;
+	struct vmspace *vm;
+	vm_map_t map;
+	vm_map_entry_t entry;
+	vm_object_t object;
+	struct vnode *vp;
+	int gen;
+	bool terminate;
+
+	terminate = false;
+	sx_slock(&allproc_lock);
+again:
+	gen = allproc_gen;
+	FOREACH_PROC_IN_SYSTEM(p) {
+		PROC_LOCK(p);
+		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
+		    P_SYSTEM | P_WEXIT)) != 0) {
+			PROC_UNLOCK(p);
+			continue;
+		}
+		vm = vmspace_acquire_ref(p);
+		_PHOLD_LITE(p);
+		PROC_UNLOCK(p);
+		if (vm == NULL) {
+			PRELE(p);
+			continue;
+		}
+		sx_sunlock(&allproc_lock);
+		map = &vm->vm_map;
+
+		vm_map_lock(map);
+		if (map->busy)
+			vm_map_wait_busy(map);
+		for (entry = map->header.next; entry != &map->header;
+		    entry = entry->next) {
+			if ((entry->eflags & (MAP_ENTRY_GUARD |
+			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
+			    (entry->max_protection & VM_PROT_WRITE) == 0)
+				continue;
+			object = entry->object.vm_object;
+			if (object == NULL || object->type != OBJT_SWAP ||
+			    (object->flags & OBJ_TMPFS_NODE) == 0)
+				continue;
+			/*
+			 * No need to dig into the shadow chain; a mapping
+			 * of an object not at the top is read-only.
+			 */
+
+			VM_OBJECT_RLOCK(object);
+			if (object->type == OBJT_DEAD) {
+				VM_OBJECT_RUNLOCK(object);
+				continue;
+			}
+			MPASS(object->ref_count > 1);
+			if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
+			    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
+				VM_OBJECT_RUNLOCK(object);
+				continue;
+			}
+			vp = object->un_pager.swp.swp_tmpfs;
+			if (vp->v_mount != mp) {
+				VM_OBJECT_RUNLOCK(object);
+				continue;
+			}
+
+			terminate = cb(mp, map, entry, cb_arg);
+			VM_OBJECT_RUNLOCK(object);
+			if (terminate)
+				break;
+		}
+		vm_map_unlock(map);
+
+		vmspace_free(vm);
+		sx_slock(&allproc_lock);
+		PRELE(p);
+		if (terminate)
+			break;
+	}
+	if (!terminate && gen != allproc_gen)
+		goto again;
+	sx_sunlock(&allproc_lock);
+}
+
+static bool
+tmpfs_check_rw_maps(struct mount *mp)
+{
+	struct tmpfs_check_rw_maps_arg ca;
+
+	ca.found = false;
+	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
+	return (ca.found);
+}
+
 static int
+tmpfs_rw_to_ro(struct mount *mp)
+{
+	int error, flags;
+	bool forced;
+
+	forced = (mp->mnt_flag & MNT_FORCE) != 0;
+	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);
+
+	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
+		return (error);
+	error = vfs_write_suspend_umnt(mp);
+	if (error != 0)
+		return (error);
+	if (!forced && tmpfs_check_rw_maps(mp)) {
+		error = EBUSY;
+		goto out;
+	}
+	VFS_TO_TMPFS(mp)->tm_ronly = 1;
+	MNT_ILOCK(mp);
+	mp->mnt_flag |= MNT_RDONLY;
+	MNT_IUNLOCK(mp);
+	for (;;) {
+		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
+		tmpfs_update_mtime(mp, false);
+		error = vflush(mp, 0, flags, curthread);
+		if (error != 0) {
+			VFS_TO_TMPFS(mp)->tm_ronly = 0;
+			MNT_ILOCK(mp);
+			mp->mnt_flag &= ~MNT_RDONLY;
+			MNT_IUNLOCK(mp);
+			goto out;
+		}
+		if (!tmpfs_check_rw_maps(mp))
+			break;
+	}
+out:
+	vfs_write_resume(mp, 0);
+	return (error);
+}
+
+static int
 tmpfs_mount(struct mount *mp)
 {
 	const size_t nodes_per_page = howmany(PAGE_SIZE,
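
A non-forced downgrade is refused with EBUSY while any process holds a
writable mapping of a file on the mount; with MNT_FORCE,
tmpfs_revoke_rw_maps_cb() instead strips VM_PROT_WRITE from such
mappings, so later stores through them are expected to fault.  A hedged
sketch of the non-forced case, not part of the commit, assuming /tmp is
a tmpfs mount:

#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/uio.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	struct iovec iov[4];
	size_t len;
	char *p;
	int fd;

	len = getpagesize();
	fd = open("/tmp/mapped", O_RDWR | O_CREAT, 0644);
	if (fd == -1)
		err(1, "open");
	if (ftruncate(fd, len) == -1)
		err(1, "ftruncate");
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	p[0] = 1;		/* dirty the shared mapping */

	iov[0].iov_base = "fstype";
	iov[0].iov_len = sizeof("fstype");
	iov[1].iov_base = "tmpfs";
	iov[1].iov_len = sizeof("tmpfs");
	iov[2].iov_base = "fspath";
	iov[2].iov_len = sizeof("fspath");
	iov[3].iov_base = "/tmp";
	iov[3].iov_len = sizeof("/tmp");
	/*
	 * tmpfs_check_rw_maps() should find the writable mapping, so
	 * the downgrade is expected to fail with EBUSY.
	 */
	if (nmount(iov, 4, MNT_UPDATE | MNT_RDONLY) == -1 && errno == EBUSY)
		warnx("downgrade refused: writable mapping exists");
	return (0);
}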
@@ -159,9 +385,29 @@ tmpfs_mount(struct mount *mp)
 		/* Only support update mounts for certain options. */
 		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
 			return (EOPNOTSUPP);
-		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) !=
-		    ((struct tmpfs_mount *)mp->mnt_data)->tm_ronly)
-			return (EOPNOTSUPP);
+		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
+			/*
+			 * On-the-fly resizing is not supported (yet).  We
+			 * still need "size" listed as "supported"; otherwise
+			 * trying to update a filesystem listed in fstab with
+			 * a size parameter, say to change rw to ro or vice
+			 * versa, would cause vfs_filteropt() to bail.
+			 */
+			if (size_max != VFS_TO_TMPFS(mp)->tm_size_max)
+				return (EOPNOTSUPP);
+		}
+		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
+		    !(VFS_TO_TMPFS(mp)->tm_ronly)) {
+			/* RW -> RO */
+			return (tmpfs_rw_to_ro(mp));
+		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
+		    VFS_TO_TMPFS(mp)->tm_ronly) {
+			/* RO -> RW */
+			VFS_TO_TMPFS(mp)->tm_ronly = 0;
+			MNT_ILOCK(mp);
+			mp->mnt_flag &= ~MNT_RDONLY;
+			MNT_IUNLOCK(mp);
+		}
 		return (0);
 	}
 
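Because "size" is now in tmpfs_updateopts, an update mount may repeat
the size option, as it will when the fstab entry carries one, but only
with the value given at mount time; any other value still returns
EOPNOTSUPP.  A sketch with a hypothetical 1g size, not part of the
commit, which succeeds only if the mount was created with size=1g:

#include <sys/mount.h>
#include <sys/uio.h>
#include <err.h>

int
main(void)
{
	struct iovec iov[6];

	iov[0].iov_base = "fstype";
	iov[0].iov_len = sizeof("fstype");
	iov[1].iov_base = "tmpfs";
	iov[1].iov_len = sizeof("tmpfs");
	iov[2].iov_base = "fspath";
	iov[2].iov_len = sizeof("fspath");
	iov[3].iov_base = "/tmp";
	iov[3].iov_len = sizeof("/tmp");
	iov[4].iov_base = "size";
	iov[4].iov_len = sizeof("size");
	iov[5].iov_base = "1g";	/* must match tm_size_max */
	iov[5].iov_len = sizeof("1g");
	/* Same size as at mount time: accepted; otherwise EOPNOTSUPP. */
	if (nmount(iov, 6, MNT_UPDATE | MNT_RDONLY) == -1)
		err(1, "nmount");
	return (0);
}
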
@@ -227,6 +473,7 @@ tmpfs_mount(struct mount *mp)
 	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
 	LIST_INIT(&tmp->tm_nodes_used);
 
+	tmp->tm_size_max = size_max;
 	tmp->tm_pages_max = pages;
 	tmp->tm_pages_used = 0;
 	tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->tm_allnode_lock);
@@ -433,45 +680,13 @@ tmpfs_statfs(struct mount *mp, struct statfs *sbp)
 static int
 tmpfs_sync(struct mount *mp, int waitfor)
 {
-	struct vnode *vp, *mvp;
-	struct vm_object *obj;
 
 	if (waitfor == MNT_SUSPEND) {
 		MNT_ILOCK(mp);
 		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
 		MNT_IUNLOCK(mp);
 	} else if (waitfor == MNT_LAZY) {
-		/*
-		 * Handle lazy updates of mtime from writes to mmaped
-		 * regions.  Use MNT_VNODE_FOREACH_ALL instead of
-		 * MNT_VNODE_FOREACH_ACTIVE, since unmap of the
-		 * tmpfs-backed vnode does not call vinactive(), due
-		 * to vm object type is OBJT_SWAP.
-		 */
-		MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
-			if (vp->v_type != VREG) {
-				VI_UNLOCK(vp);
-				continue;
-			}
-			obj = vp->v_object;
-			KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
-			    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
-
-			/*
-			 * Unlocked read, avoid taking vnode lock if
-			 * not needed.  Lost update will be handled on
-			 * the next call.
-			 */
-			if ((obj->flags & OBJ_TMPFS_DIRTY) == 0) {
-				VI_UNLOCK(vp);
-				continue;
-			}
-			if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
-			    curthread) != 0)
-				continue;
-			tmpfs_check_mtime(vp);
-			vput(vp);
-		}
+		tmpfs_update_mtime(mp, true);
 	}
 	return (0);
 }

Modified: stable/11/sys/fs/tmpfs/tmpfs_vnops.c
==============================================================================
--- stable/11/sys/fs/tmpfs/tmpfs_vnops.c	Tue Apr 16 17:33:08 2019	(r346285)
+++ stable/11/sys/fs/tmpfs/tmpfs_vnops.c	Tue Apr 16 17:43:14 2019	(r346286)
@@ -475,7 +475,7 @@ tmpfs_read(struct vop_read_args *v)
 	if (uio->uio_offset < 0)
 		return (EINVAL);
 	node = VP_TO_TMPFS_NODE(vp);
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(VFS_TO_TMPFS(vp->v_mount), node, TMPFS_NODE_ACCESSED);
 	return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
 }
 
@@ -1182,25 +1182,30 @@ tmpfs_symlink(struct vop_symlink_args *v)
 }
 
 static int
-tmpfs_readdir(struct vop_readdir_args *v)
+tmpfs_readdir(struct vop_readdir_args *va)
 {
-	struct vnode *vp = v->a_vp;
-	struct uio *uio = v->a_uio;
-	int *eofflag = v->a_eofflag;
-	u_long **cookies = v->a_cookies;
-	int *ncookies = v->a_ncookies;
-
-	int error;
-	ssize_t startresid;
-	int maxcookies;
+	struct vnode *vp;
+	struct uio *uio;
+	struct tmpfs_mount *tm;
 	struct tmpfs_node *node;
+	u_long **cookies;
+	int *eofflag, *ncookies;
+	ssize_t startresid;
+	int error, maxcookies;
 
+	vp = va->a_vp;
+	uio = va->a_uio;
+	eofflag = va->a_eofflag;
+	cookies = va->a_cookies;
+	ncookies = va->a_ncookies;
+
 	/* This operation only makes sense on directory nodes. */
 	if (vp->v_type != VDIR)
 		return ENOTDIR;
 
 	maxcookies = 0;
 	node = VP_TO_TMPFS_DIR(vp);
+	tm = VFS_TO_TMPFS(vp->v_mount);
 
 	startresid = uio->uio_resid;
 
@@ -1214,9 +1219,9 @@ tmpfs_readdir(struct vop_readdir_args *v)
 	}
 
 	if (cookies == NULL)
-		error = tmpfs_dir_getdents(node, uio, 0, NULL, NULL);
+		error = tmpfs_dir_getdents(tm, node, uio, 0, NULL, NULL);
 	else
-		error = tmpfs_dir_getdents(node, uio, maxcookies, *cookies,
+		error = tmpfs_dir_getdents(tm, node, uio, maxcookies, *cookies,
 		    ncookies);
 
 	/* Buffer was filled without hitting EOF. */
@@ -1252,7 +1257,7 @@ tmpfs_readlink(struct vop_readlink_args *v)
 
 	error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid),
 	    uio);
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(VFS_TO_TMPFS(vp->v_mount), node, TMPFS_NODE_ACCESSED);
 
 	return (error);
 }

