svn commit: r355407 - head/sys/fs/tmpfs
Konstantin Belousov
kostikbel at gmail.com
Thu Dec 5 13:35:53 UTC 2019
On Thu, Dec 05, 2019 at 02:38:07PM +1100, Kubilay Kocak wrote:
> On 5/12/2019 11:03 am, Konstantin Belousov wrote:
> > Author: kib
> > Date: Thu Dec 5 00:03:17 2019
> > New Revision: 355407
> > URL: https://svnweb.freebsd.org/changeset/base/355407
>
> Could you elaborate on the why/rationale?
>
> Is there a memory wastage/duplication issue, a bug, a performance gain, or a
> development/maintenance benefit?
Each mount/unmount of tmpfs created and destroyed two UMA zones, as is
easy to see from the patch.
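In other words (condensing the patch below, with the zone-creation
arguments taken verbatim from the removed code), before this change
tmpfs_mount() did, on every mount:

	tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
	    sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tmp->tm_node_pool = uma_zcreate("TMPFS node",
	    sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
	    tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);

and tmpfs_free_tmp() undid it on every unmount:

	uma_zdestroy(tmp->tm_dirent_pool);
	uma_zdestroy(tmp->tm_node_pool);

After the change the two zones are module-global: they are created once
in tmpfs_subr_init(), called from the new vfs_init hook, and destroyed
once in tmpfs_subr_uninit() from vfs_uninit, so repeated mounts and
unmounts no longer churn UMA zones.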
>
> The review summary doesn't appear to include this information either
>
> > Log:
> > Stop using per-mount tmpfs zones.
> >
> > Requested and reviewed by: jeff
> > Sponsored by: The FreeBSD Foundation
> > MFC after: 1 week
> > Differential revision: https://reviews.freebsd.org/D22643
> >
> > Modified:
> > head/sys/fs/tmpfs/tmpfs.h
> > head/sys/fs/tmpfs/tmpfs_subr.c
> > head/sys/fs/tmpfs/tmpfs_vfsops.c
> >
> > Modified: head/sys/fs/tmpfs/tmpfs.h
> > ==============================================================================
> > --- head/sys/fs/tmpfs/tmpfs.h Wed Dec 4 23:24:40 2019 (r355406)
> > +++ head/sys/fs/tmpfs/tmpfs.h Thu Dec 5 00:03:17 2019 (r355407)
> > @@ -378,10 +378,6 @@ struct tmpfs_mount {
> > /* All node lock to protect the node list and tmp_pages_used. */
> > struct mtx tm_allnode_lock;
> >
> > - /* Zones used to store file system meta data, per tmpfs mount. */
> > - uma_zone_t tm_dirent_pool;
> > - uma_zone_t tm_node_pool;
> > -
> > /* Read-only status. */
> > bool tm_ronly;
> > /* Do not use namecache. */
> > @@ -493,8 +489,9 @@ struct tmpfs_dirent *tmpfs_dir_next(struct tmpfs_node
> > #endif
> >
> > size_t tmpfs_mem_avail(void);
> > -
> > size_t tmpfs_pages_used(struct tmpfs_mount *tmp);
> > +void tmpfs_subr_init(void);
> > +void tmpfs_subr_uninit(void);
> >
> > #endif
> >
> >
> > Modified: head/sys/fs/tmpfs/tmpfs_subr.c
> > ==============================================================================
> > --- head/sys/fs/tmpfs/tmpfs_subr.c Wed Dec 4 23:24:40 2019 (r355406)
> > +++ head/sys/fs/tmpfs/tmpfs_subr.c Thu Dec 5 00:03:17 2019 (r355407)
> > @@ -72,7 +72,74 @@ SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "tmp
> >
> > static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
> >
> > +static uma_zone_t tmpfs_dirent_pool;
> > +static uma_zone_t tmpfs_node_pool;
> > +
> > static int
> > +tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
> > +{
> > + struct tmpfs_node *node;
> > +
> > + node = mem;
> > + node->tn_gen++;
> > + node->tn_size = 0;
> > + node->tn_status = 0;
> > + node->tn_flags = 0;
> > + node->tn_links = 0;
> > + node->tn_vnode = NULL;
> > + node->tn_vpstate = 0;
> > + return (0);
> > +}
> > +
> > +static void
> > +tmpfs_node_dtor(void *mem, int size, void *arg)
> > +{
> > + struct tmpfs_node *node;
> > +
> > + node = mem;
> > + node->tn_type = VNON;
> > +}
> > +
> > +static int
> > +tmpfs_node_init(void *mem, int size, int flags)
> > +{
> > + struct tmpfs_node *node;
> > +
> > + node = mem;
> > + node->tn_id = 0;
> > + mtx_init(&node->tn_interlock, "tmpfsni", NULL, MTX_DEF);
> > + node->tn_gen = arc4random();
> > + return (0);
> > +}
> > +
> > +static void
> > +tmpfs_node_fini(void *mem, int size)
> > +{
> > + struct tmpfs_node *node;
> > +
> > + node = mem;
> > + mtx_destroy(&node->tn_interlock);
> > +}
> > +
> > +void
> > +tmpfs_subr_init(void)
> > +{
> > + tmpfs_dirent_pool = uma_zcreate("TMPFS dirent",
> > + sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
> > + UMA_ALIGN_PTR, 0);
> > + tmpfs_node_pool = uma_zcreate("TMPFS node",
> > + sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
> > + tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
> > +}
> > +
> > +void
> > +tmpfs_subr_uninit(void)
> > +{
> > + uma_zdestroy(tmpfs_node_pool);
> > + uma_zdestroy(tmpfs_dirent_pool);
> > +}
> > +
> > +static int
> > sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
> > {
> > int error;
> > @@ -219,8 +286,7 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount
> > if ((mp->mnt_kern_flag & MNT_RDONLY) != 0)
> > return (EROFS);
> >
> > - nnode = (struct tmpfs_node *)uma_zalloc_arg(tmp->tm_node_pool, tmp,
> > - M_WAITOK);
> > + nnode = uma_zalloc_arg(tmpfs_node_pool, tmp, M_WAITOK);
> >
> > /* Generic initialization. */
> > nnode->tn_type = type;
> > @@ -367,7 +433,7 @@ tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct
> > panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
> > }
> >
> > - uma_zfree(tmp->tm_node_pool, node);
> > + uma_zfree(tmpfs_node_pool, node);
> > TMPFS_LOCK(tmp);
> > tmpfs_free_tmp(tmp);
> > return (true);
> > @@ -434,7 +500,7 @@ tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmp
> > {
> > struct tmpfs_dirent *nde;
> >
> > - nde = uma_zalloc(tmp->tm_dirent_pool, M_WAITOK);
> > + nde = uma_zalloc(tmpfs_dirent_pool, M_WAITOK);
> > nde->td_node = node;
> > if (name != NULL) {
> > nde->ud.td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
> > @@ -470,7 +536,7 @@ tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpf
> > }
> > if (!tmpfs_dirent_duphead(de) && de->ud.td_name != NULL)
> > free(de->ud.td_name, M_TMPFSNAME);
> > - uma_zfree(tmp->tm_dirent_pool, de);
> > + uma_zfree(tmpfs_dirent_pool, de);
> > }
> >
> > void
> >
> > Modified: head/sys/fs/tmpfs/tmpfs_vfsops.c
> > ==============================================================================
> > --- head/sys/fs/tmpfs/tmpfs_vfsops.c Wed Dec 4 23:24:40 2019 (r355406)
> > +++ head/sys/fs/tmpfs/tmpfs_vfsops.c Thu Dec 5 00:03:17 2019 (r355407)
> > @@ -99,49 +99,6 @@ static const char *tmpfs_updateopts[] = {
> > "from", "export", "size", NULL
> > };
> >
> > -static int
> > -tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
> > -{
> > - struct tmpfs_node *node = (struct tmpfs_node *)mem;
> > -
> > - node->tn_gen++;
> > - node->tn_size = 0;
> > - node->tn_status = 0;
> > - node->tn_flags = 0;
> > - node->tn_links = 0;
> > - node->tn_vnode = NULL;
> > - node->tn_vpstate = 0;
> > -
> > - return (0);
> > -}
> > -
> > -static void
> > -tmpfs_node_dtor(void *mem, int size, void *arg)
> > -{
> > - struct tmpfs_node *node = (struct tmpfs_node *)mem;
> > - node->tn_type = VNON;
> > -}
> > -
> > -static int
> > -tmpfs_node_init(void *mem, int size, int flags)
> > -{
> > - struct tmpfs_node *node = (struct tmpfs_node *)mem;
> > - node->tn_id = 0;
> > -
> > - mtx_init(&node->tn_interlock, "tmpfs node interlock", NULL, MTX_DEF);
> > - node->tn_gen = arc4random();
> > -
> > - return (0);
> > -}
> > -
> > -static void
> > -tmpfs_node_fini(void *mem, int size)
> > -{
> > - struct tmpfs_node *node = (struct tmpfs_node *)mem;
> > -
> > - mtx_destroy(&node->tn_interlock);
> > -}
> > -
> > /*
> > * Handle updates of time from writes to mmaped regions. Use
> > * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since
> > @@ -481,12 +438,6 @@ tmpfs_mount(struct mount *mp)
> > tmp->tm_pages_max = pages;
> > tmp->tm_pages_used = 0;
> > new_unrhdr64(&tmp->tm_ino_unr, 2);
> > - tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
> > - sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
> > - UMA_ALIGN_PTR, 0);
> > - tmp->tm_node_pool = uma_zcreate("TMPFS node",
> > - sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
> > - tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
> > tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
> > tmp->tm_nonc = nonc;
> >
> > @@ -495,8 +446,6 @@ tmpfs_mount(struct mount *mp)
> > root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);
> >
> > if (error != 0 || root == NULL) {
> > - uma_zdestroy(tmp->tm_node_pool);
> > - uma_zdestroy(tmp->tm_dirent_pool);
> > free(tmp, M_TMPFSMNT);
> > return (error);
> > }
> > @@ -590,9 +539,6 @@ tmpfs_free_tmp(struct tmpfs_mount *tmp)
> > }
> > TMPFS_UNLOCK(tmp);
> >
> > - uma_zdestroy(tmp->tm_dirent_pool);
> > - uma_zdestroy(tmp->tm_node_pool);
> > -
> > mtx_destroy(&tmp->tm_allnode_lock);
> > MPASS(tmp->tm_pages_used == 0);
> > MPASS(tmp->tm_nodes_inuse == 0);
> > @@ -702,10 +648,23 @@ tmpfs_susp_clean(struct mount *mp __unused)
> > {
> > }
> >
> > +static int
> > +tmpfs_init(struct vfsconf *conf)
> > +{
> > + tmpfs_subr_init();
> > + return (0);
> > +}
> > +
> > +static int
> > +tmpfs_uninit(struct vfsconf *conf)
> > +{
> > + tmpfs_subr_uninit();
> > + return (0);
> > +}
> > +
> > /*
> > * tmpfs vfs operations.
> > */
> > -
> > struct vfsops tmpfs_vfsops = {
> > .vfs_mount = tmpfs_mount,
> > .vfs_unmount = tmpfs_unmount,
> > @@ -715,5 +674,7 @@ struct vfsops tmpfs_vfsops = {
> > .vfs_fhtovp = tmpfs_fhtovp,
> > .vfs_sync = tmpfs_sync,
> > .vfs_susp_clean = tmpfs_susp_clean,
> > + .vfs_init = tmpfs_init,
> > + .vfs_uninit = tmpfs_uninit,
> > };
> > VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);