svn commit: r356812 - head/sys/kern

Mateusz Guzik mjg at FreeBSD.org
Thu Jan 16 21:45:21 UTC 2020


Author: mjg
Date: Thu Jan 16 21:45:21 2020
New Revision: 356812
URL: https://svnweb.freebsd.org/changeset/base/356812

Log:
  vfs: increment numvnodes without the vnode list lock unless under pressure
  
  The vnode list lock is only needed to reclaim free vnodes or kick the vnlru
  thread (or to block and not miss a wake up (but note the sleep has a timeout so
  this would not be a correctness issue)). Try to get away without the lock by
  just doing an atomic increment.
  
  The lock is contended e.g., during poudriere -j 104 where about half of all
  acquires come from vnode allocation code.
  
  Note the entire scheme needs a rewrite, the above just reduces its SMP impact.
  
  Reviewed by:	kib
  Differential Revision:	https://reviews.freebsd.org/D23140

Modified:
  head/sys/kern/vfs_subr.c

Modified: head/sys/kern/vfs_subr.c
==============================================================================
--- head/sys/kern/vfs_subr.c	Thu Jan 16 21:43:13 2020	(r356811)
+++ head/sys/kern/vfs_subr.c	Thu Jan 16 21:45:21 2020	(r356812)
@@ -1497,21 +1497,22 @@ vtryrecycle(struct vnode *vp)
  * The routine can try to free a vnode or stall for up to 1 second waiting for
  * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation.
  */
-static struct vnode *
-vn_alloc(struct mount *mp)
+static u_long vn_alloc_cyclecount;
+
+static struct vnode * __noinline
+vn_alloc_hard(struct mount *mp)
 {
 	u_long rnumvnodes, rfreevnodes;
-	static u_long cyclecount;
 
 	mtx_lock(&vnode_list_mtx);
 	rnumvnodes = atomic_load_long(&numvnodes);
 	if (rnumvnodes + 1 < desiredvnodes) {
-		cyclecount = 0;
+		vn_alloc_cyclecount = 0;
 		goto alloc;
 	}
 	rfreevnodes = atomic_load_long(&freevnodes);
-	if (cyclecount++ >= rfreevnodes) {
-		cyclecount = 0;
+	if (vn_alloc_cyclecount++ >= rfreevnodes) {
+		vn_alloc_cyclecount = 0;
 		vstir = 1;
 	}
 	/*
@@ -1543,6 +1544,22 @@ alloc:
 	if (vnlru_under(rnumvnodes, vlowat))
 		vnlru_kick();
 	mtx_unlock(&vnode_list_mtx);
+	return (uma_zalloc(vnode_zone, M_WAITOK));
+}
+
+static struct vnode *
+vn_alloc(struct mount *mp)
+{
+	u_long rnumvnodes;
+
+	if (__predict_false(vn_alloc_cyclecount != 0))
+		return (vn_alloc_hard(mp));
+	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
+	if (__predict_false(vnlru_under(rnumvnodes, vlowat))) {
+		atomic_subtract_long(&numvnodes, 1);
+		return (vn_alloc_hard(mp));
+	}
+
 	return (uma_zalloc(vnode_zone, M_WAITOK));
 }
 


More information about the svn-src-head mailing list