svn commit: r194317 - in head: . share/man/man9 sys/conf sys/kern sys/sys

Attilio Rao attilio at FreeBSD.org
Wed Jun 17 01:55:43 UTC 2009


Author: attilio
Date: Wed Jun 17 01:55:42 2009
New Revision: 194317
URL: http://svn.freebsd.org/changeset/base/194317

Log:
  Introduce support for adaptive spinning in lockmgr.
  As it has received little tuning so far, the support is disabled by
  default, but it can be opted into with the ADAPTIVE_LOCKMGRS kernel
  option.  Due to the nature of lockmgrs, adaptive spinning needs to be
  selectively enabled for each interested lockmgr.
  The support is bi-directional; in other words, it works whether the
  lock is held in read or write mode.  In particular, the read path can
  be tuned further through the sysctls debug.lockmgr.retries and
  debug.lockmgr.loops.  Ideally, these sysctls should be axed or
  compiled out before release.

  Additionally, note that adaptive spinning does not cope well with
  LK_SLEEPFAIL.  The reason is that many (and probably all) consumers
  of LK_SLEEPFAIL are mainly interested in knowing whether the interlock
  was dropped, so that they can reacquire it and re-test their initial
  conditions.  This interacts directly with adaptive spinning, because
  lockmgr needs to drop the interlock while spinning in order to avoid
  a deadlock (further details are in the comments inside the patch).

  Final note: finding someone willing to help tune this with relevant
  workloads would be both very important and appreciated.
  
  Tested by:	jeff, pho
  Requested by:	many
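
A minimal sketch of how a consumer might opt one of its lockmgr locks into
the new behaviour (the lock name, wmesg and init routine are illustrative
placeholders, not part of this commit; LK_ADAPTIVE only takes effect in
kernels built with options ADAPTIVE_LOCKMGRS):

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/lockmgr.h>

	static struct lock foo_lock;		/* hypothetical example lock */

	static void
	foo_lock_init(void)
	{
		/*
		 * Priority and timeout are left at 0 for brevity; the
		 * LK_ADAPTIVE flag is what makes this particular lock
		 * eligible for adaptive spinning.
		 */
		lockinit(&foo_lock, 0, "foolk", 0, LK_ADAPTIVE);
	}

Once enabled, the shared-path spinning can then be tuned at run time via the
debug.lockmgr.retries and debug.lockmgr.loops sysctls mentioned above, for
example "sysctl debug.lockmgr.loops=20000".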

Modified:
  head/UPDATING
  head/share/man/man9/lock.9
  head/sys/conf/options
  head/sys/kern/kern_lock.c
  head/sys/sys/lockmgr.h

Modified: head/UPDATING
==============================================================================
--- head/UPDATING	Wed Jun 17 00:35:21 2009	(r194316)
+++ head/UPDATING	Wed Jun 17 01:55:42 2009	(r194317)
@@ -22,6 +22,13 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 8.
 	to maximize performance.  (To disable malloc debugging, run
 	ln -s aj /etc/malloc.conf.)
 
+20090616:
+	The kernel option ADAPTIVE_LOCKMGRS has been introduced.
+	This option compiles in support for adaptive spinning for lockmgrs
+	that want to enable it.  The lockinit() function now accepts the
+	flag LK_ADAPTIVE in order to make the lock object subject to
+	adaptive spinning when held in either write or read mode.
+
 20090613:
 	The layout of the structure returned by IEEE80211_IOC_STA_INFO
 	has changed.  User applications that use this ioctl need to be

Modified: head/share/man/man9/lock.9
==============================================================================
--- head/share/man/man9/lock.9	Wed Jun 17 00:35:21 2009	(r194316)
+++ head/share/man/man9/lock.9	Wed Jun 17 01:55:42 2009	(r194317)
@@ -26,7 +26,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd February 05, 2009
+.Dd June 16, 2009
 .Dt LOCK 9
 .Os
 .Sh NAME
@@ -96,6 +96,9 @@ The timeout value passed to
 .It Fa flags
 The flags the lock is to be initialized with:
 .Bl -tag -width ".Dv LK_CANRECURSE"
+.It Dv LK_ADAPTIVE
+Enable adaptive spinning for this lock if the kernel is compiled with the
+ADAPTIVE_LOCKMGRS option.
 .It Dv LK_CANRECURSE
 Allow recursive exclusive locks.
 .It Dv LK_NOPROFILE

Modified: head/sys/conf/options
==============================================================================
--- head/sys/conf/options	Wed Jun 17 00:35:21 2009	(r194316)
+++ head/sys/conf/options	Wed Jun 17 01:55:42 2009	(r194317)
@@ -60,6 +60,7 @@ KDB_UNATTENDED	opt_kdb.h
 SYSCTL_DEBUG	opt_sysctl.h
 
 # Miscellaneous options.
+ADAPTIVE_LOCKMGRS
 ALQ
 AUDIT		opt_global.h
 CODA_COMPAT_5	opt_coda.h
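
Enabling the new option is the usual one-line addition to a kernel
configuration file; a hypothetical fragment (MYKERNEL is a placeholder):

	include		GENERIC
	ident		MYKERNEL
	options 	ADAPTIVE_LOCKMGRS	# adaptive spinning for lockmgr(9)

Since no opt_*.h file is named for the option in sys/conf/options above,
config(8) generates opt_adaptive_lockmgrs.h for it, which is why kern_lock.c
gains that include below.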

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c	Wed Jun 17 00:35:21 2009	(r194316)
+++ head/sys/kern/kern_lock.c	Wed Jun 17 01:55:42 2009	(r194317)
@@ -26,6 +26,7 @@
  * DAMAGE.
  */
 
+#include "opt_adaptive_lockmgrs.h"
 #include "opt_ddb.h"
 #include "opt_kdtrace.h"
 
@@ -34,6 +35,7 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/ktr.h>
+#include <sys/linker_set.h>
 #include <sys/lock.h>
 #include <sys/lock_profile.h>
 #include <sys/lockmgr.h>
@@ -43,6 +45,7 @@ __FBSDID("$FreeBSD$");
 #ifdef DEBUG_LOCKS
 #include <sys/stack.h>
 #endif
+#include <sys/sysctl.h>
 #include <sys/systm.h>
 
 #include <machine/cpu.h>
@@ -51,7 +54,10 @@ __FBSDID("$FreeBSD$");
 #include <ddb/ddb.h>
 #endif
 
-CTASSERT((LK_NOSHARE & LO_CLASSFLAGS) == LK_NOSHARE);
+CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
+    (LK_ADAPTIVE | LK_NOSHARE));
+CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
+    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
 
 #define	SQ_EXCLUSIVE_QUEUE	0
 #define	SQ_SHARED_QUEUE		1
@@ -106,6 +112,7 @@ CTASSERT((LK_NOSHARE & LO_CLASSFLAGS) ==
 
 #define	LK_CAN_SHARE(x)							\
 	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
+	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
 	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
 #define	LK_TRYOP(x)							\
 	((x) & LK_NOWAIT)
@@ -115,6 +122,10 @@ CTASSERT((LK_NOSHARE & LO_CLASSFLAGS) ==
 #define	LK_TRYWIT(x)							\
 	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
 
+#define	LK_CAN_ADAPT(lk, f)						\
+	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
+	((f) & LK_SLEEPFAIL) == 0)
+
 #define	lockmgr_disowned(lk)						\
 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
 
@@ -145,6 +156,14 @@ struct lock_class lock_class_lockmgr = {
 #endif
 };
 
+#ifdef ADAPTIVE_LOCKMGRS
+static u_int alk_retries = 10;
+static u_int alk_loops = 10000;
+SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
+SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
+SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
+#endif
+
 static __inline struct thread *
 lockmgr_xholder(struct lock *lk)
 {
@@ -233,9 +252,9 @@ wakeupshlk(struct lock *lk, const char *
 		 * lock quickly.
 		 */
 		if ((x & LK_ALL_WAITERS) == 0) {
-			MPASS(x == LK_SHARERS_LOCK(1));
-			if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1),
-			    LK_UNLOCKED))
+			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
+			    LK_SHARERS_LOCK(1));
+			if (atomic_cmpset_ptr(&lk->lk_lock, x, LK_UNLOCKED))
 				break;
 			continue;
 		}
@@ -245,7 +264,7 @@ wakeupshlk(struct lock *lk, const char *
 		 * path in order to handle wakeups correctly.
 		 */
 		sleepq_lock(&lk->lock_object);
-		x = lk->lk_lock & LK_ALL_WAITERS;
+		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
 		v = LK_UNLOCKED;
 
 		/*
@@ -256,7 +275,8 @@ wakeupshlk(struct lock *lk, const char *
 			queue = SQ_EXCLUSIVE_QUEUE;
 			v |= (x & LK_SHARED_WAITERS);
 		} else {
-			MPASS(x == LK_SHARED_WAITERS);
+			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
+			    LK_SHARED_WAITERS);
 			queue = SQ_SHARED_QUEUE;
 		}
 
@@ -326,7 +346,7 @@ lockinit(struct lock *lk, int pri, const
 		iflags |= LO_WITNESS;
 	if (flags & LK_QUIET)
 		iflags |= LO_QUIET;
-	iflags |= flags & LK_NOSHARE;
+	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
 
 	lk->lk_lock = LK_UNLOCKED;
 	lk->lk_recurse = 0;
@@ -359,6 +379,10 @@ __lockmgr_args(struct lock *lk, u_int fl
 	uint64_t waittime = 0;
 	int contested = 0;
 #endif
+#ifdef ADAPTIVE_LOCKMGRS
+	volatile struct thread *owner;
+	u_int i, spintries = 0;
+#endif
 
 	error = 0;
 	tid = (uintptr_t)curthread;
@@ -436,6 +460,59 @@ __lockmgr_args(struct lock *lk, u_int fl
 				break;
 			}
 
+#ifdef ADAPTIVE_LOCKMGRS
+			/*
+			 * If the owner is running on another CPU, spin until
+			 * the owner stops running or the state of the lock
+			 * changes.
+			 */
+			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
+			    LK_HOLDER(x) != LK_KERNPROC) {
+				owner = (struct thread *)LK_HOLDER(x);
+				if (LOCK_LOG_TEST(&lk->lock_object, 0))
+					CTR3(KTR_LOCK,
+					    "%s: spinning on %p held by %p",
+					    __func__, lk, owner);
+
+				/*
+				 * If we are also holding an interlock, drop it
+				 * in order to avoid a deadlock if the lockmgr
+				 * owner is adaptively spinning on the
+				 * interlock itself.
+				 */
+				if (flags & LK_INTERLOCK) {
+					class->lc_unlock(ilk);
+					flags &= ~LK_INTERLOCK;
+				}
+				GIANT_SAVE();
+				while (LK_HOLDER(lk->lk_lock) ==
+				    (uintptr_t)owner && TD_IS_RUNNING(owner))
+					cpu_spinwait();
+			} else if (LK_CAN_ADAPT(lk, flags) &&
+			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
+			    spintries < alk_retries) {
+				if (flags & LK_INTERLOCK) {
+					class->lc_unlock(ilk);
+					flags &= ~LK_INTERLOCK;
+				}
+				GIANT_SAVE();
+				spintries++;
+				for (i = 0; i < alk_loops; i++) {
+					if (LOCK_LOG_TEST(&lk->lock_object, 0))
+						CTR4(KTR_LOCK,
+				    "%s: shared spinning on %p with %u and %u",
+						    __func__, lk, spintries, i);
+					x = lk->lk_lock;
+					if ((x & LK_SHARE) == 0 ||
+					    LK_CAN_SHARE(x) != 0)
+						break;
+					cpu_spinwait();
+				}
+				if (i != alk_loops)
+					continue;
+			}
+#endif
+
 			/*
 			 * Acquire the sleepqueue chain lock because we
 			 * probably will need to manipulate waiters flags.
@@ -452,6 +529,24 @@ __lockmgr_args(struct lock *lk, u_int fl
 				continue;
 			}
 
+#ifdef ADAPTIVE_LOCKMGRS
+			/*
+			 * The current lock owner might have started executing
+			 * on another CPU (or the lock could have changed
+			 * owner) while we were waiting on the turnstile
+			 * chain lock.  If so, drop the turnstile lock and try
+			 * again.
+			 */
+			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
+			    LK_HOLDER(x) != LK_KERNPROC) {
+				owner = (struct thread *)LK_HOLDER(x);
+				if (TD_IS_RUNNING(owner)) {
+					sleepq_release(&lk->lock_object);
+					continue;
+				}
+			}
+#endif
+
 			/*
 			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
 			 * loop back and retry.
@@ -497,13 +592,15 @@ __lockmgr_args(struct lock *lk, u_int fl
 		break;
 	case LK_UPGRADE:
 		_lockmgr_assert(lk, KA_SLOCKED, file, line);
-		x = lk->lk_lock & LK_ALL_WAITERS;
+		v = lk->lk_lock;
+		x = v & LK_ALL_WAITERS;
+		v &= LK_EXCLUSIVE_SPINNERS;
 
 		/*
 		 * Try to switch from one shared lock to an exclusive one.
 		 * We need to preserve waiters flags during the operation.
 		 */
-		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
+		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
 		    tid | x)) {
 			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
 			    line);
@@ -575,13 +672,69 @@ __lockmgr_args(struct lock *lk, u_int fl
 				break;
 			}
 
+#ifdef ADAPTIVE_LOCKMGRS
+			/*
+			 * If the owner is running on another CPU, spin until
+			 * the owner stops running or the state of the lock
+			 * changes.
+			 */
+			x = lk->lk_lock;
+			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
+			    LK_HOLDER(x) != LK_KERNPROC) {
+				owner = (struct thread *)LK_HOLDER(x);
+				if (LOCK_LOG_TEST(&lk->lock_object, 0))
+					CTR3(KTR_LOCK,
+					    "%s: spinning on %p held by %p",
+					    __func__, lk, owner);
+
+				/*
+				 * If we are also holding an interlock, drop it
+				 * in order to avoid a deadlock if the lockmgr
+				 * owner is adaptively spinning on the
+				 * interlock itself.
+				 */
+				if (flags & LK_INTERLOCK) {
+					class->lc_unlock(ilk);
+					flags &= ~LK_INTERLOCK;
+				}
+				GIANT_SAVE();
+				while (LK_HOLDER(lk->lk_lock) ==
+				    (uintptr_t)owner && TD_IS_RUNNING(owner))
+					cpu_spinwait();
+			} else if (LK_CAN_ADAPT(lk, flags) &&
+			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
+			    spintries < alk_retries) {
+				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
+				    !atomic_cmpset_ptr(&lk->lk_lock, x,
+				    x | LK_EXCLUSIVE_SPINNERS))
+					continue;
+				if (flags & LK_INTERLOCK) {
+					class->lc_unlock(ilk);
+					flags &= ~LK_INTERLOCK;
+				}
+				GIANT_SAVE();
+				spintries++;
+				for (i = 0; i < alk_loops; i++) {
+					if (LOCK_LOG_TEST(&lk->lock_object, 0))
+						CTR4(KTR_LOCK,
+				    "%s: shared spinning on %p with %u and %u",
+						    __func__, lk, spintries, i);
+					if ((lk->lk_lock &
+					    LK_EXCLUSIVE_SPINNERS) == 0)
+						break;
+					cpu_spinwait();
+				}
+				if (i != alk_loops)
+					continue;
+			}
+#endif
+
 			/*
 			 * Acquire the sleepqueue chain lock because we
 			 * probably will need to manipulate waiters flags.
 			 */
 			sleepq_lock(&lk->lock_object);
 			x = lk->lk_lock;
-			v = x & LK_ALL_WAITERS;
 
 			/*
 			 * if the lock has been released while we spun on
@@ -592,6 +745,24 @@ __lockmgr_args(struct lock *lk, u_int fl
 				continue;
 			}
 
+#ifdef ADAPTIVE_LOCKMGRS
+			/*
+			 * The current lock owner might have started executing
+			 * on another CPU (or the lock could have changed
+			 * owner) while we were waiting on the turnstile
+			 * chain lock.  If so, drop the turnstile lock and try
+			 * again.
+			 */
+			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
+			    LK_HOLDER(x) != LK_KERNPROC) {
+				owner = (struct thread *)LK_HOLDER(x);
+				if (TD_IS_RUNNING(owner)) {
+					sleepq_release(&lk->lock_object);
+					continue;
+				}
+			}
+#endif
+
 			/*
 			 * The lock can be in the state where there is a
 			 * pending queue of waiters, but still no owner.
@@ -601,7 +772,9 @@ __lockmgr_args(struct lock *lk, u_int fl
 			 * claim lock ownership and return, preserving waiters
 			 * flags.
 			 */
-			if (x == (LK_UNLOCKED | v)) {
+			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
+			if ((x & ~v) == LK_UNLOCKED) {
+				v &= ~LK_EXCLUSIVE_SPINNERS;
 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
 				    tid | v)) {
 					sleepq_release(&lk->lock_object);
@@ -666,7 +839,9 @@ __lockmgr_args(struct lock *lk, u_int fl
 		 * In order to preserve waiters flags, just spin.
 		 */
 		for (;;) {
-			x = lk->lk_lock & LK_ALL_WAITERS;
+			x = lk->lk_lock;
+			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
+			x &= LK_ALL_WAITERS;
 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
 			    LK_SHARERS_LOCK(1) | x))
 				break;
@@ -712,7 +887,7 @@ __lockmgr_args(struct lock *lk, u_int fl
 				break;
 
 			sleepq_lock(&lk->lock_object);
-			x = lk->lk_lock & LK_ALL_WAITERS;
+			x = lk->lk_lock;
 			v = LK_UNLOCKED;
 
 			/*
@@ -720,11 +895,13 @@ __lockmgr_args(struct lock *lk, u_int fl
 			 * preference in order to avoid deadlock with
 			 * shared runners up.
 			 */
+			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
 			if (x & LK_EXCLUSIVE_WAITERS) {
 				queue = SQ_EXCLUSIVE_QUEUE;
 				v |= (x & LK_SHARED_WAITERS);
 			} else {
-				MPASS(x == LK_SHARED_WAITERS);
+				MPASS((x & LK_ALL_WAITERS) ==
+				    LK_SHARED_WAITERS);
 				queue = SQ_SHARED_QUEUE;
 			}
 
@@ -777,7 +954,6 @@ __lockmgr_args(struct lock *lk, u_int fl
 			 */
 			sleepq_lock(&lk->lock_object);
 			x = lk->lk_lock;
-			v = x & LK_ALL_WAITERS;
 
 			/*
 			 * if the lock has been released while we spun on
@@ -788,8 +964,9 @@ __lockmgr_args(struct lock *lk, u_int fl
 				continue;
 			}
 
-			if (x == (LK_UNLOCKED | v)) {
-				v = x;
+			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
+			if ((x & ~v) == LK_UNLOCKED) {
+				v = (x & ~LK_EXCLUSIVE_SPINNERS);
 				if (v & LK_EXCLUSIVE_WAITERS) {
 					queue = SQ_EXCLUSIVE_QUEUE;
 					v &= ~LK_EXCLUSIVE_WAITERS;
@@ -902,7 +1079,9 @@ _lockmgr_disown(struct lock *lk, const c
 	 * In order to preserve waiters flags, just spin.
 	 */
 	for (;;) {
-		x = lk->lk_lock & LK_ALL_WAITERS;
+		x = lk->lk_lock;
+		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
+		x &= LK_ALL_WAITERS;
 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
 		    LK_KERNPROC | x))
 			return;
@@ -933,6 +1112,8 @@ lockmgr_printinfo(struct lock *lk)
 		printf(" with exclusive waiters pending\n");
 	if (x & LK_SHARED_WAITERS)
 		printf(" with shared waiters pending\n");
+	if (x & LK_EXCLUSIVE_SPINNERS)
+		printf(" with exclusive spinners pending\n");
 
 	STACK_PRINT(lk);
 }
@@ -1094,5 +1275,10 @@ db_show_lockmgr(struct lock_object *lock
 	default:
 		db_printf("none\n");
 	}
+	db_printf(" spinners: ");
+	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
+		db_printf("exclusive\n");
+	else
+		db_printf("none\n");
 }
 #endif
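
Stripped of the lockmgr-specific bookkeeping (interlock handling, GIANT_SAVE,
KTR tracing and the LK_EXCLUSIVE_SPINNERS flag), the exclusive-owner spin
added above reduces to the pattern sketched below; owner_on_cpu() and
cpu_relax() are hypothetical stand-ins for the kernel's TD_IS_RUNNING() and
cpu_spinwait():

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical stand-ins, stubbed so the sketch is self-contained. */
	static bool owner_on_cpu(uintptr_t owner) { (void)owner; return (false); }
	static void cpu_relax(void) { }

	/*
	 * Spin for as long as the exclusive owner recorded in *lockp keeps
	 * running; return as soon as the owner goes off CPU or the lock
	 * word changes, so the caller can fall back to sleeping.
	 */
	static void
	adaptive_spin(_Atomic uintptr_t *lockp, uintptr_t owner_mask)
	{
		uintptr_t owner;

		owner = atomic_load(lockp) & owner_mask;
		if (owner == 0)
			return;				/* already released */
		while ((atomic_load(lockp) & owner_mask) == owner &&
		    owner_on_cpu(owner))
			cpu_relax();
	}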

Modified: head/sys/sys/lockmgr.h
==============================================================================
--- head/sys/sys/lockmgr.h	Wed Jun 17 00:35:21 2009	(r194316)
+++ head/sys/sys/lockmgr.h	Wed Jun 17 01:55:42 2009	(r194317)
@@ -39,13 +39,14 @@
 #define	LK_SHARE			0x01
 #define	LK_SHARED_WAITERS		0x02
 #define	LK_EXCLUSIVE_WAITERS		0x04
+#define	LK_EXCLUSIVE_SPINNERS		0x08
 #define	LK_ALL_WAITERS							\
 	(LK_SHARED_WAITERS | LK_EXCLUSIVE_WAITERS)
 #define	LK_FLAGMASK							\
-	(LK_SHARE | LK_ALL_WAITERS)
+	(LK_SHARE | LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)
 
 #define	LK_HOLDER(x)			((x) & ~LK_FLAGMASK)
-#define	LK_SHARERS_SHIFT		3
+#define	LK_SHARERS_SHIFT		4
 #define	LK_SHARERS(x)			(LK_HOLDER(x) >> LK_SHARERS_SHIFT)
 #define	LK_SHARERS_LOCK(x)		((x) << LK_SHARERS_SHIFT | LK_SHARE)
 #define	LK_ONE_SHARER			(1 << LK_SHARERS_SHIFT)
@@ -141,6 +142,7 @@ _lockmgr_args_rw(struct lock *lk, u_int 
 #define	LK_NOSHARE	0x000008
 #define	LK_NOWITNESS	0x000010
 #define	LK_QUIET	0x000020
+#define	LK_ADAPTIVE	0x000040
 
 /*
  * Additional attributes to be used in lockmgr().
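
For reference, the low bits of the lk_lock word after this change, as read
off the defines above:

	bit 0	LK_SHARE		lock is held in shared mode
	bit 1	LK_SHARED_WAITERS	sleepers waiting for a shared lock
	bit 2	LK_EXCLUSIVE_WAITERS	sleepers waiting for an exclusive lock
	bit 3	LK_EXCLUSIVE_SPINNERS	adaptive spinners waiting for an
					exclusive lock (new in this revision)
	bits 4+	LK_HOLDER / LK_SHARERS	owning thread pointer, or the count
					of shared holders

This is why LK_SHARERS_SHIFT moves from 3 to 4: the shared-holder count (and
the exclusive holder's thread pointer) must stay clear of all the flag bits
covered by LK_FLAGMASK, which now includes LK_EXCLUSIVE_SPINNERS.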

