svn commit: r274606 - in stable/10: share/man/man9 sys/kern sys/sys

Konstantin Belousov kib at FreeBSD.org
Sun Nov 16 23:02:34 UTC 2014


Author: kib
Date: Sun Nov 16 23:02:32 2014
New Revision: 274606
URL: https://svnweb.freebsd.org/changeset/base/274606

Log:
  MFC r273966:
  Fix two issues with lockmgr(9) LK_CAN_SHARE() test, related
  to the exclusive locker starvation.
  
  MFC r273986:
  Fix the build with ADAPTIVE_LOCKMGRS kernel option.

Modified:
  stable/10/share/man/man9/lock.9
  stable/10/sys/kern/kern_lock.c
  stable/10/sys/kern/vfs_lookup.c
  stable/10/sys/sys/lockmgr.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/share/man/man9/lock.9
==============================================================================
--- stable/10/share/man/man9/lock.9	Sun Nov 16 21:39:56 2014	(r274605)
+++ stable/10/share/man/man9/lock.9	Sun Nov 16 23:02:32 2014	(r274606)
@@ -26,7 +26,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd October 6, 2013
+.Dd November 2, 2014
 .Dt LOCK 9
 .Os
 .Sh NAME
@@ -145,7 +145,7 @@ Their arguments are:
 A pointer to the lock to manipulate.
 .It Fa flags
 Flags indicating what action is to be taken.
-.Bl -tag -width ".Dv LK_CANRECURSE"
+.Bl -tag -width ".Dv LK_NODDLKTREAT"
 .It Dv LK_SHARED
 Acquire a shared lock.
 If an exclusive lock is currently held,
@@ -199,6 +199,29 @@ Allow recursion on an exclusive lock.
 For every lock there must be a release.
 .It Dv LK_INTERLOCK
 Unlock the interlock (which should be locked already).
+.It Dv LK_NODDLKTREAT
+Normally,
+.Fn lockmgr
+postpones serving further shared requests for a shared-locked lock if there is
+an exclusive waiter, to avoid exclusive lock starvation.
+But, if the thread requesting the shared lock already owns a shared lockmgr
+lock, the request is granted even in the presence of a parallel exclusive lock
+request, which is done to avoid deadlocks with recursive shared acquisition.
+.Pp
+The
+.Dv LK_NODDLKTREAT
+flag can only be used by code which requests shared non-recursive lock.
+The flag allows exclusive requests to preempt the current shared request
+even if the current thread owns shared locks.
+This is safe since the shared lock is guaranteed to not recurse, and is used
+when the thread is known to hold unrelated shared locks, to not cause
+unnecessary starvation.  An example is
+.Dv vp
+locking in VFS
+.Xr lookup 9 ,
+when
+.Dv dvp
+is already locked.
 .El
 .It Fa ilk
 An interlock mutex for controlling group access to the lock.

Modified: stable/10/sys/kern/kern_lock.c
==============================================================================
--- stable/10/sys/kern/kern_lock.c	Sun Nov 16 21:39:56 2014	(r274605)
+++ stable/10/sys/kern/kern_lock.c	Sun Nov 16 23:02:32 2014	(r274606)
@@ -116,10 +116,11 @@ CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
 	}								\
 } while (0)
 
-#define	LK_CAN_SHARE(x)							\
-	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
-	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
-	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
+#define	LK_CAN_SHARE(x, flags)						\
+	(((x) & LK_SHARE) &&						\
+	(((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 ||	\
+	(curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||	\
+	(curthread->td_pflags & TDP_DEADLKTREAT)))
 #define	LK_TRYOP(x)							\
 	((x) & LK_NOWAIT)
 
@@ -531,7 +532,7 @@ __lockmgr_args(struct lock *lk, u_int fl
 			 * waiters, if we fail to acquire the shared lock
 			 * loop back and retry.
 			 */
-			if (LK_CAN_SHARE(x)) {
+			if (LK_CAN_SHARE(x, flags)) {
 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
 				    x + LK_ONE_SHARER))
 					break;
@@ -615,7 +616,7 @@ __lockmgr_args(struct lock *lk, u_int fl
 						    __func__, lk, spintries, i);
 					x = lk->lk_lock;
 					if ((x & LK_SHARE) == 0 ||
-					    LK_CAN_SHARE(x) != 0)
+					    LK_CAN_SHARE(x, flags) != 0)
 						break;
 					cpu_spinwait();
 				}
@@ -636,7 +637,7 @@ __lockmgr_args(struct lock *lk, u_int fl
 			 * if the lock can be acquired in shared mode, try
 			 * again.
 			 */
-			if (LK_CAN_SHARE(x)) {
+			if (LK_CAN_SHARE(x, flags)) {
 				sleepq_release(&lk->lock_object);
 				continue;
 			}

Modified: stable/10/sys/kern/vfs_lookup.c
==============================================================================
--- stable/10/sys/kern/vfs_lookup.c	Sun Nov 16 21:39:56 2014	(r274605)
+++ stable/10/sys/kern/vfs_lookup.c	Sun Nov 16 23:02:32 2014	(r274606)
@@ -392,6 +392,7 @@ compute_cn_lkflags(struct mount *mp, int
 		lkflags &= ~LK_SHARED;
 		lkflags |= LK_EXCLUSIVE;
 	}
+	lkflags |= LK_NODDLKTREAT;
 	return (lkflags);
 }
 

Modified: stable/10/sys/sys/lockmgr.h
==============================================================================
--- stable/10/sys/sys/lockmgr.h	Sun Nov 16 21:39:56 2014	(r274605)
+++ stable/10/sys/sys/lockmgr.h	Sun Nov 16 23:02:32 2014	(r274606)
@@ -158,6 +158,7 @@ _lockmgr_args_rw(struct lock *lk, u_int 
 #define	LK_RETRY	0x000400
 #define	LK_SLEEPFAIL	0x000800
 #define	LK_TIMELOCK	0x001000
+#define	LK_NODDLKTREAT	0x002000
 
 /*
  * Operations for lockmgr().


More information about the svn-src-stable-10 mailing list