svn commit: r273966 - in head: share/man/man9 sys/kern sys/sys

Konstantin Belousov kostikbel at gmail.com
Sun Nov 2 19:10:36 UTC 2014


On Sun, Nov 02, 2014 at 06:53:44PM +0100, Attilio Rao wrote:
> > I did not propose to verify the owner chain.  I said that it is easy
> > to record the locks owned by the current thread, for that thread's
> > consumption only.  Below is the prototype.
> 
> I think it is too expensive; consider that this must happen for every
> shared lock.  I know we may not be using too many shared locks on
> lockmgr right now, but that is not a good reason to make shared locks
> bloated and more expensive in lockmgr.

It can be significantly simplified if the array of lock pointers is
kept dense.  Then the only non-trivial operation is an out-of-order
unlock, when the array has to be compacted.

The code adds one write and n reads per shared-lock acquisition, where
n is the number of shared-locked locks already owned by the thread;
typically n is 0 or 1.  On unlock, if done in order, the code adds one
read; an out-of-order unlock shuffles array elements.  Again, for a
typical lock nesting depth of 2, this means one read and one write,
and even that is rare.  All reads and writes go to thread-local memory.
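
To make the cost claim concrete, below is a minimal userspace sketch of
the same dense-array bookkeeping.  The names track_slock(),
untrack_slock(), NSLOTS, and the standalone main() are invented for the
sketch and are not part of the patch:

/* slocks_demo.c: standalone model of the td_lk_slocksp bookkeeping. */
#include <assert.h>
#include <stdio.h>

#define	NSLOTS	8			/* mirrors td_lk_slocksp[8] */

static void	*slots[NSLOTS];		/* dense array of held locks */
static int	nheld;			/* mirrors td_lk_slocks */

/* Shared-lock acquisition: one write into the next free slot. */
static void
track_slock(void *lk)
{

	if (nheld < NSLOTS)
		slots[nheld] = lk;
	nheld++;
}

/* Unlock: one read if done in order, a tail shift if out of order. */
static void
untrack_slock(void *lk)
{
	int i, j, n;

	n = --nheld;
	assert(n >= 0);
	if (n < NSLOTS && slots[n] != lk) {
		for (i = n - 1; i >= 0; i--) {
			if (slots[i] == lk) {
				for (j = i + 1; j <= n; j++)
					slots[j - 1] = slots[j];
				break;
			}
		}
	}
}

int
main(void)
{
	int a, b;

	track_slock(&a);
	track_slock(&b);
	untrack_slock(&a);	/* out of order: &b shifts into slot 0 */
	untrack_slock(&b);	/* now in order: a single read, no shift */
	printf("locks still held: %d\n", nheld);	/* prints 0 */
	return (0);
}

Only the out-of-order unlock touches more than one slot; every other
path is a single read or write to thread-local memory, which matches
the estimate above.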

I am not going to spend any more time on this if people do not consider
the lock tracking worth it.  Otherwise, I will benchmark the patch.

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 24d94cc..d63c86f 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -75,8 +75,6 @@ CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
 #define	TD_LOCKS_INC(td)	((td)->td_locks++)
 #define	TD_LOCKS_DEC(td)	((td)->td_locks--)
 #endif
-#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
-#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
 
 #ifndef DEBUG_LOCKS
 #define	STACK_PRINT(lk)
@@ -115,11 +113,70 @@ CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
 	}								\
 } while (0)
 
-#define	LK_CAN_SHARE(x, flags)						\
-	(((x) & LK_SHARE) &&						\
-	(((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 ||	\
-	(curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||	\
-	(curthread->td_pflags & TDP_DEADLKTREAT)))
+static inline bool
+lk_can_share(struct thread *td, void *lk, uintptr_t x, int flags __unused)
+{
+	int i, sl;
+
+	if ((x & LK_SHARE) == 0)
+		return (false);
+	sl = td->td_lk_slocks;
+	if (sl < nitems(td->td_lk_slocksp)) {
+		for (i = 0; i < sl; i++) {
+			/*
+			 * The current thread definitely owns this
+			 * lock in shared mode already; grant the
+			 * request despite the possible presence of
+			 * exclusive waiters, to avoid deadlocks.
+			 */
+			if (lk == td->td_lk_slocksp[i])
+				return (true);
+		}
+	} else {
+		/*
+		 * All slots are filled; err on the safe side.
+		 * XXXKIB: panic instead ?
+		 */
+		return (true);
+	}
+	if ((td->td_pflags & TDP_DEADLKTREAT) != 0)
+		return (true);
+	if ((x & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) != 0)
+		return (false);
+	return (true);
+}
+
+static inline void
+lk_slocks_inc(struct thread *td, void *lk)
+{
+	int sl;
+
+	sl = td->td_lk_slocks;
+	if (sl < nitems(td->td_lk_slocksp))
+		td->td_lk_slocksp[sl] = lk;
+	td->td_lk_slocks++;
+}
+
+static inline void
+lk_slocks_dec(struct thread *td, void *lk)
+{
+	int i, j, sl;
+
+	sl = --td->td_lk_slocks;
+	KASSERT(sl >= 0, ("missed inc"));
+	if (sl < nitems(td->td_lk_slocksp) && td->td_lk_slocksp[sl] != lk) {
+		for (i = sl - 1; i >= 0; i--) {
+			if (lk == td->td_lk_slocksp[i]) {
+				for (j = i + 1; j <= sl; j++) {
+					td->td_lk_slocksp[j - 1] =
+					    td->td_lk_slocksp[j];
+				}
+				break;
+			}
+		}
+	}
+}
+
 #define	LK_TRYOP(x)							\
 	((x) & LK_NOWAIT)
 
@@ -338,7 +395,7 @@ wakeupshlk(struct lock *lk, const char *file, int line)
 
 	lock_profile_release_lock(&lk->lock_object);
 	TD_LOCKS_DEC(curthread);
-	TD_SLOCKS_DEC(curthread);
+	lk_slocks_dec(curthread, lk);
 	return (wakeup_swapper);
 }
 
@@ -531,7 +588,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			 * waiters, if we fail to acquire the shared lock
 			 * loop back and retry.
 			 */
-			if (LK_CAN_SHARE(x, flags)) {
+			if (lk_can_share(curthread, lk, x, flags)) {
 				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
 				    x + LK_ONE_SHARER))
 					break;
@@ -615,7 +672,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 						    __func__, lk, spintries, i);
 					x = lk->lk_lock;
 					if ((x & LK_SHARE) == 0 ||
-					    LK_CAN_SHARE(x) != 0)
+					    lk_can_share(curthread, lk,
+					    x, flags) != 0)
 						break;
 					cpu_spinwait();
 				}
@@ -636,7 +694,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			 * if the lock can be acquired in shared mode, try
 			 * again.
 			 */
-			if (LK_CAN_SHARE(x, flags)) {
+			if (lk_can_share(curthread, lk, x, flags)) {
 				sleepq_release(&lk->lock_object);
 				continue;
 			}
@@ -698,7 +756,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
 			    line);
 			TD_LOCKS_INC(curthread);
-			TD_SLOCKS_INC(curthread);
+			lk_slocks_inc(curthread, lk);
 			STACK_SAVE(lk);
 		}
 		break;
@@ -719,7 +777,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			    line);
 			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
 			    LK_TRYWIT(flags), file, line);
-			TD_SLOCKS_DEC(curthread);
+			lk_slocks_dec(curthread, lk);
 			break;
 		}
 
@@ -974,7 +1032,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
 			    __func__, iwmesg, file, line);
 		}
-		TD_SLOCKS_INC(curthread);
+		lk_slocks_inc(curthread, lk);
 
 		/*
 		 * In order to preserve waiters flags, just spin.
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index f2ffab2..e4f9d64 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -390,7 +390,6 @@ compute_cn_lkflags(struct mount *mp, int lkflags, int cnflags)
 		lkflags &= ~LK_SHARED;
 		lkflags |= LK_EXCLUSIVE;
 	}
-	lkflags |= LK_NODDLKTREAT;
 	return (lkflags);
 }
 
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index ff0473d..a48523f 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -158,7 +158,6 @@ _lockmgr_args_rw(struct lock *lk, u_int flags, struct rwlock *ilk,
 #define	LK_RETRY	0x000400
 #define	LK_SLEEPFAIL	0x000800
 #define	LK_TIMELOCK	0x001000
-#define	LK_NODDLKTREAT	0x002000
 
 /*
  * Operations for lockmgr().
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index fac0915..c747576 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -237,6 +237,7 @@ struct thread {
 	short		td_rw_rlocks;	/* (k) Count of rwlock read locks. */
 	short		td_lk_slocks;	/* (k) Count of lockmgr shared locks. */
 	short		td_stopsched;	/* (k) Scheduler stopped. */
+	void		*td_lk_slocksp[8]; /* (k) Shared lockmgr locks held. */
 	struct turnstile *td_blocked;	/* (t) Lock thread is blocked on. */
 	const char	*td_lockname;	/* (t) Name of lock blocked on. */
 	LIST_HEAD(, turnstile) td_contested;	/* (q) Contested locks. */

