svn commit: r284998 - in stable/10: cddl/contrib/opensolaris/cmd/lockstat sys/kern sys/sys

Andriy Gapon avg at FreeBSD.org
Wed Jul 1 10:15:51 UTC 2015


Author: avg
Date: Wed Jul  1 10:15:49 2015
New Revision: 284998
URL: https://svnweb.freebsd.org/changeset/base/284998

Log:
  MFC r284297: several lockstat improvements
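
The rw-spin and sx-spin events added below are regular DTrace lockstat
probes, so the new data can also be pulled out with a one-liner instead of
the lockstat(1) front end.  A minimal sketch, assuming the argument layout
implied by the event table and the LOCKSTAT_RECORD4() calls in this change
(arg1 = spin time in nsec, arg2 == 0 for the exclusive/writer case):

  # Sum sx lock spin time, split into exclusive vs. shared attempts.
  # Assumes arg1 is nsec and arg2 selects exclusive (0) vs. shared,
  # matching the predicates in the lockstat.c event table below.
  dtrace -n 'lockstat:::sx-spin { @[arg2 == 0 ? "exclusive" : "shared"] = sum(arg1); }'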

Modified:
  stable/10/cddl/contrib/opensolaris/cmd/lockstat/lockstat.c
  stable/10/sys/kern/kern_mutex.c
  stable/10/sys/kern/kern_rwlock.c
  stable/10/sys/kern/kern_sx.c
  stable/10/sys/sys/lockstat.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/cddl/contrib/opensolaris/cmd/lockstat/lockstat.c
==============================================================================
--- stable/10/cddl/contrib/opensolaris/cmd/lockstat/lockstat.c	Wed Jul  1 09:25:23 2015	(r284997)
+++ stable/10/cddl/contrib/opensolaris/cmd/lockstat/lockstat.c	Wed Jul  1 10:15:49 2015	(r284998)
@@ -158,14 +158,22 @@ static ls_event_info_t g_event_info[LS_M
 	    "lockstat:::rw-block", "arg2 != 0 && arg3 == 1" },
 	{ 'C',	"Lock",	"R/W reader blocked by write wanted",	"nsec",
 	    "lockstat:::rw-block", "arg2 != 0 && arg3 == 0 && arg4" },
-	{ 'C',	"Lock",	"Unknown event (type 8)",		"units"	},
-	{ 'C',	"Lock",	"Unknown event (type 9)",		"units"	},
-	{ 'C',	"Lock",	"Unknown event (type 10)",		"units"	},
-	{ 'C',	"Lock",	"Unknown event (type 11)",		"units"	},
-	{ 'C',	"Lock",	"Unknown event (type 12)",		"units"	},
-	{ 'C',	"Lock",	"Unknown event (type 13)",		"units"	},
-	{ 'C',	"Lock",	"Unknown event (type 14)",		"units"	},
-	{ 'C',	"Lock",	"Unknown event (type 15)",		"units"	},
+	{ 'C',	"Lock",	"R/W writer spin on writer",		"nsec",
+	    "lockstat:::rw-spin", "arg2 == 0 && arg3 == 1" },
+	{ 'C',	"Lock",	"R/W writer spin on readers",		"nsec",
+	    "lockstat:::rw-spin", "arg2 == 0 && arg3 == 0 && arg4" },
+	{ 'C',	"Lock",	"R/W reader spin on writer",		"nsec",
+	    "lockstat:::rw-spin", "arg2 != 0 && arg3 == 1" },
+	{ 'C',	"Lock",	"R/W reader spin on write wanted",	"nsec",
+	    "lockstat:::rw-spin", "arg2 != 0 && arg3 == 0 && arg4" },
+	{ 'C',	"Lock",	"SX exclusive block",			"nsec",
+	    "lockstat:::sx-block", "arg2 == 0" },
+	{ 'C',	"Lock",	"SX shared block",			"nsec",
+	    "lockstat:::sx-block", "arg2 != 0" },
+	{ 'C',	"Lock",	"SX exclusive spin",			"nsec",
+	    "lockstat:::sx-spin", "arg2 == 0" },
+	{ 'C',	"Lock",	"SX shared spin",			"nsec",
+	    "lockstat:::sx-spin", "arg2 != 0" },
 	{ 'C',	"Lock",	"Unknown event (type 16)",		"units"	},
 	{ 'C',	"Lock",	"Unknown event (type 17)",		"units"	},
 	{ 'C',	"Lock",	"Unknown event (type 18)",		"units"	},
@@ -189,13 +197,17 @@ static ls_event_info_t g_event_info[LS_M
 	    "lockstat:::spin-release", NULL,
 	    "lockstat:::spin-acquire" },
 	{ 'H',	"Lock",	"R/W writer hold",			"nsec",
-	    "lockstat:::rw-release", "arg1 == 0",
-	    "lockstat:::rw-acquire" },
+	    "lockstat::rw_wunlock:rw-release", NULL,
+	    "lockstat::rw_wlock:rw-acquire" },
 	{ 'H',	"Lock",	"R/W reader hold",			"nsec",
-	    "lockstat:::rw-release", "arg1 != 0",
-	    "lockstat:::rw-acquire" },
-	{ 'H',	"Lock",	"Unknown event (type 36)",		"units"	},
-	{ 'H',	"Lock",	"Unknown event (type 37)",		"units"	},
+	    "lockstat::rw_runlock:rw-release", NULL,
+	    "lockstat::rw_rlock:rw-acquire" },
+	{ 'H',	"Lock",	"SX shared hold",			"nsec",
+	    "lockstat::sx_sunlock:sx-release", NULL,
+	    "lockstat::sx_slock:sx-acquire" },
+	{ 'H',	"Lock",	"SX exclusive hold",			"nsec",
+	    "lockstat::sx_xunlock:sx-release", NULL,
+	    "lockstat::sx_xlock:sx-acquire" },
 	{ 'H',	"Lock",	"Unknown event (type 38)",		"units"	},
 	{ 'H',	"Lock",	"Unknown event (type 39)",		"units"	},
 	{ 'H',	"Lock",	"Unknown event (type 40)",		"units"	},

Modified: stable/10/sys/kern/kern_mutex.c
==============================================================================
--- stable/10/sys/kern/kern_mutex.c	Wed Jul  1 09:25:23 2015	(r284997)
+++ stable/10/sys/kern/kern_mutex.c	Wed Jul  1 10:15:49 2015	(r284998)
@@ -388,6 +388,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
 	uint64_t spin_cnt = 0;
 	uint64_t sleep_cnt = 0;
 	int64_t sleep_time = 0;
+	int64_t all_time = 0;
 #endif
 
 	if (SCHEDULER_STOPPED())
@@ -418,6 +419,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
 		CTR4(KTR_LOCK,
 		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
 		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
+#ifdef KDTRACE_HOOKS
+	all_time -= lockstat_nsecs();
+#endif
 
 	while (!_mtx_obtain_lock(m, tid)) {
 #ifdef KDTRACE_HOOKS
@@ -521,6 +525,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
 		sleep_cnt++;
 #endif
 	}
+#ifdef KDTRACE_HOOKS
+	all_time += lockstat_nsecs();
+#endif
 #ifdef KTR
 	if (cont_logged) {
 		CTR4(KTR_CONTENTION,
@@ -538,7 +545,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
 	 * Only record the loops spinning and not sleeping. 
 	 */
 	if (spin_cnt > sleep_cnt)
-		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
+		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (all_time - sleep_time));
 #endif
 }
 
@@ -578,6 +585,9 @@ _mtx_lock_spin_cookie(volatile uintptr_t
 	int contested = 0;
 	uint64_t waittime = 0;
 #endif
+#ifdef KDTRACE_HOOKS
+	int64_t spin_time = 0;
+#endif
 
 	if (SCHEDULER_STOPPED())
 		return;
@@ -593,6 +603,9 @@ _mtx_lock_spin_cookie(volatile uintptr_t
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
 	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
+#ifdef KDTRACE_HOOKS
+	spin_time -= lockstat_nsecs();
+#endif
 	while (!_mtx_obtain_lock(m, tid)) {
 
 		/* Give interrupts a chance while we spin. */
@@ -610,6 +623,9 @@ _mtx_lock_spin_cookie(volatile uintptr_t
 		}
 		spinlock_enter();
 	}
+#ifdef KDTRACE_HOOKS
+	spin_time += lockstat_nsecs();
+#endif
 
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
@@ -618,7 +634,7 @@ _mtx_lock_spin_cookie(volatile uintptr_t
 
 	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
 	    contested, waittime, (file), (line));
-	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
+	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, spin_time);
 }
 #endif /* SMP */
 
@@ -633,7 +649,7 @@ thread_lock_flags_(struct thread *td, in
 	uint64_t waittime = 0;
 #endif
 #ifdef KDTRACE_HOOKS
-	uint64_t spin_cnt = 0;
+	int64_t spin_time = 0;
 #endif
 
 	i = 0;
@@ -642,6 +658,9 @@ thread_lock_flags_(struct thread *td, in
 	if (SCHEDULER_STOPPED())
 		return;
 
+#ifdef KDTRACE_HOOKS
+	spin_time -= lockstat_nsecs();
+#endif
 	for (;;) {
 retry:
 		spinlock_enter();
@@ -658,9 +677,6 @@ retry:
 		WITNESS_CHECKORDER(&m->lock_object,
 		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
 		while (!_mtx_obtain_lock(m, tid)) {
-#ifdef KDTRACE_HOOKS
-			spin_cnt++;
-#endif
 			if (m->mtx_lock == tid) {
 				m->mtx_recurse++;
 				break;
@@ -689,17 +705,17 @@ retry:
 		if (m == td->td_lock)
 			break;
 		__mtx_unlock_spin(m);	/* does spinlock_exit() */
+	}
 #ifdef KDTRACE_HOOKS
-		spin_cnt++;
+	spin_time += lockstat_nsecs();
 #endif
-	}
 	if (m->mtx_recurse == 0)
 		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
 		    m, contested, waittime, (file), (line));
 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
 	    line);
 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
-	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
+	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_time);
 }
 
 struct mtx *

Modified: stable/10/sys/kern/kern_rwlock.c
==============================================================================
--- stable/10/sys/kern/kern_rwlock.c	Wed Jul  1 09:25:23 2015	(r284997)
+++ stable/10/sys/kern/kern_rwlock.c	Wed Jul  1 10:15:49 2015	(r284998)
@@ -299,6 +299,9 @@ __rw_try_wlock(volatile uintptr_t *c, co
 	if (rval) {
 		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
 		    file, line);
+		if (!rw_recursed(rw))
+			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE,
+			    rw, 0, 0, file, line);
 		curthread->td_locks++;
 	}
 	return (rval);
@@ -353,9 +356,11 @@ __rw_rlock(volatile uintptr_t *c, const 
 #endif
 	uintptr_t v;
 #ifdef KDTRACE_HOOKS
+	uintptr_t state;
 	uint64_t spin_cnt = 0;
 	uint64_t sleep_cnt = 0;
 	int64_t sleep_time = 0;
+	int64_t all_time = 0;
 #endif
 
 	if (SCHEDULER_STOPPED())
@@ -373,6 +378,10 @@ __rw_rlock(volatile uintptr_t *c, const 
 	    rw->lock_object.lo_name, file, line));
 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
 
+#ifdef KDTRACE_HOOKS
+	all_time -= lockstat_nsecs();
+	state = rw->rw_lock;
+#endif
 	for (;;) {
 #ifdef KDTRACE_HOOKS
 		spin_cnt++;
@@ -532,7 +541,19 @@ __rw_rlock(volatile uintptr_t *c, const 
 			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
 			    __func__, rw);
 	}
+#ifdef KDTRACE_HOOKS
+	all_time += lockstat_nsecs();
+	if (sleep_time)
+		LOCKSTAT_RECORD4(LS_RW_RLOCK_BLOCK, rw, sleep_time,
+		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
+		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
 
+	/* Record only the loops spinning and not sleeping. */
+	if (spin_cnt > sleep_cnt)
+		LOCKSTAT_RECORD4(LS_RW_RLOCK_SPIN, rw, all_time - sleep_time,
+		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
+		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
+#endif
 	/*
 	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
 	 * however.  turnstiles don't like owners changing between calls to
@@ -544,16 +565,6 @@ __rw_rlock(volatile uintptr_t *c, const 
 	WITNESS_LOCK(&rw->lock_object, 0, file, line);
 	curthread->td_locks++;
 	curthread->td_rw_rlocks++;
-#ifdef KDTRACE_HOOKS
-	if (sleep_time)
-		LOCKSTAT_RECORD1(LS_RW_RLOCK_BLOCK, rw, sleep_time);
-
-	/*
-	 * Record only the loops spinning and not sleeping. 
-	 */
-	if (spin_cnt > sleep_cnt)
-		LOCKSTAT_RECORD1(LS_RW_RLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
-#endif
 }
 
 int
@@ -581,6 +592,8 @@ __rw_try_rlock(volatile uintptr_t *c, co
 			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
 			    line);
 			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
+			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE,
+			    rw, 0, 0, file, line);
 			curthread->td_locks++;
 			curthread->td_rw_rlocks++;
 			return (1);
@@ -725,9 +738,11 @@ __rw_wlock_hard(volatile uintptr_t *c, u
 	int contested = 0;
 #endif
 #ifdef KDTRACE_HOOKS
+	uintptr_t state;
 	uint64_t spin_cnt = 0;
 	uint64_t sleep_cnt = 0;
 	int64_t sleep_time = 0;
+	int64_t all_time = 0;
 #endif
 
 	if (SCHEDULER_STOPPED())
@@ -749,6 +764,10 @@ __rw_wlock_hard(volatile uintptr_t *c, u
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
 
+#ifdef KDTRACE_HOOKS
+	all_time -= lockstat_nsecs();
+	state = rw->rw_lock;
+#endif
 	while (!_rw_write_lock(rw, tid)) {
 #ifdef KDTRACE_HOOKS
 		spin_cnt++;
@@ -886,18 +905,21 @@ __rw_wlock_hard(volatile uintptr_t *c, u
 		spintries = 0;
 #endif
 	}
-	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
-	    waittime, file, line);
 #ifdef KDTRACE_HOOKS
+	all_time += lockstat_nsecs();
 	if (sleep_time)
-		LOCKSTAT_RECORD1(LS_RW_WLOCK_BLOCK, rw, sleep_time);
+		LOCKSTAT_RECORD4(LS_RW_WLOCK_BLOCK, rw, sleep_time,
+		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
+		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
 
-	/*
-	 * Record only the loops spinning and not sleeping.
-	 */ 
+	/* Record only the loops spinning and not sleeping. */
 	if (spin_cnt > sleep_cnt)
-		LOCKSTAT_RECORD1(LS_RW_WLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
+		LOCKSTAT_RECORD4(LS_RW_WLOCK_SPIN, rw, all_time - sleep_time,
+		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
+		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
 #endif
+	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
+	    waittime, file, line);
 }
 
 /*

Modified: stable/10/sys/kern/kern_sx.c
==============================================================================
--- stable/10/sys/kern/kern_sx.c	Wed Jul  1 09:25:23 2015	(r284997)
+++ stable/10/sys/kern/kern_sx.c	Wed Jul  1 10:15:49 2015	(r284998)
@@ -288,6 +288,8 @@ sx_try_slock_(struct sx *sx, const char 
 		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
 			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
 			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
+			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE,
+			    sx, 0, 0, file, line);
 			curthread->td_locks++;
 			return (1);
 		}
@@ -348,6 +350,9 @@ sx_try_xlock_(struct sx *sx, const char 
 	if (rval) {
 		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
 		    file, line);
+		if (!sx_recursed(sx))
+			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
+			    sx, 0, 0, file, line);
 		curthread->td_locks++;
 	}
 
@@ -512,9 +517,11 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 #endif
 	int error = 0;
 #ifdef	KDTRACE_HOOKS
+	uintptr_t state;
 	uint64_t spin_cnt = 0;
 	uint64_t sleep_cnt = 0;
 	int64_t sleep_time = 0;
+	int64_t all_time = 0;
 #endif
 
 	if (SCHEDULER_STOPPED())
@@ -536,6 +543,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
 
+#ifdef KDTRACE_HOOKS
+	all_time -= lockstat_nsecs();
+	state = sx->sx_lock;
+#endif
 	while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
 #ifdef KDTRACE_HOOKS
 		spin_cnt++;
@@ -708,17 +719,21 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
 			    __func__, sx);
 	}
-
-	GIANT_RESTORE();
-	if (!error)
-		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
-		    contested, waittime, file, line);
 #ifdef KDTRACE_HOOKS
+	all_time += lockstat_nsecs();
 	if (sleep_time)
-		LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
+		LOCKSTAT_RECORD4(LS_SX_XLOCK_BLOCK, sx, sleep_time,
+		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
+		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
 	if (spin_cnt > sleep_cnt)
-		LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
+		LOCKSTAT_RECORD4(LS_SX_XLOCK_SPIN, sx, all_time - sleep_time,
+		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
+		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
 #endif
+	if (!error)
+		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
+		    contested, waittime, file, line);
+	GIANT_RESTORE();
 	return (error);
 }
 
@@ -804,14 +819,21 @@ _sx_slock_hard(struct sx *sx, int opts, 
 	uintptr_t x;
 	int error = 0;
 #ifdef KDTRACE_HOOKS
+	uintptr_t state;
 	uint64_t spin_cnt = 0;
 	uint64_t sleep_cnt = 0;
 	int64_t sleep_time = 0;
+	int64_t all_time = 0;
 #endif
 
 	if (SCHEDULER_STOPPED())
 		return (0);
 
+#ifdef KDTRACE_HOOKS
+	state = sx->sx_lock;
+	all_time -= lockstat_nsecs();
+#endif
+
 	/*
 	 * As with rwlocks, we don't make any attempt to try to block
 	 * shared locks once there is an exclusive waiter.
@@ -961,15 +983,20 @@ _sx_slock_hard(struct sx *sx, int opts, 
 			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
 			    __func__, sx);
 	}
-	if (error == 0)
-		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
-		    contested, waittime, file, line);
 #ifdef KDTRACE_HOOKS
+	all_time += lockstat_nsecs();
 	if (sleep_time)
-		LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
+		LOCKSTAT_RECORD4(LS_SX_SLOCK_BLOCK, sx, sleep_time,
+		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
+		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
 	if (spin_cnt > sleep_cnt)
-		LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
+		LOCKSTAT_RECORD4(LS_SX_SLOCK_SPIN, sx, all_time - sleep_time,
+		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
+		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
 #endif
+	if (error == 0)
+		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
+		    contested, waittime, file, line);
 	GIANT_RESTORE();
 	return (error);
 }

Modified: stable/10/sys/sys/lockstat.h
==============================================================================
--- stable/10/sys/sys/lockstat.h	Wed Jul  1 09:25:23 2015	(r284997)
+++ stable/10/sys/sys/lockstat.h	Wed Jul  1 10:15:49 2015	(r284998)
@@ -198,6 +198,9 @@ extern uint64_t lockstat_nsecs(void);
 		(*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0, 0, 0);     \
 } while (0)
 
+#define	LOCKSTAT_WRITER		0
+#define	LOCKSTAT_READER		1
+
 #else	/* !KDTRACE_HOOKS */
 
 #define	LOCKSTAT_RECORD(probe, lp, arg1)

