svn commit: r192853 - in head/sys: cddl/contrib/opensolaris/uts/common/dtrace cddl/dev/lockstat conf kern modules/dtrace modules/dtrace/dtraceall sys

Stacey Son <sson@FreeBSD.org>
Tue May 26 20:28:24 UTC 2009


Author: sson
Date: Tue May 26 20:28:22 2009
New Revision: 192853
URL: http://svn.freebsd.org/changeset/base/192853

Log:
  Add the OpenSolaris dtrace lockstat provider.  The lockstat provider
  adds probes for mutexes, reader/writer locks, and shared/exclusive
  locks to gather contention statistics and other locking information
  for dtrace scripts, the lockstat(1M) command, and other potential
  consumers.
  
  Reviewed by:	attilio jhb jb
  Approved by:	gnn (mentor)
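
  For context: the machine-independent lock code changed below does not
  call DTrace directly.  It reports through two hooks declared in the new
  kern_lockstat.c -- lockstat_probemap[] and lockstat_probe_func -- so the
  lock code works even when DTrace kernel support is not present.  The new
  sys/sys/lockstat.h is not shown in this (truncated) mail; based on those
  hooks, a record macro presumably reduces to something like the sketch
  below.  The macro body here is an assumption for illustration only; the
  names LOCKSTAT_RECORD1, lockstat_probemap, and lockstat_probe_func are
  taken from the diff itself.

/*
 * Illustrative sketch, not the committed sys/sys/lockstat.h: fire the
 * probe only when the provider has enabled it (non-zero probemap slot).
 */
#define	LOCKSTAT_RECORD1(probe, lp, arg1) do {				\
	uint32_t id;							\
									\
	if ((id = lockstat_probemap[(probe)]) != 0)			\
		(*lockstat_probe_func)(id, (uintptr_t)(lp),		\
		    (uintptr_t)(arg1), 0, 0, 0);			\
} while (0)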

Added:
  head/sys/cddl/dev/lockstat/
  head/sys/cddl/dev/lockstat/lockstat.c   (contents, props changed)
  head/sys/kern/kern_lockstat.c   (contents, props changed)
  head/sys/sys/lockstat.h   (contents, props changed)
Modified:
  head/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
  head/sys/conf/files
  head/sys/kern/kern_lock.c
  head/sys/kern/kern_mutex.c
  head/sys/kern/kern_rmlock.c
  head/sys/kern/kern_rwlock.c
  head/sys/kern/kern_sx.c
  head/sys/modules/dtrace/Makefile
  head/sys/modules/dtrace/dtraceall/dtraceall.c
  head/sys/sys/lock.h
  head/sys/sys/mutex.h
  head/sys/sys/rwlock.h
  head/sys/sys/sx.h

Modified: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c	Tue May 26 20:22:03 2009	(r192852)
+++ head/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c	Tue May 26 20:28:22 2009	(r192853)
@@ -122,6 +122,7 @@
 #include <sys/sysctl.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
+#include <sys/rwlock.h>
 #include <sys/sx.h>
 #include <sys/dtrace_bsd.h>
 #include <netinet/in.h>
@@ -3168,14 +3169,11 @@ dtrace_dif_subr(uint_t subr, uint_t rd, 
 		uintptr_t rw;
 	} r;
 #else
+	struct thread *lowner;
 	union {
-		struct mtx *mi;
-		uintptr_t mx;
-	} m;
-	union {
-		struct sx *si;
-		uintptr_t sx;
-	} s;
+		struct lock_object *li;
+		uintptr_t lx;
+	} l;
 #endif
 
 	switch (subr) {
@@ -3272,75 +3270,83 @@ dtrace_dif_subr(uint_t subr, uint_t rd, 
 		break;
 
 #else
-	/* 
-         * XXX - The following code works because mutex, rwlocks, & sxlocks
-         *       all have similar data structures in FreeBSD.  This may not be
-         *	 good if someone changes one of the lock data structures.
-	 * 	 Ideally, it would be nice if all these shared a common lock 
-	 * 	 object.
-         */
 	case DIF_SUBR_MUTEX_OWNED:
-		/* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 
-		m.mx = tupregs[0].dttk_value;
-
-#ifdef DOODAD
-		if (LO_CLASSINDEX(&(m.mi->lock_object)) < 2) { 
-			regs[rd] = !(m.mi->mtx_lock & MTX_UNOWNED);
-		} else {	
-			regs[rd] = !(m.mi->mtx_lock & SX_UNLOCKED);
+		if (!dtrace_canload(tupregs[0].dttk_value,
+			sizeof (struct lock_object), mstate, vstate)) {
+			regs[rd] = 0;
+			break;
 		}
-#endif
+		l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
+		regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
 		break;
 
 	case DIF_SUBR_MUTEX_OWNER:
-		/* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 
-		m.mx = tupregs[0].dttk_value;
-
-		if (LO_CLASSINDEX(&(m.mi->lock_object)) < 2) { 
-			regs[rd] = m.mi->mtx_lock & ~MTX_FLAGMASK;
-		} else {
-			if (!(m.mi->mtx_lock & SX_LOCK_SHARED)) 
-				regs[rd] = SX_OWNER(m.mi->mtx_lock);
-			else
-				regs[rd] = 0;
+		if (!dtrace_canload(tupregs[0].dttk_value,
+			sizeof (struct lock_object), mstate, vstate)) {
+			regs[rd] = 0;
+			break;
 		}
+		l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
+		LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
+		regs[rd] = (uintptr_t)lowner;
 		break;
 
 	case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
-		/* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 
-		m.mx = tupregs[0].dttk_value;
-
-		regs[rd] = (LO_CLASSINDEX(&(m.mi->lock_object)) != 0);
+		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
+		    mstate, vstate)) {
+			regs[rd] = 0;
+			break;
+		}
+		l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
+		/* XXX - should be only LC_SLEEPABLE? */
+		regs[rd] = (LOCK_CLASS(l.li)->lc_flags &
+		    (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0;
 		break;
 
 	case DIF_SUBR_MUTEX_TYPE_SPIN:
-		/* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 
-		m.mx = tupregs[0].dttk_value;
-
-		regs[rd] = (LO_CLASSINDEX(&(m.mi->lock_object)) == 0);
+		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
+		    mstate, vstate)) {
+			regs[rd] = 0;
+			break;
+		}
+		l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
+		regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0;
 		break;
 
 	case DIF_SUBR_RW_READ_HELD: 
 	case DIF_SUBR_SX_SHARED_HELD: 
-		/* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 
-		s.sx = tupregs[0].dttk_value;
-		regs[rd] = ((s.si->sx_lock & SX_LOCK_SHARED)  && 
-			    (SX_OWNER(s.si->sx_lock) >> SX_SHARERS_SHIFT) != 0);
+		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
+		    mstate, vstate)) {
+			regs[rd] = 0;
+			break;
+		}
+		l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
+		regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
+		    lowner == NULL;
 		break;
 
 	case DIF_SUBR_RW_WRITE_HELD:
 	case DIF_SUBR_SX_EXCLUSIVE_HELD:
-		/* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 
-		s.sx = tupregs[0].dttk_value;
-		regs[rd] = (SX_OWNER(s.si->sx_lock) == (uintptr_t) curthread); 
+		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
+		    mstate, vstate)) {
+			regs[rd] = 0;
+			break;
+		}
+		l.lx = dtrace_loadptr(tupregs[0].dttk_value);
+		LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
+		regs[rd] = (lowner == curthread);
 		break;
 
 	case DIF_SUBR_RW_ISWRITER:
 	case DIF_SUBR_SX_ISEXCLUSIVE:
-		/* XXX - need to use dtrace_canload() and dtrace_loadptr() */ 
-		s.sx = tupregs[0].dttk_value;
-		regs[rd] = ((s.si->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS) ||
-		            !(s.si->sx_lock & SX_LOCK_SHARED));
+		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
+		    mstate, vstate)) {
+			regs[rd] = 0;
+			break;
+		}
+		l.lx = dtrace_loadptr(tupregs[0].dttk_value);
+		regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
+		    lowner != NULL;
 		break;
 #endif /* ! defined(sun) */
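
The dtrace.c change above replaces layout-dependent peeking at struct mtx
and struct sx with the generic lock_object interface: each lock class now
exports an lc_owner method (added in the kern_*.c hunks below), and the
DIF subroutines simply ask the class whether the lock is held and by whom.
A minimal sketch of that calling convention, assuming the usual kernel
headers; the wrapper function is illustrative, only LOCK_CLASS() and the
lc_owner contract come from the diff:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/proc.h>

/*
 * lc_owner (present only in kernels built with KDTRACE_HOOKS) returns
 * non-zero when the lock is held and stores the exclusive owner, or NULL
 * for a shared/read hold, through 'owner'.
 */
static int
lock_is_write_held(struct lock_object *lo)
{
	struct thread *owner;

	return (LOCK_CLASS(lo)->lc_owner(lo, &owner) && owner != NULL);
}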
 

Added: head/sys/cddl/dev/lockstat/lockstat.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/cddl/dev/lockstat/lockstat.c	Tue May 26 20:28:22 2009	(r192853)
@@ -0,0 +1,327 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * Portions Copyright (c) 2008-2009 Stacey Son <sson@FreeBSD.org>
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "opt_kdtrace.h"
+
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/linker.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+
+#include <sys/dtrace.h>
+#include <sys/lockstat.h>
+
+#if defined(__i386__) || defined(__amd64__)
+#define LOCKSTAT_AFRAMES 1
+#else
+#error "architecture not supported"
+#endif
+
+static d_open_t lockstat_open;
+static void     lockstat_provide(void *, dtrace_probedesc_t *);
+static void     lockstat_destroy(void *, dtrace_id_t, void *);
+static void     lockstat_enable(void *, dtrace_id_t, void *);
+static void     lockstat_disable(void *, dtrace_id_t, void *);
+static void     lockstat_load(void *);
+static int     	lockstat_unload(void);
+
+
+typedef struct lockstat_probe {
+	char		*lsp_func;
+	char		*lsp_name;
+	int		lsp_probe;
+	dtrace_id_t	lsp_id;
+#ifdef __FreeBSD__
+	int		lsp_frame;
+#endif
+} lockstat_probe_t;
+
+#ifdef __FreeBSD__
+lockstat_probe_t lockstat_probes[] =
+{
+  /* Spin Locks */
+  { LS_MTX_SPIN_LOCK,	LSS_ACQUIRE,	LS_MTX_SPIN_LOCK_ACQUIRE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_MTX_SPIN_LOCK, 	LSS_SPIN,	LS_MTX_SPIN_LOCK_SPIN,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_MTX_SPIN_UNLOCK,	LSS_RELEASE,	LS_MTX_SPIN_UNLOCK_RELEASE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  /* Adaptive Locks */
+  { LS_MTX_LOCK,	LSA_ACQUIRE,	LS_MTX_LOCK_ACQUIRE,
+	  DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) },
+  { LS_MTX_LOCK,	LSA_BLOCK,	LS_MTX_LOCK_BLOCK,
+	  DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) },
+  { LS_MTX_LOCK,	LSA_SPIN,	LS_MTX_LOCK_SPIN,
+	  DTRACE_IDNONE, (LOCKSTAT_AFRAMES + 1) },
+  { LS_MTX_UNLOCK,	LSA_RELEASE,	LS_MTX_UNLOCK_RELEASE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_MTX_TRYLOCK,	LSA_ACQUIRE,	LS_MTX_TRYLOCK_ACQUIRE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  /* Reader/Writer Locks */
+  { LS_RW_RLOCK,	LSR_ACQUIRE,	LS_RW_RLOCK_ACQUIRE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_RW_RLOCK,	LSR_BLOCK,	LS_RW_RLOCK_BLOCK,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_RW_RLOCK,	LSR_SPIN,	LS_RW_RLOCK_SPIN,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_RW_RUNLOCK,	LSR_RELEASE,	LS_RW_RUNLOCK_RELEASE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_RW_WLOCK,	LSR_ACQUIRE,	LS_RW_WLOCK_ACQUIRE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_RW_WLOCK,	LSR_BLOCK,	LS_RW_WLOCK_BLOCK,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_RW_WLOCK,	LSR_SPIN,	LS_RW_WLOCK_SPIN,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_RW_WUNLOCK,	LSR_RELEASE,	LS_RW_WUNLOCK_RELEASE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_RW_TRYUPGRADE,	LSR_UPGRADE,   	LS_RW_TRYUPGRADE_UPGRADE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_RW_DOWNGRADE,	LSR_DOWNGRADE, 	LS_RW_DOWNGRADE_DOWNGRADE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  /* Shared/Exclusive Locks */
+  { LS_SX_SLOCK,	LSX_ACQUIRE,	LS_SX_SLOCK_ACQUIRE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_SX_SLOCK,	LSX_BLOCK,	LS_SX_SLOCK_BLOCK,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_SX_SLOCK,	LSX_SPIN,	LS_SX_SLOCK_SPIN,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_SX_SUNLOCK,	LSX_RELEASE,	LS_SX_SUNLOCK_RELEASE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_SX_XLOCK,	LSX_ACQUIRE,	LS_SX_XLOCK_ACQUIRE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_SX_XLOCK,	LSX_BLOCK,	LS_SX_XLOCK_BLOCK,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_SX_XLOCK,	LSX_SPIN,	LS_SX_XLOCK_SPIN,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_SX_XUNLOCK,	LSX_RELEASE,	LS_SX_XUNLOCK_RELEASE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_SX_TRYUPGRADE,	LSX_UPGRADE,	LS_SX_TRYUPGRADE_UPGRADE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { LS_SX_DOWNGRADE,	LSX_DOWNGRADE,	LS_SX_DOWNGRADE_DOWNGRADE,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  /* Thread Locks */
+  { LS_THREAD_LOCK,	LST_SPIN,	LS_THREAD_LOCK_SPIN,
+	  DTRACE_IDNONE, LOCKSTAT_AFRAMES },
+  { NULL }
+};
+#else
+#error "OS not supported"
+#endif
+
+
+static struct cdevsw lockstat_cdevsw = {
+	.d_version	= D_VERSION,
+	.d_open		= lockstat_open,
+	.d_name		= "lockstat",
+};
+
+static struct cdev		*lockstat_cdev; 
+static dtrace_provider_id_t 	lockstat_id;
+
+/*ARGSUSED*/
+static void
+lockstat_enable(void *arg, dtrace_id_t id, void *parg)
+{
+	lockstat_probe_t *probe = parg;
+
+	ASSERT(!lockstat_probemap[probe->lsp_probe]);
+
+	lockstat_probemap[probe->lsp_probe] = id;
+#ifdef DOODAD
+	membar_producer();
+#endif
+
+	lockstat_probe_func = dtrace_probe;
+#ifdef DOODAD
+	membar_producer();
+
+	lockstat_hot_patch();
+	membar_producer();
+#endif
+}
+
+/*ARGSUSED*/
+static void
+lockstat_disable(void *arg, dtrace_id_t id, void *parg)
+{
+	lockstat_probe_t *probe = parg;
+	int i;
+
+	ASSERT(lockstat_probemap[probe->lsp_probe]);
+
+	lockstat_probemap[probe->lsp_probe] = 0;
+#ifdef DOODAD
+	lockstat_hot_patch();
+	membar_producer();
+#endif
+
+	/*
+	 * See if we have any probes left enabled.
+	 */
+	for (i = 0; i < LS_NPROBES; i++) {
+		if (lockstat_probemap[i]) {
+			/*
+			 * This probe is still enabled.  We don't need to deal
+			 * with waiting for all threads to be out of the
+			 * lockstat critical sections; just return.
+			 */
+			return;
+		}
+	}
+
+}
+
+/*ARGSUSED*/
+static int
+lockstat_open(struct cdev *dev __unused, int oflags __unused, 
+	      int devtype __unused, struct thread *td __unused)
+{
+	return (0);
+}
+
+/*ARGSUSED*/
+static void
+lockstat_provide(void *arg, dtrace_probedesc_t *desc)
+{
+	int i = 0;
+
+	for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) {
+		lockstat_probe_t *probe = &lockstat_probes[i];
+
+		if (dtrace_probe_lookup(lockstat_id, "kernel",
+		    probe->lsp_func, probe->lsp_name) != 0)
+			continue;
+
+		ASSERT(!probe->lsp_id);
+#ifdef __FreeBSD__
+		probe->lsp_id = dtrace_probe_create(lockstat_id,
+		    "kernel", probe->lsp_func, probe->lsp_name,
+		    probe->lsp_frame, probe);
+#else
+		probe->lsp_id = dtrace_probe_create(lockstat_id,
+		    "kernel", probe->lsp_func, probe->lsp_name,
+		    LOCKSTAT_AFRAMES, probe);
+#endif
+	}
+}
+
+/*ARGSUSED*/
+static void
+lockstat_destroy(void *arg, dtrace_id_t id, void *parg)
+{
+	lockstat_probe_t *probe = parg;
+
+	ASSERT(!lockstat_probemap[probe->lsp_probe]);
+	probe->lsp_id = 0;
+}
+
+static dtrace_pattr_t lockstat_attr = {
+{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
+{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
+{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
+{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
+{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
+};
+
+static dtrace_pops_t lockstat_pops = {
+	lockstat_provide,
+	NULL,
+	lockstat_enable,
+	lockstat_disable,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	lockstat_destroy
+};
+
+static void
+lockstat_load(void *dummy)
+{
+	/* Create the /dev/dtrace/lockstat entry. */
+	lockstat_cdev = make_dev(&lockstat_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
+	    "dtrace/lockstat");
+
+	if (dtrace_register("lockstat", &lockstat_attr, DTRACE_PRIV_USER,
+	    NULL, &lockstat_pops, NULL, &lockstat_id) != 0)
+	        return;
+}
+
+static int
+lockstat_unload()
+{
+	int error = 0;
+
+	if ((error = dtrace_unregister(lockstat_id)) != 0)
+	    return (error);
+
+	destroy_dev(lockstat_cdev);
+
+	return (error);
+}
+
+/* ARGSUSED */
+static int
+lockstat_modevent(module_t mod __unused, int type, void *data __unused)
+{
+	int error = 0;
+
+	switch (type) {
+	case MOD_LOAD:
+		break;
+
+	case MOD_UNLOAD:
+		break;
+
+	case MOD_SHUTDOWN:
+		break;
+
+	default:
+		error = EOPNOTSUPP;
+		break;
+	}
+	return (error);
+}
+
+SYSINIT(lockstat_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, lockstat_load, NULL);
+SYSUNINIT(lockstat_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, lockstat_unload, NULL);
+
+DEV_MODULE(lockstat, lockstat_modevent, NULL);
+MODULE_VERSION(lockstat, 1);
+MODULE_DEPEND(lockstat, dtrace, 1, 1, 1);
+MODULE_DEPEND(lockstat, opensolaris, 1, 1, 1);

Modified: head/sys/conf/files
==============================================================================
--- head/sys/conf/files	Tue May 26 20:22:03 2009	(r192852)
+++ head/sys/conf/files	Tue May 26 20:28:22 2009	(r192853)
@@ -1917,6 +1917,7 @@ kern/kern_ktrace.c		standard
 kern/kern_linker.c		standard
 kern/kern_lock.c		standard
 kern/kern_lockf.c		standard
+kern/kern_lockstat.c		optional kdtrace_hooks
 kern/kern_malloc.c		standard
 kern/kern_mbuf.c		standard
 kern/kern_mib.c			standard

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c	Tue May 26 20:22:03 2009	(r192852)
+++ head/sys/kern/kern_lock.c	Tue May 26 20:28:22 2009	(r192853)
@@ -27,6 +27,7 @@
  */
 
 #include "opt_ddb.h"
+#include "opt_kdtrace.h"
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
@@ -126,6 +127,9 @@ static void	 assert_lockmgr(struct lock_
 static void	 db_show_lockmgr(struct lock_object *lock);
 #endif
 static void	 lock_lockmgr(struct lock_object *lock, int how);
+#ifdef KDTRACE_HOOKS
+static int	 owner_lockmgr(struct lock_object *lock, struct thread **owner);
+#endif
 static int	 unlock_lockmgr(struct lock_object *lock);
 
 struct lock_class lock_class_lockmgr = {
@@ -136,7 +140,10 @@ struct lock_class lock_class_lockmgr = {
 	.lc_ddb_show = db_show_lockmgr,
 #endif
 	.lc_lock = lock_lockmgr,
-	.lc_unlock = unlock_lockmgr
+	.lc_unlock = unlock_lockmgr,
+#ifdef KDTRACE_HOOKS
+	.lc_owner = owner_lockmgr,
+#endif
 };
 
 static __inline struct thread *
@@ -293,6 +300,15 @@ unlock_lockmgr(struct lock_object *lock)
 	panic("lockmgr locks do not support sleep interlocking");
 }
 
+#ifdef KDTRACE_HOOKS
+static int
+owner_lockmgr(struct lock_object *lock, struct thread **owner)
+{
+
+	panic("lockmgr locks do not support owner inquiring");
+}
+#endif
+
 void
 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
 {

Added: head/sys/kern/kern_lockstat.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/kern/kern_lockstat.c	Tue May 26 20:28:22 2009	(r192853)
@@ -0,0 +1,64 @@
+/*-
+ * Copyright 2008-2009 Stacey Son <sson@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Backend for the lock tracing (lockstat) kernel support. This is required 
+ * to allow a module to load even though DTrace kernel support may not be 
+ * present. 
+ *
+ */
+
+#include "opt_kdtrace.h"
+
+#ifdef KDTRACE_HOOKS
+
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/lockstat.h>
+
+/*
+ * The following must match the type definition of dtrace_probe.  It is  
+ * defined this way to avoid having to rely on CDDL code.
+ */
+uint32_t lockstat_probemap[LS_NPROBES];
+void (*lockstat_probe_func)(uint32_t, uintptr_t, uintptr_t,
+    uintptr_t, uintptr_t, uintptr_t);
+
+
+uint64_t 
+lockstat_nsecs(void)
+{
+	struct bintime bt;
+	uint64_t ns;
+
+	binuptime(&bt);
+	ns = bt.sec * (uint64_t)1000000000;
+	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
+	return (ns);
+}
+
+#endif /* KDTRACE_HOOKS */
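
lockstat_nsecs() above converts a bintime (whole seconds plus a 64-bit
binary fraction of a second) to nanoseconds without 128-bit arithmetic:
only the top 32 bits of the fraction are used, so the result is
sec * 10^9 + (10^9 * (frac >> 32)) >> 32, i.e. frac / 2^64 scaled to 10^9
with sub-nanosecond truncation error.  A small self-contained check of
that arithmetic (userland harness; the struct name and test values are
illustrative, only the conversion itself mirrors the code above):

#include <stdint.h>
#include <stdio.h>

/* Userland stand-in for the kernel's struct bintime. */
struct bintime_sketch {
	int64_t		sec;
	uint64_t	frac;	/* fraction of a second, units of 2^-64 s */
};

static uint64_t
bintime_to_ns(const struct bintime_sketch *bt)
{
	uint64_t ns;

	ns = bt->sec * (uint64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt->frac >> 32)) >> 32;
	return (ns);
}

int
main(void)
{
	/* 2.5 seconds: 0x8000000000000000 is exactly half of 2^64. */
	struct bintime_sketch bt = { 2, 0x8000000000000000ULL };

	printf("%ju\n", (uintmax_t)bintime_to_ns(&bt));	/* prints 2500000000 */
	return (0);
}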

Modified: head/sys/kern/kern_mutex.c
==============================================================================
--- head/sys/kern/kern_mutex.c	Tue May 26 20:22:03 2009	(r192852)
+++ head/sys/kern/kern_mutex.c	Tue May 26 20:28:22 2009	(r192853)
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
 #include "opt_adaptive_mutexes.h"
 #include "opt_ddb.h"
 #include "opt_global.h"
+#include "opt_kdtrace.h"
 #include "opt_sched.h"
 
 #include <sys/param.h>
@@ -90,6 +91,9 @@ static void	db_show_mtx(struct lock_obje
 #endif
 static void	lock_mtx(struct lock_object *lock, int how);
 static void	lock_spin(struct lock_object *lock, int how);
+#ifdef KDTRACE_HOOKS
+static int	owner_mtx(struct lock_object *lock, struct thread **owner);
+#endif
 static int	unlock_mtx(struct lock_object *lock);
 static int	unlock_spin(struct lock_object *lock);
 
@@ -105,6 +109,9 @@ struct lock_class lock_class_mtx_sleep =
 #endif
 	.lc_lock = lock_mtx,
 	.lc_unlock = unlock_mtx,
+#ifdef KDTRACE_HOOKS
+	.lc_owner = owner_mtx,
+#endif
 };
 struct lock_class lock_class_mtx_spin = {
 	.lc_name = "spin mutex",
@@ -115,6 +122,9 @@ struct lock_class lock_class_mtx_spin = 
 #endif
 	.lc_lock = lock_spin,
 	.lc_unlock = unlock_spin,
+#ifdef KDTRACE_HOOKS
+	.lc_owner = owner_mtx,
+#endif
 };
 
 /*
@@ -162,6 +172,17 @@ unlock_spin(struct lock_object *lock)
 	panic("spin locks can only use msleep_spin");
 }
 
+#ifdef KDTRACE_HOOKS
+int
+owner_mtx(struct lock_object *lock, struct thread **owner)
+{
+	struct mtx *m = (struct mtx *)lock;
+
+	*owner = mtx_owner(m);
+	return (mtx_unowned(m) == 0);
+}
+#endif
+
 /*
  * Function versions of the inlined __mtx_* macros.  These are used by
  * modules and can also be called from assembly language if needed.
@@ -202,7 +223,7 @@ _mtx_unlock_flags(struct mtx *m, int opt
 	mtx_assert(m, MA_OWNED);
 
 	if (m->mtx_recurse == 0)
-		lock_profile_release_lock(&m->lock_object);
+		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
 	_rel_sleep_lock(m, curthread, opts, file, line);
 }
 
@@ -280,8 +301,8 @@ _mtx_trylock(struct mtx *m, int opts, co
 		    file, line);
 		curthread->td_locks++;
 		if (m->mtx_recurse == 0)
-			lock_profile_obtain_lock_success(&m->lock_object, contested,
-			    waittime, file, line);
+			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
+			    m, contested, waittime, file, line);
 
 	}
 
@@ -310,6 +331,11 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
 	int contested = 0;
 	uint64_t waittime = 0;
 #endif
+#ifdef KDTRACE_HOOKS
+	uint64_t spin_cnt = 0;
+	uint64_t sleep_cnt = 0;
+	int64_t sleep_time = 0;
+#endif
 
 	if (mtx_owned(m)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -330,6 +356,9 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
 		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
 
 	while (!_obtain_lock(m, tid)) {
+#ifdef KDTRACE_HOOKS
+		spin_cnt++;
+#endif
 #ifdef ADAPTIVE_MUTEXES
 		/*
 		 * If the owner is running on another CPU, spin until the
@@ -344,8 +373,12 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
 					    "%s: spinning on %p held by %p",
 					    __func__, m, owner);
 				while (mtx_owner(m) == owner &&
-				    TD_IS_RUNNING(owner))
+				    TD_IS_RUNNING(owner)) {
 					cpu_spinwait();
+#ifdef KDTRACE_HOOKS
+					spin_cnt++;
+#endif
+				}
 				continue;
 			}
 		}
@@ -408,7 +441,14 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
 		/*
 		 * Block on the turnstile.
 		 */
+#ifdef KDTRACE_HOOKS
+		sleep_time -= lockstat_nsecs();
+#endif
 		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
+#ifdef KDTRACE_HOOKS
+		sleep_time += lockstat_nsecs();
+		sleep_cnt++;
+#endif
 	}
 #ifdef KTR
 	if (cont_logged) {
@@ -417,8 +457,18 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t
 		    m->lock_object.lo_name, (void *)tid, file, line);
 	}
 #endif
-	lock_profile_obtain_lock_success(&m->lock_object, contested,
+	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
 	    waittime, file, line);
+#ifdef KDTRACE_HOOKS
+	if (sleep_time)
+		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);
+
+	/*
+	 * Only record the loops spinning and not sleeping. 
+	 */
+	if (spin_cnt > sleep_cnt)
+		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
+#endif
 }
 
 static void
@@ -482,8 +532,9 @@ _mtx_lock_spin(struct mtx *m, uintptr_t 
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
 
-	lock_profile_obtain_lock_success(&m->lock_object, contested,
-	    waittime, (file), (line));
+	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
+	    contested, waittime, (file), (line));
+	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
 }
 #endif /* SMP */
 
@@ -497,6 +548,9 @@ _thread_lock_flags(struct thread *td, in
 	int contested = 0;
 	uint64_t waittime = 0;
 #endif
+#ifdef KDTRACE_HOOKS
+	uint64_t spin_cnt = 0;
+#endif
 
 	i = 0;
 	tid = (uintptr_t)curthread;
@@ -516,6 +570,9 @@ retry:
 		WITNESS_CHECKORDER(&m->lock_object,
 		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
 		while (!_obtain_lock(m, tid)) {
+#ifdef KDTRACE_HOOKS
+			spin_cnt++;
+#endif
 			if (m->mtx_lock == tid) {
 				m->mtx_recurse++;
 				break;
@@ -541,13 +598,17 @@ retry:
 		if (m == td->td_lock)
 			break;
 		_rel_spin_lock(m);	/* does spinlock_exit() */
+#ifdef KDTRACE_HOOKS
+		spin_cnt++;
+#endif
 	}
 	if (m->mtx_recurse == 0)
-		lock_profile_obtain_lock_success(&m->lock_object, contested,
-		    waittime, (file), (line));
+		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
+		    m, contested, waittime, (file), (line));
 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
 	    line);
 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
+	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
 }
 
 struct mtx *
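
The kern_mutex.c hunks above (and the kern_rwlock.c hunks below) all add
the same accounting pattern under KDTRACE_HOOKS: count loop iterations in
spin_cnt, bracket each turnstile_wait() with lockstat_nsecs() to
accumulate sleep_time, and report once after the lock is finally acquired.
A condensed, non-compilable sketch of that pattern with the lock-specific
details elided (try_acquire and must_block are placeholders, not kernel
functions; the bookkeeping itself is lifted from the diff):

#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0, sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	while (!try_acquire(lock)) {		/* placeholder for _obtain_lock() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;			/* every retry is at least a spin */
#endif
		if (must_block) {
#ifdef KDTRACE_HOOKS
			sleep_time -= lockstat_nsecs();
#endif
			turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
			sleep_time += lockstat_nsecs();
			sleep_cnt++;
#endif
		}
	}
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, lock, sleep_time);
	/* Charge only the iterations that spun, not the ones that slept. */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, lock, spin_cnt - sleep_cnt);
#endif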

Modified: head/sys/kern/kern_rmlock.c
==============================================================================
--- head/sys/kern/kern_rmlock.c	Tue May 26 20:22:03 2009	(r192852)
+++ head/sys/kern/kern_rmlock.c	Tue May 26 20:28:22 2009	(r192853)
@@ -35,6 +35,7 @@
 __FBSDID("$FreeBSD$");
 
 #include "opt_ddb.h"
+#include "opt_kdtrace.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -71,6 +72,9 @@ static __inline void compiler_memory_bar
 
 static void	assert_rm(struct lock_object *lock, int what);
 static void	lock_rm(struct lock_object *lock, int how);
+#ifdef KDTRACE_HOOKS
+static int	owner_rm(struct lock_object *lock, struct thread **owner);
+#endif
 static int	unlock_rm(struct lock_object *lock);
 
 struct lock_class lock_class_rm = {
@@ -84,6 +88,9 @@ struct lock_class lock_class_rm = {
 #endif
 	.lc_lock = lock_rm,
 	.lc_unlock = unlock_rm,
+#ifdef KDTRACE_HOOKS
+	.lc_owner = owner_rm,
+#endif
 };
 
 static void
@@ -107,6 +114,15 @@ unlock_rm(struct lock_object *lock)
 	panic("unlock_rm called");
 }
 
+#ifdef KDTRACE_HOOKS
+static int
+owner_rm(struct lock_object *lock, struct thread **owner)
+{
+
+	panic("owner_rm called");
+}
+#endif
+
 static struct mtx rm_spinlock;
 
 MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

Modified: head/sys/kern/kern_rwlock.c
==============================================================================
--- head/sys/kern/kern_rwlock.c	Tue May 26 20:22:03 2009	(r192852)
+++ head/sys/kern/kern_rwlock.c	Tue May 26 20:28:22 2009	(r192853)
@@ -35,6 +35,7 @@
 __FBSDID("$FreeBSD$");
 
 #include "opt_ddb.h"
+#include "opt_kdtrace.h"
 #include "opt_no_adaptive_rwlocks.h"
 
 #include <sys/param.h>
@@ -71,6 +72,9 @@ static void	db_show_rwlock(struct lock_o
 #endif
 static void	assert_rw(struct lock_object *lock, int what);
 static void	lock_rw(struct lock_object *lock, int how);
+#ifdef KDTRACE_HOOKS
+static int	owner_rw(struct lock_object *lock, struct thread **owner);
+#endif
 static int	unlock_rw(struct lock_object *lock);
 
 struct lock_class lock_class_rw = {
@@ -82,6 +86,9 @@ struct lock_class lock_class_rw = {
 #endif
 	.lc_lock = lock_rw,
 	.lc_unlock = unlock_rw,
+#ifdef KDTRACE_HOOKS
+	.lc_owner = owner_rw,
+#endif
 };
 
 /*
@@ -149,6 +156,19 @@ unlock_rw(struct lock_object *lock)
 	}
 }
 
+#ifdef KDTRACE_HOOKS
+int
+owner_rw(struct lock_object *lock, struct thread **owner)
+{
+	struct rwlock *rw = (struct rwlock *)lock;
+	uintptr_t x = rw->rw_lock;
+
+	*owner = rw_wowner(rw);
+	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
+	    (*owner != NULL));
+}
+#endif
+
 void
 rw_init_flags(struct rwlock *rw, const char *name, int opts)
 {
@@ -258,7 +278,7 @@ _rw_wunlock(struct rwlock *rw, const cha
 	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
 	    line);
 	if (!rw_recursed(rw))
-		lock_profile_release_lock(&rw->lock_object);
+		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, rw);
 	__rw_wunlock(rw, curthread, file, line);
 }
 /*
@@ -287,6 +307,11 @@ _rw_rlock(struct rwlock *rw, const char 
 	int contested = 0;
 #endif
 	uintptr_t v;
+#ifdef KDTRACE_HOOKS
+	uint64_t spin_cnt = 0;
+	uint64_t sleep_cnt = 0;
+	int64_t sleep_time = 0;
+#endif
 
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
@@ -296,6 +321,9 @@ _rw_rlock(struct rwlock *rw, const char 
 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
 
 	for (;;) {
+#ifdef KDTRACE_HOOKS
+		spin_cnt++;
+#endif
 		/*
 		 * Handle the easy case.  If no other thread has a write
 		 * lock, then try to bump up the count of read locks.  Note
@@ -342,8 +370,12 @@ _rw_rlock(struct rwlock *rw, const char 
 					    "%s: spinning on %p held by %p",
 					    __func__, rw, owner);
 				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
-				    owner && TD_IS_RUNNING(owner))
+				    owner && TD_IS_RUNNING(owner)) {
 					cpu_spinwait();
+#ifdef KDTRACE_HOOKS
+					spin_cnt++;
+#endif
+				}
 				continue;
 			}
 		} else if (spintries < rowner_retries) {
@@ -423,7 +455,14 @@ _rw_rlock(struct rwlock *rw, const char 
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
 			    rw);
+#ifdef KDTRACE_HOOKS
+		sleep_time -= lockstat_nsecs();
+#endif
 		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
+#ifdef KDTRACE_HOOKS
+		sleep_time += lockstat_nsecs();
+		sleep_cnt++;
+#endif
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
 			    __func__, rw);
@@ -434,12 +473,22 @@ _rw_rlock(struct rwlock *rw, const char 
 	 * however.  turnstiles don't like owners changing between calls to
 	 * turnstile_wait() currently.
 	 */
-	lock_profile_obtain_lock_success( &rw->lock_object, contested,
+	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE, rw, contested,
 	    waittime, file, line);
 	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
 	WITNESS_LOCK(&rw->lock_object, 0, file, line);
 	curthread->td_locks++;
 	curthread->td_rw_rlocks++;
+#ifdef KDTRACE_HOOKS
+	if (sleep_time)
+		LOCKSTAT_RECORD1(LS_RW_RLOCK_BLOCK, rw, sleep_time);
+
+	/*
+	 * Record only the loops spinning and not sleeping. 
+	 */
+	if (spin_cnt > sleep_cnt)
+		LOCKSTAT_RECORD1(LS_RW_RLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
+#endif
 }
 
 int
@@ -569,7 +618,7 @@ _rw_runlock(struct rwlock *rw, const cha
 		turnstile_chain_unlock(&rw->lock_object);
 		break;
 	}
-	lock_profile_release_lock(&rw->lock_object);
+	LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

