PERFORCE change 98700 for review
Kip Macy
kmacy at FreeBSD.org
Tue Jun 6 22:23:54 UTC 2006
http://perforce.freebsd.org/chv.cgi?CH=98700
Change 98700 by kmacy at kmacy_storage:sun4v_work on 2006/06/06 21:49:30
Kris Kennaway's spinlock profiling changes
Affected files ...
.. //depot/projects/kmacy_sun4v/src/sys/conf/options#9 edit
.. //depot/projects/kmacy_sun4v/src/sys/kern/kern_mutex.c#8 edit
.. //depot/projects/kmacy_sun4v/src/sys/sys/_mutex.h#3 edit
.. //depot/projects/kmacy_sun4v/src/sys/sys/file.h#4 edit
.. //depot/projects/kmacy_sun4v/src/sys/sys/mutex.h#6 edit
Differences ...
==== //depot/projects/kmacy_sun4v/src/sys/conf/options#9 (text+ko) ====
==== //depot/projects/kmacy_sun4v/src/sys/kern/kern_mutex.c#8 (text+ko) ====
@@ -35,7 +35,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/kern/kern_mutex.c,v 1.173 2006/06/03 21:11:32 jhb Exp $");
-
#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
@@ -56,12 +55,14 @@
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
+#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/bus.h>
+#include <machine/clock.h>
#include <machine/cpu.h>
#include <ddb/ddb.h>
@@ -84,7 +85,7 @@
* Internal utility macros.
*/
#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-
+/* XXX */
#define mtx_owner(m) ((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
#ifdef DDB
@@ -114,6 +115,7 @@
*/
struct mtx sched_lock;
struct mtx Giant;
+struct mtx timer_lock;
#ifdef SPIN_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, spinlock, CTLFLAG_RD, NULL, "spinlock debugging");
@@ -121,12 +123,15 @@
int spin_prof_enable = 0;
SYSCTL_INT(_debug_spinlock_prof, OID_AUTO, enable, CTLFLAG_RW,
&spin_prof_enable, 0, "Enable tracing of spinlock holdtime");
-int spin_total = 0;
-int spin_count = 0;
-SYSCTL_INT(_debug_spinlock_prof, OID_AUTO, total, CTLFLAG_RD,
+u_int spin_total = 0;
+uint64_t spin_count = 0;
+u_int spin_scale = 100;
+SYSCTL_UINT(_debug_spinlock_prof, OID_AUTO, total, CTLFLAG_RD,
&spin_total, 0, "Number of spinlock spins recorded");
-SYSCTL_INT(_debug_spinlock_prof, OID_AUTO, count, CTLFLAG_RD,
+SYSCTL_UINT(_debug_spinlock_prof, OID_AUTO, count, CTLFLAG_RD,
&spin_count, 0, "Number of spinlock acquisitions recorded");
+SYSCTL_UINT(_debug_spinlock_prof, OID_AUTO, scale, CTLFLAG_RW,
+ &spin_scale, 0, "How often to KTR spinlock acquisition");
static int
reset_spin_prof_stats(SYSCTL_HANDLER_ARGS)
@@ -155,50 +160,55 @@
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
- &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");
+ &mutex_prof_enable, 0, "Enable tracing of mutex holdtime/contention");
+static int mutex_prof_global = 1;
+SYSCTL_INT(_debug_mutex_prof, OID_AUTO, global, CTLFLAG_RW,
+ &mutex_prof_global, 0, "Enable tracing of all mutexes");
struct mutex_prof {
const char *name;
+ u_int namehash;
const char *file;
int line;
+#if 0
uintmax_t cnt_max;
uintmax_t cnt_tot;
+#endif
uintmax_t cnt_cur;
uintmax_t cnt_contest_holding;
uintmax_t cnt_contest_locking;
- struct mutex_prof *next;
};
/*
* mprof_buf is a static pool of profiling records to avoid possible
* reentrance of the memory allocation functions.
- *
- * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
*/
-#ifdef MPROF_BUFFERS
-#define NUM_MPROF_BUFFERS MPROF_BUFFERS
-#else
-#define NUM_MPROF_BUFFERS 1000
-#endif
-static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
-static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
-#define MPROF_HASH_SIZE 1009
+#define MPROF_HASH_SIZE 4096
+#define MPROF_HASH_MASK (MPROF_HASH_SIZE - 1)
#endif
-#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
-#error MPROF_BUFFERS must be larger than MPROF_HASH_SIZE
-#endif
-static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
+static struct mutex_prof mprof_buf[MPROF_HASH_SIZE];
+static int allocated_mprof_buf;
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE 256 * 400
+/* We keep a smaller pool of spin mutexes for protecting the mprof hash entries */
+#define MPROF_LOCK_SIZE 16
+#define MPROF_LOCK_MASK (MPROF_LOCK_SIZE - 1)
+#define MPROF_LHASH(hash) ((hash) & MPROF_LOCK_MASK)
+
+#define MPROF_LOCK(hash) mtx_lock_spin(&mprof_locks[MPROF_LHASH(hash)])
+#define MPROF_UNLOCK(hash) mtx_unlock_spin(&mprof_locks[MPROF_LHASH(hash)])
+
+struct mtx mprof_locks[MPROF_LOCK_SIZE];
+
static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
&mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
&mutex_prof_records, 0, "Number of profiling records");
-static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
+static int mutex_prof_maxrecords = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
&mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
@@ -210,13 +220,13 @@
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
&mutex_prof_collisions, 0, "Number of hash collisions");
+#ifdef KTR
+static int stack_scale = 1;
+SYSCTL_INT(_debug_mutex_prof, OID_AUTO, scale, CTLTYPE_INT | CTLFLAG_RW,
+ &stack_scale, 0, "How often to sample stack traces");
+#endif
-/*
- * mprof_mtx protects the profiling buffers and the hash.
- */
-static struct mtx mprof_mtx;
-MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);
-
+#if 0
static u_int64_t
nanoseconds(void)
{
@@ -225,6 +235,7 @@
nanotime(&tv);
return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}
+#endif
static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
@@ -232,8 +243,9 @@
struct sbuf *sb;
int error, i;
static int multiplier = 1;
+ const char *p;
- if (first_free_mprof_buf == 0)
+ if (allocated_mprof_buf == 0)
return (SYSCTL_OUT(req, "No locking recorded",
sizeof("No locking recorded")));
@@ -241,31 +253,35 @@
sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
sbuf_printf(sb, "\n%6s %12s %11s %5s %12s %12s %s\n",
"max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
- /*
- * XXX this spinlock seems to be by far the largest perpetrator
- * of spinlock latency (1.6 msec on an Athlon1600 was recorded
- * even before I pessimized it further by moving the average
- * computation here).
- */
- mtx_lock_spin(&mprof_mtx);
- for (i = 0; i < first_free_mprof_buf; ++i) {
+ for (i = 0; i < MPROF_HASH_SIZE; ++i) {
+ if (mprof_buf[i].name == NULL)
+ continue;
+ for (p = mprof_buf[i].file;
+ p != NULL && strncmp(p, "../", 3) == 0; p += 3)
+ /* nothing */ ;
sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
+#if 0
mprof_buf[i].cnt_max / 1000,
mprof_buf[i].cnt_tot / 1000,
+#endif
+ (uintmax_t)0,
+ (uintmax_t)0,
mprof_buf[i].cnt_cur,
+ (uintmax_t)0,
+#if 0
mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
+#endif
mprof_buf[i].cnt_contest_holding,
mprof_buf[i].cnt_contest_locking,
- mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
+ p, mprof_buf[i].line, mprof_buf[i].name);
if (sbuf_overflowed(sb)) {
- mtx_unlock_spin(&mprof_mtx);
sbuf_delete(sb);
multiplier++;
goto retry_sbufops;
}
}
- mtx_unlock_spin(&mprof_mtx);
+
sbuf_finish(sb);
error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
sbuf_delete(sb);
@@ -279,7 +295,7 @@
{
int error, v;
- if (first_free_mprof_buf == 0)
+ if (allocated_mprof_buf == 0)
return (0);
v = 0;
@@ -291,11 +307,8 @@
if (v == 0)
return (0);
- mtx_lock_spin(&mprof_mtx);
- bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
- bzero(mprof_hash, sizeof(struct mtx *) * MPROF_HASH_SIZE);
- first_free_mprof_buf = 0;
- mtx_unlock_spin(&mprof_mtx);
+ bzero(mprof_buf, MPROF_HASH_SIZE);
+ allocated_mprof_buf = 0;
return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
@@ -322,11 +335,13 @@
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
/* don't reset the timer when/if recursing */
- if (m->mtx_acqtime == 0) {
+ if (mutex_prof_enable == 1 && mutex_prof_global && m->mtx_acqtime == 0) {
m->mtx_filename = file;
m->mtx_lineno = line;
- m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
+ m->mtx_acqtime = 1;
+#if 0
++mutex_prof_acquisitions;
+#endif
}
#endif
}
@@ -344,53 +359,63 @@
line);
mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
- if (m->mtx_acqtime != 0) {
- static const char *unknown = "(unknown)";
+ if (mutex_prof_global && m->mtx_acqtime != 0) {
+ const char *unknown = "(unknown)";
struct mutex_prof *mpp;
u_int64_t acqtime, now;
- const char *p, *q;
- volatile u_int hash;
+ u_int hash;
+ const char *p = m->mtx_filename;
+ int collision = 0;
- now = nanoseconds();
+ now = 2;
acqtime = m->mtx_acqtime;
m->mtx_acqtime = 0;
if (now <= acqtime)
goto out;
- for (p = m->mtx_filename;
- p != NULL && strncmp(p, "../", 3) == 0; p += 3)
- /* nothing */ ;
if (p == NULL || *p == '\0')
p = unknown;
- for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
- hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
- mtx_lock_spin(&mprof_mtx);
- for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
+ hash = (m->mtx_namehash * 31 * 31 + (uintptr_t)p * 31 + m->mtx_lineno) & MPROF_HASH_MASK;
+ CTR5(KTR_SPARE1, "Hashing %s(%x) %s:%d to %d", mtx_name(m), m->mtx_namehash, p, m->mtx_lineno, hash);
+ mpp = &mprof_buf[hash];
+ while (mpp->name != NULL) {
if (mpp->line == m->mtx_lineno &&
- strcmp(mpp->file, p) == 0)
+ mpp->file == p &&
+ mpp->namehash == m->mtx_namehash)
break;
- if (mpp == NULL) {
+ /* If the mprof_hash entry is allocated to someone else, try the next one */
+ collision = 1;
+ CTR4(KTR_SPARE1, "Hash collision, %s:%d %s(%x)", mpp->file, mpp->line, mpp->name, mpp->namehash);
+ hash = (hash + 1) & MPROF_HASH_MASK;
+ mpp = &mprof_buf[hash];
+ }
+ if (mpp->name == NULL) {
+ int buf;
+
+ buf = atomic_fetchadd_int(&allocated_mprof_buf, 1);
/* Just exit if we cannot get a trace buffer */
- if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
+ if (buf >= MPROF_HASH_SIZE) {
++mutex_prof_rejected;
- goto unlock;
+ goto out;
}
- mpp = &mprof_buf[first_free_mprof_buf++];
- mpp->name = mtx_name(m);
mpp->file = p;
mpp->line = m->mtx_lineno;
- mpp->next = mprof_hash[hash];
- if (mprof_hash[hash] != NULL)
+ mpp->name = mtx_name(m);
+ mpp->namehash = m->mtx_namehash;
+ if (collision)
++mutex_prof_collisions;
- mprof_hash[hash] = mpp;
+ /* We might have raced someone else but who cares, they'll try again next time */
++mutex_prof_records;
}
+ MPROF_LOCK(hash);
/*
* Record if the mutex has been held longer now than ever
* before.
*/
+#if 0
if (now - acqtime > mpp->cnt_max)
mpp->cnt_max = now - acqtime;
mpp->cnt_tot += now - acqtime;
+#endif
mpp->cnt_cur++;
/*
* There's a small race, really we should cmpxchg
@@ -402,8 +427,7 @@
m->mtx_contest_holding = 0;
mpp->cnt_contest_locking += m->mtx_contest_locking;
m->mtx_contest_locking = 0;
-unlock:
- mtx_unlock_spin(&mprof_mtx);
+ MPROF_UNLOCK(hash);
}
out:
#endif
@@ -486,10 +510,12 @@
#endif
uintptr_t v;
#ifdef KTR
+#if 0
int cont_logged = 0;
#endif
+#endif
#ifdef MUTEX_PROFILING
- int contested;
+ int contested, oldhold, fine_profiling = 0;
#endif
if (mtx_owned(m)) {
@@ -510,11 +536,32 @@
#ifdef MUTEX_PROFILING
contested = 0;
+ if (m->mtx_object.lo_flags & LO_PROFILE)
+ fine_profiling = 1;
#endif
while (!_obtain_lock(m, tid)) {
#ifdef MUTEX_PROFILING
- contested = 1;
- atomic_add_int(&m->mtx_contest_holding, 1);
+ if (mutex_prof_global) {
+ contested = 1;
+ atomic_add_int(&m->mtx_contest_holding, 1);
+ } else if (fine_profiling && !contested) {
+ contested = 1;
+ oldhold = atomic_fetchadd_int(&m->mtx_contest_holding, 1);
+#if 0
+ if (!((oldhold + 1) % stack_scale)) {
+ mtx_lock_spin(&mprof_mtx);
+ stack_save(&mtx_stack);
+ CTR2(KTR_CONTENTION, "cont_hold (%d) of %s", oldhold, m->mtx_object.lo_name);
+ CTRSTACK(KTR_CONTENTION, &mtx_stack, 4, 0);
+ mtx_unlock_spin(&mprof_mtx);
+ }
+ CTR3(KTR_CONTENTION,
+ "%s held by %s:%d",
+ m->mtx_object.lo_name,
+ m->mtx_filename,
+ m->mtx_lineno);
+#endif
+ }
#endif
turnstile_lock(&m->mtx_object);
v = m->mtx_lock;
@@ -581,17 +628,6 @@
*/
mtx_assert(m, MA_NOTOWNED);
-#ifdef KTR
- if (!cont_logged) {
- CTR6(KTR_CONTENTION,
- "contention: %p at %s:%d wants %s, taken by %s:%d",
- (void *)tid, file, line, m->mtx_object.lo_name,
- WITNESS_FILE(&m->mtx_object),
- WITNESS_LINE(&m->mtx_object));
- cont_logged = 1;
- }
-#endif
-
/*
* Block on the turnstile.
*/
@@ -600,16 +636,19 @@
}
#ifdef KTR
- if (cont_logged) {
+#if 0
+ if (fine_profiling && contested) {
CTR4(KTR_CONTENTION,
"contention end: %s acquired by %p at %s:%d",
- m->mtx_object.lo_name, (void *)tid, file, line);
+ m->mtx_object.lo_name, (void *)tid, m->mtx_filename, m->mtx_lineno);
}
#endif
+#endif
#ifdef MUTEX_PROFILING
- if (contested)
- m->mtx_contest_locking++;
- m->mtx_contest_holding = 0;
+ if (fine_profiling || mutex_prof_global) {
+ m->mtx_contest_locking = contested;
+ atomic_store_rel_int(&m->mtx_contest_holding,0);
+ }
#endif
return;
}
@@ -628,8 +667,9 @@
int i = 0;
#ifdef SPIN_PROFILING
int profiling = 0;
+ volatile struct thread *td = NULL;
- if (m->mtx_object.lo_flags & LO_PROFILE && spin_prof_enable)
+ if (spin_prof_enable)
profiling = 1;
#endif
@@ -640,6 +680,9 @@
/* Give interrupts a chance while we spin. */
spinlock_exit();
+#ifdef SPIN_PROFILING
+ td = mtx_owner(m);
+#endif
while (m->mtx_lock != MTX_UNOWNED) {
if (i++ < 10000000) {
cpu_spinwait();
@@ -664,7 +707,13 @@
/* We have the lock, record how many spins it took to get it */
if (profiling) {
spin_total += i;
- spin_count++;
+ if (!(++spin_count % spin_scale)) {
+ if (td != NULL)
+ CTR6(KTR_SPARE1, "%p %s:%d spun %d (pid %d, \"%s\")", m, file, line, i,
+ td->td_proc->p_pid, td->td_proc->p_comm);
+ else
+ CTR4(KTR_SPARE1, "%p %s:%d spun %d", m, file, line, i);
+ }
}
#endif
@@ -871,6 +920,10 @@
{
struct lock_class *class;
int flags;
+#ifdef MUTEX_PROFILING
+ const char *p;
+ u_int hash = 0;
+#endif
MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
MTX_NOWITNESS | MTX_DUPOK)) == 0);
@@ -906,7 +959,16 @@
m->mtx_lineno = 0;
m->mtx_contest_holding = 0;
m->mtx_contest_locking = 0;
+
+ /* Hash the mutex name to an int so we don't have to strcmp() it repeatedly */
+ for (p = name; *p != '\0'; p++)
+ hash = 31 * hash + *p;
+ m->mtx_namehash = hash;
+#if 0
+ if (opts & MTX_PROFILE)
+ m->mtx_stack = stack_create();
#endif
+#endif
lock_init(&m->mtx_object, class, name, type, flags);
}
@@ -935,6 +997,13 @@
__LINE__);
}
+#ifdef MUTEX_PROFILING
+#if 0
+ if (m->mtx_object.lo_flags & LO_PROFILE)
+ stack_destroy(m->mtx_stack);
+#endif
+#endif
+
lock_destroy(&m->mtx_object);
}
@@ -946,15 +1015,27 @@
void
mutex_init(void)
{
+#ifdef MUTEX_PROFILING
+ int i;
+#endif
/* Setup turnstiles so that sleep mutexes work. */
init_turnstiles();
+#ifdef MUTEX_PROFILING
+ /* Initialize the mutex profiling locks */
+ for (i = 0; i < MPROF_LOCK_SIZE; i++) {
+ mtx_init(&mprof_locks[i], "mprof lock",
+ NULL, MTX_SPIN|MTX_QUIET);
+ }
+#endif
+
/*
* Initialize mutexes.
*/
mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
- mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE | MTX_PROFILE);
+ mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
+ mtx_init(&timer_lock, "timer lock", NULL, MTX_SPIN);
mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
mtx_lock(&Giant);
==== //depot/projects/kmacy_sun4v/src/sys/sys/_mutex.h#3 (text+ko) ====
@@ -45,6 +45,7 @@
* MUTEX_PROFILING is in opt_global.h
*/
u_int64_t mtx_acqtime;
+ u_int mtx_namehash;
const char *mtx_filename;
int mtx_lineno;
/*
@@ -56,6 +57,7 @@
*/
u_int mtx_contest_holding;
u_int mtx_contest_locking;
+ struct stack *mtx_stack;
#endif
};
==== //depot/projects/kmacy_sun4v/src/sys/sys/file.h#4 (text+ko) ====
@@ -92,6 +92,7 @@
#define DFLAG_PASSABLE 0x01 /* may be passed via unix sockets. */
#define DFLAG_SEEKABLE 0x02 /* seekable / nonsequential */
+#define DFLAG_MPSAFE 0x04 /* These fileops are MPSAFE */
/*
* Kernel descriptor table.
==== //depot/projects/kmacy_sun4v/src/sys/sys/mutex.h#6 (text+ko) ====
@@ -56,7 +56,7 @@
#define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */
#define MTX_RECURSE 0x00000004 /* Option: lock allowed to recurse */
#define MTX_NOWITNESS 0x00000008 /* Don't do any witness checking. */
-#define MTX_PROFILE 0x00000010 /* Enable spinlock profiling for this spin lock */
+#define MTX_PROFILE 0x00000020 /* Enable spinlock profiling for this spin lock */
/*
* Option flags passed to certain lock/unlock routines, through the use
@@ -74,6 +74,7 @@
#define MTX_UNOWNED 0x00000004 /* Cookie for free mutex */
#define MTX_FLAGMASK (MTX_RECURSED | MTX_CONTESTED | MTX_UNOWNED)
+
#endif /* _KERNEL */
#ifndef LOCORE
@@ -165,9 +166,22 @@
*/
#ifndef _get_spin_lock
#ifdef SMP
+#define _mtx_spin_trylock(mp, tid, file, line) ({ \
+ uintptr_t _tid = (uintptr_t)(tid); \
+ int v; \
+ \
+ spinlock_enter(); \
+ v = _obtain_lock((mp), _tid); \
+ if (!v) \
+ spinlock_exit(); \
+ v; \
+})
+
+
#ifdef SPIN_PROFILING
extern int spin_prof_enable;
-extern int spin_count;
+extern uint64_t spin_count;
+extern u_int spin_scale;
#define _get_spin_lock(mp, tid, opts, file, line) do { \
uintptr_t _tid = (uintptr_t)(tid); \
\
@@ -178,8 +192,8 @@
else \
_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
} \
- else if ((mp)->mtx_object.lo_flags & LO_PROFILE && spin_prof_enable) \
- spin_count++; \
+ else if (spin_prof_enable && !(++spin_count % spin_scale)) \
+ CTR3(KTR_SPARE1, "%p %s:%d", mp, file, line); \
} while (0)
#else /* SPIN_PROFILING */
#define _get_spin_lock(mp, tid, opts, file, line) do { \
@@ -325,6 +339,8 @@
_mtx_lock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_spin_flags(m, opts) \
_mtx_unlock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
+#define mtx_trylock_spin(m) \
+ _mtx_spin_trylock((m), curthread, LOCK_FILE, LOCK_LINE)
#else /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
#define mtx_lock_flags(m, opts) \
_get_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
@@ -334,10 +350,13 @@
_get_spin_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_spin_flags(m, opts) \
_rel_spin_lock((m))
+#define mtx_trylock_spin(m) \
+ _mtx_spin_trylock((m), curthread, LOCK_FILE, LOCK_LINE)
#endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */
-#define mtx_trylock_flags(m, opts) \
- _mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE)
+#define mtx_trylock_flags(m, opts) \
+ _mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE)
+
#define mtx_initialized(m) lock_initalized(&(m)->mtx_object)
@@ -351,6 +370,7 @@
* Global locks.
*/
extern struct mtx sched_lock;
+extern struct mtx timer_lock;
extern struct mtx Giant;
/*
More information about the p4-projects
mailing list