git: 7530de77f539 - main - thread: add td_wantedlock

From: Mateusz Guzik <mjg_at_FreeBSD.org>
Date: Sun, 22 Oct 2023 17:09:50 UTC
The branch main has been updated by mjg:

URL: https://cgit.FreeBSD.org/src/commit/?id=7530de77f539678847edd29489b7635f8f58b574

commit 7530de77f539678847edd29489b7635f8f58b574
Author:     Mateusz Guzik <mjg@FreeBSD.org>
AuthorDate: 2023-10-22 15:46:39 +0000
Commit:     Mateusz Guzik <mjg@FreeBSD.org>
CommitDate: 2023-10-22 17:09:45 +0000

    thread: add td_wantedlock
    
    This makes it possible to obtain, while sampling, information about the
    lock a thread is actively waiting for. Without the change one would only
    see a bunch of calls to lock_delay(), where the stack trace often does
    not reveal which lock is being contended.
    
    Note this is not the same as lock profiling, which only produces data
    for acquisitions that end up waiting for the lock.
    
    struct thread already has a td_lockname field, but I did not use it
    because it has different semantics -- it is only meaningful while the
    thread is off CPU. At the same time it could not be converted to hold a
    lock_object pointer, because non-curthread access would no longer be
    guaranteed to be safe: by the time a reader dereferences the pointer,
    the lock might have been acquired, released and the object containing it
    freed.
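    
    For reference, a minimal sketch of how the new field is driven (an
    abridged restatement of the kern_mutex.c hunk below, not additional
    code): the macros bracket the entire contended-acquire loop, and only
    curthread ever writes td_wantedlock, so the pointer stays valid for the
    whole window during which it is published.
    
        THREAD_CONTENDS_ON_LOCK(&m->lock_object);
        for (;;) {
            if (_mtx_obtain_lock_fetch(m, &v, tid))
                break;
            /* adaptive spinning and turnstile sleep elided */
        }
        THREAD_CONTENTION_DONE(&m->lock_object);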
    
    Sample usage with dtrace:
    rm /tmp/out.kern_stacks ; dtrace -x stackframes=100 -n 'profile-997 { @[curthread->td_wantedlock != NULL ? stringof(curthread->td_wantedlock->lo_name) : stringof("\n"), stack()] = count(); }' -o /tmp/out.kern_stacks
    
    This also facilitates adding lock information to traces produced by
    hwpmc.
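    
    As a hypothetical illustration (not part of this commit), a sampling
    hook running in the interrupted thread's context could pick up the lock
    name like this; since td_wantedlock is only ever read for curthread
    here, the lifetime problem described above does not apply:
    
        /* Needs <sys/param.h>, <sys/lock.h> and <sys/proc.h>. */
        static const char *
        sample_wanted_lock_name(void)
        {
            struct lock_object *lo;
    
            lo = curthread->td_wantedlock;
            return (lo != NULL ? lo->lo_name : NULL);
        }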
    
    Note: spinlocks are not supported at the moment.
    
    Sponsored by:   Rubicon Communications, LLC ("Netgate")
---
 sys/kern/kern_mutex.c  |  3 +++
 sys/kern/kern_rwlock.c |  6 ++++++
 sys/kern/kern_sx.c     |  6 ++++++
 sys/kern/kern_thread.c |  6 +++---
 sys/sys/proc.h         | 11 +++++++++++
 5 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 7f348530ed31..6071ac7fd6f1 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -574,6 +574,8 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
 		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
 		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
 
+	THREAD_CONTENDS_ON_LOCK(&m->lock_object);
+
 	for (;;) {
 		if (v == MTX_UNOWNED) {
 			if (_mtx_obtain_lock_fetch(m, &v, tid))
@@ -670,6 +672,7 @@ retry_turnstile:
 #endif
 		v = MTX_READ_VALUE(m);
 	}
+	THREAD_CONTENTION_DONE(&m->lock_object);
 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
 	if (__predict_true(!doing_lockprof))
 		return;
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 5705de0f311f..83d5862a6667 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -482,6 +482,8 @@ __rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
 	lock_profile_obtain_lock_failed(&rw->lock_object, false,
 	    &contested, &waittime);
 
+	THREAD_CONTENDS_ON_LOCK(&rw->lock_object);
+
 	for (;;) {
 		if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG))
 			break;
@@ -628,6 +630,7 @@ retry_ts:
 			    __func__, rw);
 		v = RW_READ_VALUE(rw);
 	}
+	THREAD_CONTENTION_DONE(&rw->lock_object);
 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
 	if (__predict_true(!doing_lockprof))
 		return;
@@ -976,6 +979,8 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 	lock_profile_obtain_lock_failed(&rw->lock_object, false,
 	    &contested, &waittime);
 
+	THREAD_CONTENDS_ON_LOCK(&rw->lock_object);
+
 	for (;;) {
 		if (v == RW_UNLOCKED) {
 			if (_rw_write_lock_fetch(rw, &v, tid))
@@ -1161,6 +1166,7 @@ retry_ts:
 #endif
 		v = RW_READ_VALUE(rw);
 	}
+	THREAD_CONTENTION_DONE(&rw->lock_object);
 	if (__predict_true(!extra_work))
 		return;
 #ifdef ADAPTIVE_RWLOCKS
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 81e46fceed5e..bc8a1214689f 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -653,6 +653,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 	GIANT_SAVE(extra_work);
 #endif
 
+	THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
+
 	for (;;) {
 		if (x == SX_LOCK_UNLOCKED) {
 			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
@@ -872,6 +874,7 @@ retry_sleepq:
 			    __func__, sx);
 		x = SX_READ_VALUE(sx);
 	}
+	THREAD_CONTENTION_DONE(&sx->lock_object);
 	if (__predict_true(!extra_work))
 		return (error);
 #ifdef ADAPTIVE_SX
@@ -1074,6 +1077,8 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 	GIANT_SAVE(extra_work);
 #endif
 
+	THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
+
 	/*
 	 * As with rwlocks, we don't make any attempt to try to block
 	 * shared locks once there is an exclusive waiter.
@@ -1224,6 +1229,7 @@ retry_sleepq:
 			    __func__, sx);
 		x = SX_READ_VALUE(sx);
 	}
+	THREAD_CONTENTION_DONE(&sx->lock_object);
 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
 	if (__predict_true(!extra_work))
 		return (error);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index dcebb08956e6..3bc8546db594 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -87,7 +87,7 @@ _Static_assert(offsetof(struct thread, td_flags) == 0x108,
     "struct thread KBI td_flags");
 _Static_assert(offsetof(struct thread, td_pflags) == 0x114,
     "struct thread KBI td_pflags");
-_Static_assert(offsetof(struct thread, td_frame) == 0x4b0,
+_Static_assert(offsetof(struct thread, td_frame) == 0x4b8,
     "struct thread KBI td_frame");
 _Static_assert(offsetof(struct thread, td_emuldata) == 0x6c0,
     "struct thread KBI td_emuldata");
@@ -107,9 +107,9 @@ _Static_assert(offsetof(struct thread, td_flags) == 0x9c,
     "struct thread KBI td_flags");
 _Static_assert(offsetof(struct thread, td_pflags) == 0xa8,
     "struct thread KBI td_pflags");
-_Static_assert(offsetof(struct thread, td_frame) == 0x314,
+_Static_assert(offsetof(struct thread, td_frame) == 0x318,
     "struct thread KBI td_frame");
-_Static_assert(offsetof(struct thread, td_emuldata) == 0x358,
+_Static_assert(offsetof(struct thread, td_emuldata) == 0x35c,
     "struct thread KBI td_emuldata");
 _Static_assert(offsetof(struct proc, p_flag) == 0x6c,
     "struct proc KBI p_flag");
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 8c916173b4ef..dc94d12d16c4 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -277,6 +277,7 @@ struct thread {
 	int		td_rw_rlocks;	/* (k) Count of rwlock read locks. */
 	int		td_sx_slocks;	/* (k) Count of sx shared locks. */
 	int		td_lk_slocks;	/* (k) Count of lockmgr shared locks. */
+	struct lock_object *td_wantedlock; /* (k) Lock we are contending on */
 	struct turnstile *td_blocked;	/* (t) Lock thread is blocked on. */
 	const char	*td_lockname;	/* (t) Name of lock blocked on. */
 	LIST_HEAD(, turnstile) td_contested;	/* (q) Contested locks. */
@@ -1074,6 +1075,16 @@ extern pid_t pid_max;
 
 #define	THREAD_CAN_SLEEP()		((curthread)->td_no_sleeping == 0)
 
+#define	THREAD_CONTENDS_ON_LOCK(lo)		do {			\
+	MPASS(curthread->td_wantedlock == NULL);			\
+	curthread->td_wantedlock = lo;					\
+} while (0)
+
+#define	THREAD_CONTENTION_DONE(lo)		do {			\
+	MPASS(curthread->td_wantedlock == lo);				\
+	curthread->td_wantedlock = NULL;				\
+} while (0)
+
 #define	PIDHASH(pid)	(&pidhashtbl[(pid) & pidhash])
 #define	PIDHASHLOCK(pid) (&pidhashtbl_lock[((pid) & pidhashlock)])
 extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;