git: 27fb895f9e81 - stable/14 - mtx: Avoid nested panics on lock class mismatch assertions
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Tue, 29 Apr 2025 18:30:42 UTC
The branch stable/14 has been updated by jhb:
URL: https://cgit.FreeBSD.org/src/commit/?id=27fb895f9e81c57378f39d705e8a34caffaa9e93
commit 27fb895f9e81c57378f39d705e8a34caffaa9e93
Author: John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2025-03-12 14:24:35 +0000
Commit: John Baldwin <jhb@FreeBSD.org>
CommitDate: 2025-04-29 14:43:36 +0000
mtx: Avoid nested panics on lock class mismatch assertions
It is only (somewhat) safe to dereference lo_name if we know the mutex
has a specific lock class that is incorrect, not if it just has "some"
incorrect lock class. In particular, in the case of memory
overwritten with 0xdeadc0de, the lock class won't match either mutex
type. However, trying to dereference lo_name via a 0xdeadc0de pointer
triggers a nested panic while building the panicstr, which then prevents
a crash dump.
Reviewed by: olce, kib, markj
Sponsored by: AFRL, DARPA
Differential Revision: https://reviews.freebsd.org/D49313
(cherry picked from commit dba45599c498deed01e1c98acef74e183c1bbf8d)
---
sys/kern/kern_mutex.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 7f348530ed31..11ff7e1cacc4 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -270,7 +270,7 @@ __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
curthread, m->lock_object.lo_name, file, line));
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_lock() of destroyed mutex @ %s:%d", file, line));
- KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
+ KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
file, line));
WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
@@ -299,7 +299,7 @@ __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
- KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
+ KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
file, line));
WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
@@ -328,7 +328,7 @@ __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
- KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
+ KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
("mtx_lock_spin() of sleep mutex %s @ %s:%d",
m->lock_object.lo_name, file, line));
if (mtx_owned(m))
@@ -369,7 +369,7 @@ __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
- KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
+ KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
m->lock_object.lo_name, file, line));
KASSERT((opts & MTX_RECURSE) == 0,
@@ -394,7 +394,7 @@ __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
- KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
+ KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
m->lock_object.lo_name, file, line));
WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
@@ -432,7 +432,7 @@ _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
curthread, m->lock_object.lo_name, file, line));
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
- KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
+ KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
file, line));
@@ -806,7 +806,7 @@ thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
KASSERT(m->mtx_lock != MTX_DESTROYED,
("thread_lock() of destroyed mutex @ %s:%d", file, line));
- KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
+ KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
("thread_lock() of sleep mutex %s @ %s:%d",
m->lock_object.lo_name, file, line));
KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
@@ -1263,7 +1263,7 @@ mtx_spin_wait_unlocked(struct mtx *m)
KASSERT(m->mtx_lock != MTX_DESTROYED,
("%s() of destroyed mutex %p", __func__, m));
- KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
+ KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
("%s() of sleep mutex %p (%s)", __func__, m,
m->lock_object.lo_name));
KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
@@ -1289,8 +1289,8 @@ mtx_wait_unlocked(struct mtx *m)
KASSERT(m->mtx_lock != MTX_DESTROYED,
("%s() of destroyed mutex %p", __func__, m));
- KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
- ("%s() not a sleep mutex %p (%s)", __func__, m,
+ KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
+ ("%s() of spin mutex %p (%s)", __func__, m,
m->lock_object.lo_name));
KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
m->lock_object.lo_name));