git: aa54113d8e1b - stable/15 - LinuxKPI: add scoped_guard(), spinlock guard support
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Fri, 27 Feb 2026 02:29:50 UTC
The branch stable/15 has been updated by bz:
URL: https://cgit.FreeBSD.org/src/commit/?id=aa54113d8e1bfc0442017cff97fcbc1b8c9f5c54
commit aa54113d8e1bfc0442017cff97fcbc1b8c9f5c54
Author: Bjoern A. Zeeb <bz@FreeBSD.org>
AuthorDate: 2026-01-20 22:54:30 +0000
Commit: Bjoern A. Zeeb <bz@FreeBSD.org>
CommitDate: 2026-02-26 23:05:09 +0000
LinuxKPI: add scoped_guard(), spinlock guard support
The "cleanup.h" implementation got a bit more complicated.
For one we now use a macro to concatenate a prefix, the name, and a
suffix for variable and function declarations. This was triggered
by the fact that the "guard_" prefix we used was confusing. We now
use a generic "cleanup_" which is only encoded in the single place
rather than all over the file.
As already indicated by the comment the DEFINE_LOCK_GUARD_0()
macro got split up and a _1 version which also takes a type got
implemented and is used for a spinlock variant used by rtw89(4)
via the new scoped_guard() bits.
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D54808
(cherry picked from commit 31393810a168b74cf13ace0e1d35dae6b4a12bf5)
---
sys/compat/linuxkpi/common/include/linux/cleanup.h | 127 ++++++++++++++++-----
.../linuxkpi/common/include/linux/spinlock.h | 10 ++
2 files changed, 109 insertions(+), 28 deletions(-)
diff --git a/sys/compat/linuxkpi/common/include/linux/cleanup.h b/sys/compat/linuxkpi/common/include/linux/cleanup.h
index 5bb146f082ed..fb21a81f121b 100644
--- a/sys/compat/linuxkpi/common/include/linux/cleanup.h
+++ b/sys/compat/linuxkpi/common/include/linux/cleanup.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2024-2025 The FreeBSD Foundation
+ * Copyright (c) 2024-2026 The FreeBSD Foundation
*
* This software was developed by Björn Zeeb under sponsorship from
* the FreeBSD Foundation.
@@ -10,18 +10,26 @@
#ifndef _LINUXKPI_LINUX_CLEANUP_H
#define _LINUXKPI_LINUX_CLEANUP_H
+#include <linux/err.h>
+
+#define CLEANUP_NAME(_n, _s) __CONCAT(__CONCAT(cleanup_, _n), _s)
+
#define __cleanup(_f) __attribute__((__cleanup__(_f)))
+#define DECLARE(_n, _x) \
+ CLEANUP_NAME(_n, _t) _x __cleanup(CLEANUP_NAME(_n, _destroy)) = \
+ CLEANUP_NAME(_n, _create)
+
/*
* Note: "_T" are special as they are exposed into common code for
* statements. Extra care should be taken when changing the code.
*/
#define DEFINE_GUARD(_n, _dt, _lock, _unlock) \
\
- typedef _dt guard_ ## _n ## _t; \
+ typedef _dt CLEANUP_NAME(_n, _t); \
\
static inline _dt \
- guard_ ## _n ## _create( _dt _T) \
+ CLEANUP_NAME(_n, _create)( _dt _T) \
{ \
_dt c; \
\
@@ -30,7 +38,7 @@
} \
\
static inline void \
- guard_ ## _n ## _destroy(_dt *t) \
+ CLEANUP_NAME(_n, _destroy)(_dt *t) \
{ \
_dt _T; \
\
@@ -39,9 +47,10 @@
}
/* We need to keep these calls unique. */
+#define _guard(_n, _x) \
+ DECLARE(_n, _x)
#define guard(_n) \
- guard_ ## _n ## _t guard_ ## _n ## _ ## __COUNTER__ \
- __cleanup(guard_ ## _n ## _destroy) = guard_ ## _n ## _create
+ _guard(_n, guard_ ## _n ## _ ## __COUNTER__)
#define DEFINE_FREE(_n, _t, _f) \
static inline void \
@@ -56,38 +65,100 @@
#define __free(_n) __cleanup(__free_##_n)
/*
- * Given this is a _0 version it should likely be broken up into parts.
- * But we have no idea what a _1, _2, ... version would do differently
- * until we see a call.
- * This is used for a not-real-type (rcu). We use a bool to "simulate"
- * the lock held. Also _T still special, may not always be used, so tag
- * with __unused (or better the LinuxKPI __maybe_unused).
+ * Our initial version got broken up. Some simplifications like using
+ * "bool" for the lock had to be changed to a more general type.
+ * _T is still special and, like other bits, may not always be used,
+ * so tag with __unused (or better the LinuxKPI __maybe_unused).
*/
-#define DEFINE_LOCK_GUARD_0(_n, _lock, _unlock, ...) \
+#define _DEFINE_LOCK_GUARD_0(_n, _lock) \
+ static inline CLEANUP_NAME(_n, _t) \
+ CLEANUP_NAME(_n, _create)(void) \
+ { \
+ CLEANUP_NAME(_n, _t) _tmp; \
+ CLEANUP_NAME(_n, _t) *_T __maybe_unused; \
+ \
+ _tmp.lock = (void *)1; \
+ _T = &_tmp; \
+ _lock; \
+ return (_tmp); \
+ }
+
+#define _DEFINE_LOCK_GUARD_1(_n, _type, _lock) \
+ static inline CLEANUP_NAME(_n, _t) \
+ CLEANUP_NAME(_n, _create)(_type *l) \
+ { \
+ CLEANUP_NAME(_n, _t) _tmp; \
+ CLEANUP_NAME(_n, _t) *_T __maybe_unused; \
\
+ _tmp.lock = l; \
+ _T = &_tmp; \
+ _lock; \
+ return (_tmp); \
+ }
+
+#define _GUARD_IS_ERR(_v) \
+ ({ \
+ uintptr_t x = (uintptr_t)(void *)(_v); \
+ IS_ERR_VALUE(x); \
+ })
+
+#define __is_cond_ptr(_n) \
+ CLEANUP_NAME(_n, _is_cond)
+#define __guard_ptr(_n) \
+ CLEANUP_NAME(_n, _ptr)
+
+#define _DEFINE_CLEANUP_IS_CONDITIONAL(_n, _b) \
+ static const bool CLEANUP_NAME(_n, _is_cond) __maybe_unused = _b
+
+#define _DEFINE_GUARD_LOCK_PTR(_n, _lp) \
+ static inline void * \
+ CLEANUP_NAME(_n, _lock_ptr)(CLEANUP_NAME(_n, _t) *_T) \
+ { \
+ void *_p; \
+ \
+ _p = (void *)(uintptr_t)*(_lp); \
+ if (IS_ERR(_p)) \
+ _p = NULL; \
+ return (_p); \
+ }
+
+#define _DEFINE_UNLOCK_GUARD(_n, _type, _unlock, ...) \
typedef struct { \
- bool lock; \
+ _type *lock; \
__VA_ARGS__; \
- } guard_ ## _n ## _t; \
+ } CLEANUP_NAME(_n, _t); \
\
static inline void \
- guard_ ## _n ## _destroy(guard_ ## _n ## _t *_T) \
+ CLEANUP_NAME(_n, _destroy)(CLEANUP_NAME(_n, _t) *_T) \
{ \
- if (_T->lock) { \
+ if (!_GUARD_IS_ERR(_T->lock)) { \
_unlock; \
} \
} \
\
- static inline guard_ ## _n ## _t \
- guard_ ## _n ## _create(void) \
- { \
- guard_ ## _n ## _t _tmp; \
- guard_ ## _n ## _t *_T __maybe_unused; \
- \
- _tmp.lock = true; \
- _T = &_tmp; \
- _lock; \
- return (_tmp); \
- }
+ _DEFINE_GUARD_LOCK_PTR(_n, &_T->lock)
+
+#define DEFINE_LOCK_GUARD_0(_n, _lock, _unlock, ...) \
+ _DEFINE_CLEANUP_IS_CONDITIONAL(_n, false); \
+ _DEFINE_UNLOCK_GUARD(_n, void, _unlock, __VA_ARGS__) \
+ _DEFINE_LOCK_GUARD_0(_n, _lock)
+
+/* This allows the type to be set. */
+#define DEFINE_LOCK_GUARD_1(_n, _t, _lock, _unlock, ...) \
+ _DEFINE_CLEANUP_IS_CONDITIONAL(_n, false); \
+ _DEFINE_UNLOCK_GUARD(_n, _t, _unlock, __VA_ARGS__) \
+ _DEFINE_LOCK_GUARD_1(_n, _t, _lock)
+
+#define _scoped_guard(_n, _l, ...) \
+ for (DECLARE(_n, _scoped)(__VA_ARGS__); \
+ 1 /*__guard_ptr(_n)(&_scoped) || !__is_cond_ptr(_n) */; \
+ ({ goto _l; })) \
+ if (0) { \
+_l: \
+ break; \
+ } else
+
+#define scoped_guard(_n, ...) \
+ _scoped_guard(_n, ___label_ ## __COUNTER__, ##__VA_ARGS__)
#endif /* _LINUXKPI_LINUX_CLEANUP_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/spinlock.h b/sys/compat/linuxkpi/common/include/linux/spinlock.h
index dc10b0457153..a08edcf8f417 100644
--- a/sys/compat/linuxkpi/common/include/linux/spinlock.h
+++ b/sys/compat/linuxkpi/common/include/linux/spinlock.h
@@ -36,6 +36,7 @@
#include <sys/mutex.h>
#include <sys/kdb.h>
+#include <linux/cleanup.h>
#include <linux/compiler.h>
#include <linux/rwlock.h>
#include <linux/bottom_half.h>
@@ -178,4 +179,13 @@ _atomic_dec_and_lock_irqsave(atomic_t *cnt, spinlock_t *lock,
return (0);
}
+/*
+ * cleanup.h related pre-defined cases.
+ */
+DEFINE_LOCK_GUARD_1(spinlock_irqsave,
+ spinlock_t,
+ spin_lock_irqsave(_T->lock, _T->flags),
+ spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
#endif /* _LINUXKPI_LINUX_SPINLOCK_H_ */