git: 3c84b4b35f28 - main - kern: move __always_inline to canonical position

From: Ryan Libby <rlibby@FreeBSD.org>
Date: Mon, 24 Jun 2024 17:07:25 UTC
The branch main has been updated by rlibby:

URL: https://cgit.FreeBSD.org/src/commit/?id=3c84b4b35f28ab2d0c3798dd567de05ed020cdca

commit 3c84b4b35f28ab2d0c3798dd567de05ed020cdca
Author:     Ryan Libby <rlibby@FreeBSD.org>
AuthorDate: 2024-06-24 17:05:58 +0000
Commit:     Ryan Libby <rlibby@FreeBSD.org>
CommitDate: 2024-06-24 17:05:58 +0000

    kern: move __always_inline to canonical position
    
    Ahead of including the inline keyword in the __always_inline macro,
    move __always_inline to the canonical position, where inline goes.
    
    Reviewed by:    kib, olce
    Sponsored by:   Dell EMC Isilon
    Differential Revision:  https://reviews.freebsd.org/D45708
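    
    For illustration, a minimal standalone sketch of the placement change;
    it is not taken from the tree, and the stand-in __always_inline macro
    and the example function are hypothetical, defined only so the snippet
    compiles on its own:
    
    #include <stdbool.h>
    #include <stdint.h>
    
    /*
     * Hypothetical stand-in for the <sys/cdefs.h> macro, assuming a
     * definition that folds the inline keyword into the attribute.
     */
    #ifndef __always_inline
    #define	__always_inline	inline __attribute__((__always_inline__))
    #endif
    
    /*
     * Canonical placement: the attribute sits where the inline keyword
     * goes, ahead of the return type.  The old form in these files was
     * "static bool __always_inline", with the attribute after the type.
     */
    static __always_inline bool
    example_can_share(uintptr_t x)
    {
    
    	return (x == 0);
    }
    
    Once inline is part of the macro's expansion, the old placement would
    expand to the likes of "static bool inline", which still compiles but
    puts the function specifier after the type specifier, exactly the
    ordering the canonical position avoids.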
---
 sys/kern/kern_lock.c   | 11 ++++++-----
 sys/kern/kern_rwlock.c |  6 +++---
 sys/kern/kern_sx.c     |  6 +++---
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 1b4d21f064a7..4700ee0f8f98 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -115,7 +115,7 @@ CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
 	}								\
 } while (0)
 
-static bool __always_inline
+static __always_inline bool
 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
 {
 
@@ -180,9 +180,10 @@ struct lockmgr_wait {
 	int itimo;
 };
 
-static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
+static __always_inline bool lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
     int flags, bool fp);
-static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
+static __always_inline bool lockmgr_sunlock_try(struct lock *lk,
+    uintptr_t *xp);
 
 static void
 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
@@ -511,7 +512,7 @@ lockdestroy(struct lock *lk)
 	lock_destroy(&lk->lock_object);
 }
 
-static bool __always_inline
+static __always_inline bool
 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
 {
 
@@ -531,7 +532,7 @@ lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
 	return (false);
 }
 
-static bool __always_inline
+static __always_inline bool
 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
 {
 
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 29767f09b304..f53c69b5e6ec 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -384,7 +384,7 @@ _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
  * is unlocked and has no writer waiters or spinners.  Failing otherwise
  * prioritizes writers before readers.
  */
-static bool __always_inline
+static __always_inline bool
 __rw_can_read(struct thread *td, uintptr_t v, bool fp)
 {
 
@@ -396,7 +396,7 @@ __rw_can_read(struct thread *td, uintptr_t v, bool fp)
 	return (false);
 }
 
-static bool __always_inline
+static __always_inline bool
 __rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp, bool fp
     LOCK_FILE_LINE_ARG_DEF)
 {
@@ -742,7 +742,7 @@ __rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
 	return (__rw_try_rlock_int(rw LOCK_FILE_LINE_ARG));
 }
 
-static bool __always_inline
+static __always_inline bool
 __rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp)
 {
 
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index d302fa45161e..18e6ba232c4a 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -984,7 +984,7 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 		kick_proc0();
 }
 
-static bool __always_inline
+static __always_inline bool
 __sx_can_read(struct thread *td, uintptr_t x, bool fp)
 {
 
@@ -996,7 +996,7 @@ __sx_can_read(struct thread *td, uintptr_t x, bool fp)
 	return (false);
 }
 
-static bool __always_inline
+static __always_inline bool
 __sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp
     LOCK_FILE_LINE_ARG_DEF)
 {
@@ -1306,7 +1306,7 @@ _sx_slock(struct sx *sx, int opts, const char *file, int line)
 	return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
 }
 
-static bool __always_inline
+static __always_inline bool
 _sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp)
 {