git: b09c2d924fdb - stable/13 - rtld: style the rest of rtld_lock.c

From: Konstantin Belousov <kib@FreeBSD.org>
Date: Sun, 18 Sep 2022 00:34:43 UTC
The branch stable/13 has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=b09c2d924fdbe956e0eb260e5ce73af4683185fe

commit b09c2d924fdbe956e0eb260e5ce73af4683185fe
Author:     Konstantin Belousov <kib@FreeBSD.org>
AuthorDate: 2022-08-30 12:49:15 +0000
Commit:     Konstantin Belousov <kib@FreeBSD.org>
CommitDate: 2022-09-18 00:33:45 +0000

    rtld: style the rest of rtld_lock.c
    
    (cherry picked from commit 7444f54bd326780ffafc0fec8ef49cad3e13baef)
---
 libexec/rtld-elf/rtld_lock.c | 61 ++++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 30 deletions(-)

diff --git a/libexec/rtld-elf/rtld_lock.c b/libexec/rtld-elf/rtld_lock.c
index 8b9a6a51e061..9da8a8daccf9 100644
--- a/libexec/rtld-elf/rtld_lock.c
+++ b/libexec/rtld-elf/rtld_lock.c
@@ -89,39 +89,39 @@ static uint32_t fsigblock;
 static void *
 def_lock_create(void)
 {
-    void *base;
-    char *p;
-    uintptr_t r;
-    Lock *l;
-
-    /*
-     * Arrange for the lock to occupy its own cache line.  First, we
-     * optimistically allocate just a cache line, hoping that malloc
-     * will give us a well-aligned block of memory.  If that doesn't
-     * work, we allocate a larger block and take a well-aligned cache
-     * line from it.
-     */
-    base = xmalloc(CACHE_LINE_SIZE);
-    p = (char *)base;
-    if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
-	free(base);
-	base = xmalloc(2 * CACHE_LINE_SIZE);
-	p = (char *)base;
-	if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
-	    p += CACHE_LINE_SIZE - r;
-    }
-    l = (Lock *)p;
-    l->base = base;
-    l->lock = 0;
-    return l;
+	void *base;
+	char *p;
+	uintptr_t r;
+	Lock *l;
+
+	/*
+	 * Arrange for the lock to occupy its own cache line.  First, we
+	 * optimistically allocate just a cache line, hoping that malloc
+	 * will give us a well-aligned block of memory.  If that doesn't
+	 * work, we allocate a larger block and take a well-aligned cache
+	 * line from it.
+	 */
+	base = xmalloc(CACHE_LINE_SIZE);
+	p = base;
+	if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
+		free(base);
+		base = xmalloc(2 * CACHE_LINE_SIZE);
+		p = base;
+		if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
+			p += CACHE_LINE_SIZE - r;
+	}
+	l = (Lock *)p;
+	l->base = base;
+	l->lock = 0;
+	return (l);
 }
 
 static void
 def_lock_destroy(void *lock)
 {
-    Lock *l = (Lock *)lock;
+	Lock *l = lock;
 
-    free(l->base);
+	free(l->base);
 }
 
 static void
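
For context (not part of the commit), a minimal standalone sketch of the
over-allocate-and-align fallback described in the comment above, assuming a
64-byte cache line and with plain malloc(3) standing in for rtld's xmalloc():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	CACHE_LINE_SIZE	64	/* assumed here; rtld gets this from MD headers */

struct lock {
	void	*base;		/* original allocation, kept for free() */
	int	lock;
};

/*
 * Allocate twice the cache line size, then round the usable pointer
 * up to the next cache-line boundary so the lock word does not share
 * a line with unrelated data.
 */
static struct lock *
lock_create(void)
{
	void *base;
	char *p;
	uintptr_t r;
	struct lock *l;

	base = malloc(2 * CACHE_LINE_SIZE);
	if (base == NULL)
		abort();
	p = base;
	if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
		p += CACHE_LINE_SIZE - r;
	l = (struct lock *)p;
	l->base = base;		/* remember what to pass to free() */
	l->lock = 0;
	return (l);
}

int
main(void)
{
	struct lock *l = lock_create();

	printf("lock at %p, aligned: %d\n", (void *)l,
	    (uintptr_t)l % CACHE_LINE_SIZE == 0);
	free(l->base);
	return (0);
}
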
@@ -189,9 +189,8 @@ def_wlock_acquire(void *lock)
 static void
 def_lock_release(void *lock)
 {
-	Lock *l;
+	Lock *l = lock;
 
-	l = (Lock *)lock;
 	atomic_add_rel_int(&l->lock, -((l->lock & WAFLAG) == 0 ?
 	    RC_INCR : WAFLAG));
 	if (ld_fast_sigblock)
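
For context (not part of the commit): the lock word packs a writer-active
flag together with a reader count, so a release subtracts either WAFLAG or
RC_INCR depending on who held the lock. A minimal C11 sketch of the same
release step, assuming the WAFLAG and RC_INCR values defined earlier in
rtld_lock.c:

#include <stdatomic.h>

#define	WAFLAG	0x1	/* a writer holds the lock */
#define	RC_INCR	0x2	/* increment for one reader */

struct lock {
	atomic_int lock;
};

/*
 * Drop one reader reference, or clear the writer flag if a writer
 * held the lock.  The release ordering publishes the critical
 * section before the lock becomes available, which is the role
 * atomic_add_rel_int() plays in the rtld code.
 */
static void
lock_release(struct lock *l)
{
	int v = atomic_load_explicit(&l->lock, memory_order_relaxed);

	atomic_fetch_sub_explicit(&l->lock,
	    (v & WAFLAG) == 0 ? RC_INCR : WAFLAG, memory_order_release);
}

The matching acquire side would add RC_INCR (a reader) or set WAFLAG (the
writer) with acquire semantics before entering the critical section.
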
@@ -204,6 +203,7 @@ static int
 def_thread_set_flag(int mask)
 {
 	int old_val = thread_flag;
+
 	thread_flag |= mask;
 	return (old_val);
 }
@@ -212,6 +212,7 @@ static int
 def_thread_clr_flag(int mask)
 {
 	int old_val = thread_flag;
+
 	thread_flag &= ~mask;
 	return (old_val);
 }
@@ -225,7 +226,7 @@ static struct RtldLockInfo deflockinfo;
 static __inline int
 thread_mask_set(int mask)
 {
-	return lockinfo.thread_set_flag(mask);
+	return (lockinfo.thread_set_flag(mask));
 }
 
 static __inline void