git: a474e53d03ca - main - riscv: Add implementations of atomic_testand(set|clear)_(32|64|long)
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Tue, 19 Nov 2024 15:28:41 UTC
The branch main has been updated by jhb:
URL: https://cgit.FreeBSD.org/src/commit/?id=a474e53d03ca8c78696fc222520de7d6876cc530
commit a474e53d03ca8c78696fc222520de7d6876cc530
Author: John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2024-11-19 15:19:54 +0000
Commit: John Baldwin <jhb@FreeBSD.org>
CommitDate: 2024-11-19 15:20:32 +0000
riscv: Add implementations of atomic_testand(set|clear)_(32|64|long)
These use amoor and amoand rather than a loop.
Also define atomic_testandset_acq_(64|long) using amoor.aq.
Reviewed by: mhorne, kib
Sponsored by: AFRL, DARPA
Differential Revision: https://reviews.freebsd.org/D47627
---
sys/riscv/include/atomic.h | 73 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 73 insertions(+)
diff --git a/sys/riscv/include/atomic.h b/sys/riscv/include/atomic.h
index 03c1327d74da..38261e95cf31 100644
--- a/sys/riscv/include/atomic.h
+++ b/sys/riscv/include/atomic.h
@@ -266,6 +266,34 @@ atomic_readandclear_32(volatile uint32_t *p)
return (ret);
}
+static __inline int
+atomic_testandclear_32(volatile uint32_t *p, u_int val)
+{
+ uint32_t mask, old;
+
+ mask = 1u << (val & 31);
+ __asm __volatile("amoand.w %0, %2, %1"
+ : "=&r" (old), "+A" (*p)
+ : "r" (~mask)
+ : "memory");
+
+ return ((old & mask) != 0);
+}
+
+static __inline int
+atomic_testandset_32(volatile uint32_t *p, u_int val)
+{
+ uint32_t mask, old;
+
+ mask = 1u << (val & 31);
+ __asm __volatile("amoor.w %0, %2, %1"
+ : "=&r" (old), "+A" (*p)
+ : "r" (mask)
+ : "memory");
+
+ return ((old & mask) != 0);
+}
+
#define atomic_add_int atomic_add_32
#define atomic_clear_int atomic_clear_32
#define atomic_cmpset_int atomic_cmpset_32
@@ -437,6 +465,48 @@ atomic_readandclear_64(volatile uint64_t *p)
return (ret);
}
+static __inline int
+atomic_testandclear_64(volatile uint64_t *p, u_int val)
+{
+ uint64_t mask, old;
+
+ mask = 1ul << (val & 63);
+ __asm __volatile("amoand.d %0, %2, %1"
+ : "=&r" (old), "+A" (*p)
+ : "r" (~mask)
+ : "memory");
+
+ return ((old & mask) != 0);
+}
+
+static __inline int
+atomic_testandset_64(volatile uint64_t *p, u_int val)
+{
+ uint64_t mask, old;
+
+ mask = 1ul << (val & 63);
+ __asm __volatile("amoor.d %0, %2, %1"
+ : "=&r" (old), "+A" (*p)
+ : "r" (mask)
+ : "memory");
+
+ return ((old & mask) != 0);
+}
+
+static __inline int
+atomic_testandset_acq_64(volatile uint64_t *p, u_int val)
+{
+ uint64_t mask, old;
+
+ mask = 1ul << (val & 63);
+ __asm __volatile("amoor.d.aq %0, %2, %1"
+ : "=&r" (old), "+A" (*p)
+ : "r" (mask)
+ : "memory");
+
+ return ((old & mask) != 0);
+}
+
static __inline uint32_t
atomic_swap_32(volatile uint32_t *p, uint32_t val)
{
@@ -474,6 +544,9 @@ atomic_swap_64(volatile uint64_t *p, uint64_t val)
#define atomic_set_long atomic_set_64
#define atomic_subtract_long atomic_subtract_64
#define atomic_swap_long atomic_swap_64
/* long is 64 bits on riscv, so the _long ops alias the 64-bit ones. */
#define	atomic_testandclear_long	atomic_testandclear_64
#define	atomic_testandset_long		atomic_testandset_64
#define	atomic_testandset_acq_long	atomic_testandset_acq_64
#define atomic_add_ptr atomic_add_64
#define atomic_clear_ptr atomic_clear_64