PERFORCE change 104865 for review
Oleksandr Tymoshenko
gonzo at FreeBSD.org
Wed Aug 23 21:58:47 UTC 2006
http://perforce.freebsd.org/chv.cgi?CH=104865
Change 104865 by gonzo at gonzo_hideout on 2006/08/23 21:58:17
o Replace intr disable/enable approach to atomic operations with
load linked/store conditional. It is suitable for both kernel
and userland.
o Throw away atomic ops for char/short. They seemed to be unnecessary.
Affected files ...
.. //depot/projects/mips2/src/sys/mips/include/atomic.h#4 edit
Differences ...
==== //depot/projects/mips2/src/sys/mips/include/atomic.h#4 (text+ko) ====
@@ -1,4 +1,5 @@
/*-
+ * Copyright (c) 2002-2004 Juli Mallett. All rights reserved.
* Copyright (c) 2006 John Baldwin <jhb at FreeBSD.org>
* All rights reserved.
*
@@ -38,16 +39,6 @@
* Various simple arithmetic on memory which is atomic in the presence
* of interrupts and multiple processors.
*
- * atomic_set_char(P, V) (*(u_char*)(P) |= (V))
- * atomic_clear_char(P, V) (*(u_char*)(P) &= ~(V))
- * atomic_add_char(P, V) (*(u_char*)(P) += (V))
- * atomic_subtract_char(P, V) (*(u_char*)(P) -= (V))
- *
- * atomic_set_short(P, V) (*(u_short*)(P) |= (V))
- * atomic_clear_short(P, V) (*(u_short*)(P) &= ~(V))
- * atomic_add_short(P, V) (*(u_short*)(P) += (V))
- * atomic_subtract_short(P, V) (*(u_short*)(P) -= (V))
- *
* atomic_set_int(P, V) (*(u_int*)(P) |= (V))
* atomic_clear_int(P, V) (*(u_int*)(P) &= ~(V))
* atomic_add_int(P, V) (*(u_int*)(P) += (V))
@@ -62,28 +53,27 @@
*/
/*
- * The above functions are expanded inline in the statically-linked
- * kernel and kernel modules. For userland we will have to figure out
- * something different.
+ * Integer atomic operations, hardware calls this single.
*/
-#ifdef _KERNEL
+#define ATOMIC_OP(op, asmop) \
+static __inline void \
+atomic_ ## op ## _int(volatile u_int *p, u_int val) \
+{ \
+ u_int temp; \
+ \
+ __asm __volatile ( \
+ "1:\n\t" \
+ "ll %[temp], %[p]\n\t" \
+ asmop "\n\t" \
+ "sc %[temp], %[p]\n\t" \
+ "beqz %[temp], 1b\n\t" \
+ : [temp] "=&r"(temp), [p] "+m"(*p) \
+ : [val] "r"(val) \
+ : "memory" \
+ ); \
+}
-/*
- * The assembly is volatilized to demark potential before-and-after side
- * effects if an interrupt or SMP collision were to occur.
- */
-#define ATOMIC_OP(NAME, TYPE, OP, V) \
-static __inline void \
-atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
-{ \
- register_t s; \
- \
- s = intr_disable(); \
- *p OP V; \
- intr_restore(s); \
-} \
-struct __hack
/*
* Atomic compare and set, used by the mutex functions
@@ -96,15 +86,25 @@
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
- register_t s;
+ u_int temp;
int res;
- s = intr_disable();
- res = (*dst == exp);
- if (res)
- *dst = src;
- intr_restore(s);
- return (res);
+ __asm __volatile (
+ "1:\n\t"
+ "move %[res], $0\n\t"
+ "ll %[temp], %[dst]\n\t"
+ "bne %[temp], %[exp], 2f\n\t"
+ "move %[temp], %[src]\n\t"
+ "li %[res], 1\n\t"
+ "sc %[temp], %[dst]\n\t"
+ "beqz %[temp], 1b\n\t"
+ "2:\n\t"
+ : [res] "=&r"(res), [temp] "=&r"(temp), [dst] "+m"(*dst)
+ : [exp] "r"(exp), [src] "r"(src)
+ : "memory"
+ );
+
+ return res;
}
/*
@@ -114,97 +114,73 @@
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{
- register_t s;
- u_int res;
+ u_int temp, res;
+
+ __asm __volatile (
+ "1:\n\t"
+ "ll %[temp], %[p]\n\t"
+ "move %[res], %[temp]\n\t"
+ "addu %[temp], %[v]\n\t"
+ "sc %[temp], %[p]\n\t"
+ "beqz %[temp], 1b\n\t"
+ : [res] "=&r"(res), [temp] "=&r"(temp), [p] "+m"(*p)
+ : [v] "r"(v)
+ : "memory"
+ );
+
+ return res;
- s = intr_disable();
- res = (*p += v);
- intr_restore(s);
- return (res);
}
/* Read the current value and store a zero in the destination. */
static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
- register_t s;
- u_int result;
+ u_int temp, res;
+
+ __asm __volatile (
+ "1:\n\t"
+ "ll %[temp], %[p]\n\t"
+ "move %[res], %[temp]\n\t"
+ "move %[temp], $0\n\t"
+ "sc %[temp], %[p]\n\t"
+ "beqz %[temp], 1b\n\t"
+ : [res] "=&r"(res), [temp] "=&r"(temp), [p] "+m"(*addr)
+ :
+ : "memory"
+ );
- s = intr_disable();
- result = *addr;
- *addr = 0;
- /*
- * XXXMIPS: I think it's a mistake. We should have intr_restore()
- * here, shouldn't we?
- */
-#if 0
- intr_disable();
-#endif
- intr_restore(s);
- return (result);
+ return res;
}
#define ATOMIC_STORE_LOAD(TYPE) \
static __inline u_##TYPE \
atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
{ \
- return (*p); \
+ u_##TYPE res; \
+ res = *p; \
+ mips_write_membar(); \
+ return res; \
} \
\
static __inline void \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
+ mips_read_membar(); \
*p = v; \
} \
struct __hack
-#else
-
-/* XXX: userland */
-
-#endif /* _KERNEL */
-
-ATOMIC_OP(set, char, |=, v);
-ATOMIC_OP(clear, char, &=, ~v);
-ATOMIC_OP(add, char, +=, v);
-ATOMIC_OP(subtract, char, -=, v);
+ATOMIC_OP(add, "addu %[temp], %[val]")
+ATOMIC_OP(clear, "not %[val] ; and %[temp], %[val] ; not %[val]")
+ATOMIC_OP(set, "or %[temp], %[val]")
+ATOMIC_OP(subtract, "subu %[temp], %[val]")
-ATOMIC_OP(set, short, |=, v);
-ATOMIC_OP(clear, short, &=, ~v);
-ATOMIC_OP(add, short, +=, v);
-ATOMIC_OP(subtract, short, -=, v);
-
-ATOMIC_OP(set, int, |=, v);
-ATOMIC_OP(clear, int, &=, ~v);
-ATOMIC_OP(add, int, +=, v);
-ATOMIC_OP(subtract, int, -=, v);
-
-ATOMIC_STORE_LOAD(char);
-ATOMIC_STORE_LOAD(short);
ATOMIC_STORE_LOAD(int);
#undef ATOMIC_OP
#undef ATOMIC_STORE_LOAD
-/* Acquire and release variants are identical to the normal ones. */
-#define atomic_set_acq_char atomic_set_char
-#define atomic_set_rel_char atomic_set_char
-#define atomic_clear_acq_char atomic_clear_char
-#define atomic_clear_rel_char atomic_clear_char
-#define atomic_add_acq_char atomic_add_char
-#define atomic_add_rel_char atomic_add_char
-#define atomic_subtract_acq_char atomic_subtract_char
-#define atomic_subtract_rel_char atomic_subtract_char
-
-#define atomic_set_acq_short atomic_set_short
-#define atomic_set_rel_short atomic_set_short
-#define atomic_clear_acq_short atomic_clear_short
-#define atomic_clear_rel_short atomic_clear_short
-#define atomic_add_acq_short atomic_add_short
-#define atomic_add_rel_short atomic_add_short
-#define atomic_subtract_acq_short atomic_subtract_short
-#define atomic_subtract_rel_short atomic_subtract_short
-
#define atomic_set_acq_int atomic_set_int
#define atomic_set_rel_int atomic_set_int
#define atomic_clear_acq_int atomic_clear_int
@@ -216,38 +192,6 @@
#define atomic_cmpset_acq_int atomic_cmpset_int
#define atomic_cmpset_rel_int atomic_cmpset_int
-/* Operations on 8-bit bytes. */
-#define atomic_set_8 atomic_set_char
-#define atomic_set_acq_8 atomic_set_acq_char
-#define atomic_set_rel_8 atomic_set_rel_char
-#define atomic_clear_8 atomic_clear_char
-#define atomic_clear_acq_8 atomic_clear_acq_char
-#define atomic_clear_rel_8 atomic_clear_rel_char
-#define atomic_add_8 atomic_add_char
-#define atomic_add_acq_8 atomic_add_acq_char
-#define atomic_add_rel_8 atomic_add_rel_char
-#define atomic_subtract_8 atomic_subtract_char
-#define atomic_subtract_acq_8 atomic_subtract_acq_char
-#define atomic_subtract_rel_8 atomic_subtract_rel_char
-#define atomic_load_acq_8 atomic_load_acq_char
-#define atomic_store_rel_8 atomic_store_rel_char
-
-/* Operations on 16-bit words. */
-#define atomic_set_16 atomic_set_short
-#define atomic_set_acq_16 atomic_set_acq_short
-#define atomic_set_rel_16 atomic_set_rel_short
-#define atomic_clear_16 atomic_clear_short
-#define atomic_clear_acq_16 atomic_clear_acq_short
-#define atomic_clear_rel_16 atomic_clear_rel_short
-#define atomic_add_16 atomic_add_short
-#define atomic_add_acq_16 atomic_add_acq_short
-#define atomic_add_rel_16 atomic_add_rel_short
-#define atomic_subtract_16 atomic_subtract_short
-#define atomic_subtract_acq_16 atomic_subtract_acq_short
-#define atomic_subtract_rel_16 atomic_subtract_rel_short
-#define atomic_load_acq_16 atomic_load_acq_short
-#define atomic_store_rel_16 atomic_store_rel_short
-
/* Operations on 32-bit double words. */
#define atomic_set_32 atomic_set_int
#define atomic_set_acq_32 atomic_set_acq_int
More information about the p4-projects
mailing list