[PATCH] Change atomic operations to use fences for memory barriers

John Baldwin jhb at FreeBSD.org
Mon Feb 7 14:25:50 PST 2005


On Friday 04 February 2005 03:58 pm, John Baldwin wrote:
> The patch below changes the atomic operations on amd64 to use the cheaper
> fence instructions for memory barriers.  I'd like people to test it to see
> if 1) it breaks things or not, and 2) if it impacts performance either in a
> good way or a bad way.  For this last I'm curious about both UP and SMP as
> my initial guess is that it will help on UP but might hurt on SMP.

Would help if I included it:

--- //depot/vendor/freebsd/src/sys/amd64/include/atomic.h	2003/11/21 03:05:42
+++ //depot/projects/smpng/sys/amd64/include/atomic.h	2004/11/19 20:16:10
@@ -70,7 +70,7 @@
 int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
 int atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src);
 
-#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
+#define	ATOMIC_STORE_LOAD(TYPE)					\
 u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
 void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
 
@@ -162,30 +162,22 @@
 
 #if defined(__GNUC__)
 
-#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
+#define ATOMIC_STORE_LOAD(TYPE)				\
 static __inline u_##TYPE				\
 atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
 {							\
-	u_##TYPE res;					\
+	u_##TYPE v;					\
 							\
-	__asm __volatile(__XSTRING(MPLOCKED) LOP	\
-	: "=a" (res),			/* 0 (result) */\
-	  "+m" (*p)			/* 1 */		\
-	: : "memory");				 	\
-							\
-	return (res);					\
+	v = *p;						\
+	__asm __volatile("lfence" ::: "memory");	\
+	return (v);					\
 }							\
 							\
-/*							\
- * The XCHG instruction asserts LOCK automagically.	\
- */							\
 static __inline void					\
 atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 {							\
-	__asm __volatile(SOP				\
-	: "+m" (*p),			/* 0 */		\
-	  "+r" (v)			/* 1 */		\
-	: : "memory");				 	\
+	__asm __volatile("sfence" ::: "memory");	\
+	*p = v;						\
 }							\
 struct __hack
 
@@ -194,7 +186,7 @@
 extern int atomic_cmpset_int(volatile u_int *, u_int, u_int);
 extern int atomic_cmpset_long(volatile u_long *, u_long, u_long);
 
-#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)				\
+#define ATOMIC_STORE_LOAD(TYPE)						\
 extern u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
 extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
 
@@ -222,10 +214,10 @@
 ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir",  v);
 ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);
 
-ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
-ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
-ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
-ATOMIC_STORE_LOAD(long,	"cmpxchgq %0,%1",  "xchgq %1,%0");
+ATOMIC_STORE_LOAD(char);
+ATOMIC_STORE_LOAD(short);
+ATOMIC_STORE_LOAD(int);
+ATOMIC_STORE_LOAD(long);
 
 #undef ATOMIC_ASM
 #undef ATOMIC_STORE_LOAD
--- //depot/vendor/freebsd/src/sys/amd64/include/bus_amd64.h	2005/01/05 20:20:40
+++ //depot/projects/smpng/sys/amd64/include/bus_amd64.h	2005/01/05 22:38:38
@@ -1215,9 +1215,9 @@
 {
 #ifdef	__GNUC__
 	if (flags & BUS_SPACE_BARRIER_READ)
-		__asm __volatile("lock; addl $0,0(%%rsp)" : : : "memory");
+		__asm __volatile("lfence" : : : "memory");
 	else
-		__asm __volatile("" : : : "memory");
+		__asm __volatile("sfence" : : : "memory");
 #endif
 }
 
-- 
John Baldwin <jhb at FreeBSD.org>  <><  http://www.FreeBSD.org/~jhb/
"Power Users Use the Power to Serve"  =  http://www.FreeBSD.org


More information about the freebsd-amd64 mailing list