svn commit: r197981 - in stable/8/sys: . amd64/include/xen cddl/contrib/opensolaris contrib/dev/acpica contrib/pf dev/xen/xenpci kern sys

Attilio Rao attilio at FreeBSD.org
Mon Oct 12 15:32:01 UTC 2009


Author: attilio
Date: Mon Oct 12 15:32:00 2009
New Revision: 197981
URL: http://svn.freebsd.org/changeset/base/197981

Log:
  MFC r197643, r197735:
  When releasing a read/shared lock we need to use a write memory barrier
  in order to avoid CPU instruction reordering on architectures that do
  not have strongly ordered writes.
  
  Approved by:	re (kib)

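A minimal sketch of the hazard being closed, assuming a hypothetical
lock word and protected field ("lock_word" and "shared_data" are
invented for illustration; this is not code from the commit):

	#include <sys/types.h>
	#include <machine/atomic.h>

	static volatile uintptr_t lock_word;	/* 1 == one shared holder */
	static int shared_data;

	static int
	read_then_sunlock(void)
	{
		int v;

		/* ... shared lock already held via an _acq acquire ... */
		v = shared_data;	/* load performed under the lock */

		/*
		 * With a plain atomic_cmpset_ptr(), a weakly ordered CPU
		 * may make the unlocking store visible before the load
		 * above has completed, so a writer could take the lock
		 * and modify shared_data while this reader still depends
		 * on it.  atomic_cmpset_rel_ptr() orders all of the
		 * reader's prior memory accesses before the store.
		 */
		atomic_cmpset_rel_ptr(&lock_word, 1, 0);
		return (v);
	}

The same pairing (acquire with _acq, release with _rel) is what the
hunks below apply to the lockmgr, rwlock, and sx shared-release paths.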
Modified:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)
  stable/8/sys/dev/xen/xenpci/   (props changed)
  stable/8/sys/kern/kern_lock.c
  stable/8/sys/kern/kern_rwlock.c
  stable/8/sys/kern/kern_sx.c
  stable/8/sys/sys/rwlock.h
  stable/8/sys/sys/sx.h

Modified: stable/8/sys/kern/kern_lock.c
==============================================================================
--- stable/8/sys/kern/kern_lock.c	Mon Oct 12 14:51:19 2009	(r197980)
+++ stable/8/sys/kern/kern_lock.c	Mon Oct 12 15:32:00 2009	(r197981)
@@ -238,7 +238,7 @@ wakeupshlk(struct lock *lk, const char *
 		 * and return.
 		 */
 		if (LK_SHARERS(x) > 1) {
-			if (atomic_cmpset_ptr(&lk->lk_lock, x,
+			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
 			    x - LK_ONE_SHARER))
 				break;
 			continue;
@@ -251,7 +251,7 @@ wakeupshlk(struct lock *lk, const char *
 		if ((x & LK_ALL_WAITERS) == 0) {
 			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
 			    LK_SHARERS_LOCK(1));
-			if (atomic_cmpset_ptr(&lk->lk_lock, x, LK_UNLOCKED))
+			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
 				break;
 			continue;
 		}
@@ -277,7 +277,7 @@ wakeupshlk(struct lock *lk, const char *
 			queue = SQ_SHARED_QUEUE;
 		}
 
-		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
+		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
 		    v)) {
 			sleepq_release(&lk->lock_object);
 			continue;

Modified: stable/8/sys/kern/kern_rwlock.c
==============================================================================
--- stable/8/sys/kern/kern_rwlock.c	Mon Oct 12 14:51:19 2009	(r197980)
+++ stable/8/sys/kern/kern_rwlock.c	Mon Oct 12 15:32:00 2009	(r197981)
@@ -538,7 +538,7 @@ _rw_runlock(struct rwlock *rw, const cha
 		 */
 		x = rw->rw_lock;
 		if (RW_READERS(x) > 1) {
-			if (atomic_cmpset_ptr(&rw->rw_lock, x,
+			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
 			    x - RW_ONE_READER)) {
 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
 					CTR4(KTR_LOCK,
@@ -556,7 +556,8 @@ _rw_runlock(struct rwlock *rw, const cha
 		if (!(x & RW_LOCK_WAITERS)) {
 			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
 			    RW_READERS_LOCK(1));
-			if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
+			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
+			    RW_UNLOCKED)) {
 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
 					CTR2(KTR_LOCK, "%s: %p last succeeded",
 					    __func__, rw);
@@ -594,7 +595,7 @@ _rw_runlock(struct rwlock *rw, const cha
 			x |= (v & RW_LOCK_READ_WAITERS);
 		} else
 			queue = TS_SHARED_QUEUE;
-		if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
+		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
 		    x)) {
 			turnstile_chain_unlock(&rw->lock_object);
 			continue;

Modified: stable/8/sys/kern/kern_sx.c
==============================================================================
--- stable/8/sys/kern/kern_sx.c	Mon Oct 12 14:51:19 2009	(r197980)
+++ stable/8/sys/kern/kern_sx.c	Mon Oct 12 15:32:00 2009	(r197981)
@@ -928,7 +928,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
 		 * so, just drop one and return.
 		 */
 		if (SX_SHARERS(x) > 1) {
-			if (atomic_cmpset_ptr(&sx->sx_lock, x,
+			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
 			    x - SX_ONE_SHARER)) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR4(KTR_LOCK,
@@ -946,8 +946,8 @@ _sx_sunlock_hard(struct sx *sx, const ch
 		 */
 		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
 			MPASS(x == SX_SHARERS_LOCK(1));
-			if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
-			    SX_LOCK_UNLOCKED)) {
+			if (atomic_cmpset_rel_ptr(&sx->sx_lock,
+			    SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR2(KTR_LOCK, "%s: %p last succeeded",
 					    __func__, sx);
@@ -970,7 +970,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
 		 * Note that the state of the lock could have changed,
 		 * so if it fails loop back and retry.
 		 */
-		if (!atomic_cmpset_ptr(&sx->sx_lock,
+		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
 		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
 		    SX_LOCK_UNLOCKED)) {
 			sleepq_release(&sx->lock_object);

Modified: stable/8/sys/sys/rwlock.h
==============================================================================
--- stable/8/sys/sys/rwlock.h	Mon Oct 12 14:51:19 2009	(r197980)
+++ stable/8/sys/sys/rwlock.h	Mon Oct 12 15:32:00 2009	(r197981)
@@ -55,13 +55,6 @@
  *
  * When the lock is not locked by any thread, it is encoded as a read lock
  * with zero waiters.
- *
- * A note about memory barriers.  Write locks need to use the same memory
- * barriers as mutexes: _acq when acquiring a write lock and _rel when
- * releasing a write lock.  Read locks also need to use an _acq barrier when
- * acquiring a read lock.  However, since read locks do not update any
- * locked data (modulo bugs of course), no memory barrier is needed when
- * releasing a read lock.
  */
 
 #define	RW_LOCK_READ		0x01

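The comment removed here (and its twin removed from sx.h below) encoded
the old assumption that, because readers write no protected data, a
read-lock release needs no barrier.  A hypothetical writer-side sketch
(identifiers invented; not code from the tree) shows what that
assumption misses on weakly ordered hardware:

	#include <sys/types.h>
	#include <machine/atomic.h>

	#define	MY_UNLOCKED	0x01	/* stand-ins for the real */
	#define	MY_WRITER	0x02	/* lock-word encodings */

	static volatile uintptr_t lock_word = MY_UNLOCKED;
	static int protected_value;

	static void
	writer(void)
	{
		/* Spin until the last reader's unlocking store is seen. */
		while (!atomic_cmpset_acq_ptr(&lock_word, MY_UNLOCKED,
		    MY_WRITER))
			continue;

		/*
		 * If the reader released with a plain cmpset, its loads of
		 * protected_value may still be pending here and can observe
		 * this store, i.e. state the reader was not entitled to see
		 * while "holding" the lock.  A _rel release on the reader
		 * side orders its loads before the unlocking store, ruling
		 * the race out.
		 */
		protected_value = 42;
	}
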
Modified: stable/8/sys/sys/sx.h
==============================================================================
--- stable/8/sys/sys/sx.h	Mon Oct 12 14:51:19 2009	(r197980)
+++ stable/8/sys/sys/sx.h	Mon Oct 12 15:32:00 2009	(r197981)
@@ -63,13 +63,6 @@
  *
  * When the lock is not locked by any thread, it is encoded as a
  * shared lock with zero waiters.
- *
- * A note about memory barriers.  Exclusive locks need to use the same
- * memory barriers as mutexes: _acq when acquiring an exclusive lock
- * and _rel when releasing an exclusive lock.  On the other side,
- * shared lock needs to use an _acq barrier when acquiring the lock
- * but, since they don't update any locked data, no memory barrier is
- * needed when releasing a shared lock.
  */
 
 #define	SX_LOCK_SHARED			0x01
@@ -200,7 +193,7 @@ __sx_sunlock(struct sx *sx, const char *
 	uintptr_t x = sx->sx_lock;
 
 	if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
-	    !atomic_cmpset_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
+	    !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
 		_sx_sunlock_hard(sx, file, line);
 }
 

