svn commit: r197643 - in head/sys: kern sys

Robert Watson rwatson at FreeBSD.org
Wed Sep 30 17:37:51 UTC 2009


On Wed, 30 Sep 2009, Attilio Rao wrote:

>  When releasing a read/shared lock we need to use a write memory barrier
>  in order to avoid CPU instruction reordering on architectures that do not
>  have strongly ordered writes.

Hi Attilio (Fabio, et al),

Nice catch!  Are we aware of specific reported problems that can be laid at
the feet of this bug, or was this more of a "wait a moment, shouldn't there
be a barrier there?" moment?  Could you comment on the scope of this problem
across the architectures we support?
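
For the archives, here is a rough sketch of the hazard as I understand it.
It uses C11 atomics and made-up names rather than our atomic(9) API, ignores
writers entirely, and only illustrates the ordering issue:

#include <stdatomic.h>

struct toy_rwlock {
	_Atomic unsigned	readers;	/* reader count; 0 == unlocked */
};

int protected_data;				/* protected by the toy lock */

int
toy_read(struct toy_rwlock *l)
{
	int v;

	/* Acquire semantics when taking the read lock, as before. */
	atomic_fetch_add_explicit(&l->readers, 1, memory_order_acquire);

	v = protected_data;			/* load under the read lock */

	/*
	 * If this decrement were relaxed (the analogue of the old
	 * atomic_cmpset_ptr()), a weakly ordered CPU could make it visible
	 * before the load above completes, so a writer could take the lock
	 * and modify protected_data while that load is still in flight.
	 * Release semantics (the analogue of atomic_cmpset_rel_ptr()) keep
	 * the load ordered before the unlock.
	 */
	atomic_fetch_sub_explicit(&l->readers, 1, memory_order_release);

	return (v);
}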

Robert

>
>  Diagnosed by:	fabio
>  Reviewed by:	jhb
>  Tested by:	Giovanni Trematerra
>  		<giovanni dot trematerra at gmail dot com>
>
> Modified:
>  head/sys/kern/kern_rwlock.c
>  head/sys/kern/kern_sx.c
>  head/sys/sys/rwlock.h
>  head/sys/sys/sx.h
>
> Modified: head/sys/kern/kern_rwlock.c
> ==============================================================================
> --- head/sys/kern/kern_rwlock.c	Wed Sep 30 12:53:21 2009	(r197642)
> +++ head/sys/kern/kern_rwlock.c	Wed Sep 30 13:26:31 2009	(r197643)
> @@ -541,7 +541,7 @@ _rw_runlock(struct rwlock *rw, const cha
> 		 */
> 		x = rw->rw_lock;
> 		if (RW_READERS(x) > 1) {
> -			if (atomic_cmpset_ptr(&rw->rw_lock, x,
> +			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
> 			    x - RW_ONE_READER)) {
> 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
> 					CTR4(KTR_LOCK,
> @@ -559,7 +559,8 @@ _rw_runlock(struct rwlock *rw, const cha
> 		if (!(x & RW_LOCK_WAITERS)) {
> 			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
> 			    RW_READERS_LOCK(1));
> -			if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
> +			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
> +			    RW_UNLOCKED)) {
> 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
> 					CTR2(KTR_LOCK, "%s: %p last succeeded",
> 					    __func__, rw);
> @@ -597,7 +598,7 @@ _rw_runlock(struct rwlock *rw, const cha
> 			x |= (v & RW_LOCK_READ_WAITERS);
> 		} else
> 			queue = TS_SHARED_QUEUE;
> -		if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
> +		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
> 		    x)) {
> 			turnstile_chain_unlock(&rw->lock_object);
> 			continue;
>
> Modified: head/sys/kern/kern_sx.c
> ==============================================================================
> --- head/sys/kern/kern_sx.c	Wed Sep 30 12:53:21 2009	(r197642)
> +++ head/sys/kern/kern_sx.c	Wed Sep 30 13:26:31 2009	(r197643)
> @@ -931,7 +931,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
> 		 * so, just drop one and return.
> 		 */
> 		if (SX_SHARERS(x) > 1) {
> -			if (atomic_cmpset_ptr(&sx->sx_lock, x,
> +			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
> 			    x - SX_ONE_SHARER)) {
> 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
> 					CTR4(KTR_LOCK,
> @@ -949,8 +949,8 @@ _sx_sunlock_hard(struct sx *sx, const ch
> 		 */
> 		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
> 			MPASS(x == SX_SHARERS_LOCK(1));
> -			if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
> -			    SX_LOCK_UNLOCKED)) {
> +			if (atomic_cmpset_rel_ptr(&sx->sx_lock,
> +			    SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
> 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
> 					CTR2(KTR_LOCK, "%s: %p last succeeded",
> 					    __func__, sx);
> @@ -973,7 +973,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
> 		 * Note that the state of the lock could have changed,
> 		 * so if it fails loop back and retry.
> 		 */
> -		if (!atomic_cmpset_ptr(&sx->sx_lock,
> +		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
> 		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
> 		    SX_LOCK_UNLOCKED)) {
> 			sleepq_release(&sx->lock_object);
>
> Modified: head/sys/sys/rwlock.h
> ==============================================================================
> --- head/sys/sys/rwlock.h	Wed Sep 30 12:53:21 2009	(r197642)
> +++ head/sys/sys/rwlock.h	Wed Sep 30 13:26:31 2009	(r197643)
> @@ -55,13 +55,6 @@
>  *
>  * When the lock is not locked by any thread, it is encoded as a read lock
>  * with zero waiters.
> - *
> - * A note about memory barriers.  Write locks need to use the same memory
> - * barriers as mutexes: _acq when acquiring a write lock and _rel when
> - * releasing a write lock.  Read locks also need to use an _acq barrier when
> - * acquiring a read lock.  However, since read locks do not update any
> - * locked data (modulo bugs of course), no memory barrier is needed when
> - * releasing a read lock.
>  */
>
> #define	RW_LOCK_READ		0x01
>
> Modified: head/sys/sys/sx.h
> ==============================================================================
> --- head/sys/sys/sx.h	Wed Sep 30 12:53:21 2009	(r197642)
> +++ head/sys/sys/sx.h	Wed Sep 30 13:26:31 2009	(r197643)
> @@ -63,13 +63,6 @@
>  *
>  * When the lock is not locked by any thread, it is encoded as a
>  * shared lock with zero waiters.
> - *
> - * A note about memory barriers.  Exclusive locks need to use the same
> - * memory barriers as mutexes: _acq when acquiring an exclusive lock
> - * and _rel when releasing an exclusive lock.  On the other side,
> - * shared lock needs to use an _acq barrier when acquiring the lock
> - * but, since they don't update any locked data, no memory barrier is
> - * needed when releasing a shared lock.
>  */
>
> #define	SX_LOCK_SHARED			0x01
> @@ -200,7 +193,7 @@ __sx_sunlock(struct sx *sx, const char *
> 	uintptr_t x = sx->sx_lock;
>
> 	if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
> -	    !atomic_cmpset_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
> +	    !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
> 		_sx_sunlock_hard(sx, file, line);
> }
>
>

