svn commit: r233441 - user/davidxu/pth_objdestroy/lib/libthr/thread

David Xu davidxu at FreeBSD.org
Sun Mar 25 02:01:18 UTC 2012


Author: davidxu
Date: Sun Mar 25 02:01:17 2012
New Revision: 233441
URL: http://svn.freebsd.org/changeset/base/233441

Log:
  To eliminate the overhead introduced in revision 233103 -- which enters the
  kernel to unlock a contested mutex and therefore results in holding the mutex
  too long -- use a 3-state simple mutex when possible; it is implemented purely
  in userland. The unlocking thread only wakes a sleeping thread up and does not
  write to the mutex memory, so a kernel-side error caused by a memory fault
  can be safely ignored.

Modified:
  user/davidxu/pth_objdestroy/lib/libthr/thread/thr_cond.c
  user/davidxu/pth_objdestroy/lib/libthr/thread/thr_mutex.c
  user/davidxu/pth_objdestroy/lib/libthr/thread/thr_private.h
  user/davidxu/pth_objdestroy/lib/libthr/thread/thr_umtx.c
  user/davidxu/pth_objdestroy/lib/libthr/thread/thr_umtx.h

Modified: user/davidxu/pth_objdestroy/lib/libthr/thread/thr_cond.c
==============================================================================
--- user/davidxu/pth_objdestroy/lib/libthr/thread/thr_cond.c	Sun Mar 25 01:00:31 2012	(r233440)
+++ user/davidxu/pth_objdestroy/lib/libthr/thread/thr_cond.c	Sun Mar 25 02:01:17 2012	(r233441)
@@ -290,13 +290,10 @@ cond_wait_common(pthread_cond_t *cond, p
 	if ((error = _mutex_owned(curthread, mp)) != 0)
 		return (error);
 
-	if (curthread->attr.sched_policy != SCHED_OTHER ||
-	    (mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT|
-		USYNC_PROCESS_SHARED)) != 0 ||
-	    (cvp->__flags & USYNC_PROCESS_SHARED) != 0)
-		return cond_wait_kernel(cvp, mp, abstime, cancel);
-	else
+	if (is_user_mutex(&mp->m_lock))
 		return cond_wait_user(cvp, mp, abstime, cancel);
+	else
+		return cond_wait_kernel(cvp, mp, abstime, cancel);
 }
 
 int

Modified: user/davidxu/pth_objdestroy/lib/libthr/thread/thr_mutex.c
==============================================================================
--- user/davidxu/pth_objdestroy/lib/libthr/thread/thr_mutex.c	Sun Mar 25 01:00:31 2012	(r233440)
+++ user/davidxu/pth_objdestroy/lib/libthr/thread/thr_mutex.c	Sun Mar 25 02:01:17 2012	(r233441)
@@ -311,6 +311,24 @@ _pthread_mutex_destroy(pthread_mutex_t *
 		m = *mutex;						\
 	}
 
+static inline int
+user_mutex_trylock(struct umutex *m)
+{
+	return _thr_mtx_trylock(&m->m_owner);
+}
+
+static inline int 
+user_mutex_timedlock(struct umutex *m, const struct timespec *abstime)
+{
+	return _thr_mtx_timedlock(&m->m_owner, abstime);
+}
+
+static inline int
+user_mutex_unlock(struct umutex *m)
+{
+	return _thr_mtx_unlock(&m->m_owner);
+}
+
 static int
 mutex_trylock_common(pthread_mutex_t *mutex)
 {
@@ -319,15 +337,24 @@ mutex_trylock_common(pthread_mutex_t *mu
 	uint32_t id;
 	int ret;
 
-	id = TID(curthread);
 	if (m->m_flags & PMUTEX_FLAG_PRIVATE)
 		THR_CRITICAL_ENTER(curthread);
-	ret = _thr_umutex_trylock(&m->m_lock, id);
-	if (__predict_true(ret == 0)) {
-		ENQUEUE_MUTEX(curthread, m);
-	} else if (m->m_owner == curthread) {
+	if (__predict_true(is_user_mutex(&m->m_lock))) {
+		ret = user_mutex_trylock(&m->m_lock);
+		if (ret == 0) {
+			m->m_owner = curthread;
+			return (0);
+		}
+	} else {
+		id = TID(curthread);
+		ret = _thr_umutex_trylock(&m->m_lock, id);
+		if (ret == 0) {
+			ENQUEUE_MUTEX(curthread, m);
+			return (0);
+		}
+	}
+	if (m->m_owner == curthread)
 		ret = mutex_self_trylock(m);
-	} /* else {} */
 	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
 		THR_CRITICAL_LEAVE(curthread);
 	return (ret);
@@ -347,24 +374,24 @@ static int
 mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
 	const struct timespec *abstime)
 {
-	uint32_t	id, owner;
+	uint32_t	owner;
+	int	user_mutex;
 	int	count;
 	int	ret;
 
 	if (m->m_owner == curthread)
 		return mutex_self_lock(m, abstime);
 
-	id = TID(curthread);
+	user_mutex = is_user_mutex(&m->m_lock);
+
 	/*
 	 * For adaptive mutexes, spin for a bit in the expectation
 	 * that if the application requests this mutex type then
 	 * the lock is likely to be released quickly and it is
 	 * faster than entering the kernel
 	 */
-	if (__predict_false(
-		(m->m_lock.m_flags & 
-		 (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
-			goto sleep_in_kernel;
+	if (__predict_false(!user_mutex))
+		goto sleep;
 
 	if (!_thr_is_smp)
 		goto yield_loop;
@@ -372,8 +399,8 @@ mutex_lock_sleep(struct pthread *curthre
 	count = m->m_spinloops;
 	while (count--) {
 		owner = m->m_lock.m_owner;
-		if ((owner & ~UMUTEX_CONTESTED) == 0) {
-			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
+		if (owner == 0) {
+			if (user_mutex_trylock(&m->m_lock) == 0) {
 				ret = 0;
 				goto done;
 			}
@@ -386,28 +413,32 @@ yield_loop:
 	while (count--) {
 		_sched_yield();
 		owner = m->m_lock.m_owner;
-		if ((owner & ~UMUTEX_CONTESTED) == 0) {
-			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
+		if (owner == 0) {
+			if (user_mutex_trylock(&m->m_lock) == 0) {
 				ret = 0;
 				goto done;
 			}
 		}
 	}
 
-sleep_in_kernel:
-	if (abstime == NULL) {
-		ret = __thr_umutex_lock(&m->m_lock, id);
-	} else if (__predict_false(
-		   abstime->tv_nsec < 0 ||
-		   abstime->tv_nsec >= 1000000000)) {
+sleep:
+	if (__predict_false(abstime != NULL && 
+	    (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000))) {
 		ret = EINVAL;
 	} else {
-		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
+		if (user_mutex)
+			ret = user_mutex_timedlock(&m->m_lock, abstime);
+		else	
+			ret = __thr_umutex_timedlock(&m->m_lock,
+				TID(_get_curthread()), abstime);
 	}
 done:
-	if (ret == 0)
-		ENQUEUE_MUTEX(curthread, m);
-
+	if (ret == 0) {
+		if (user_mutex)
+			m->m_owner = curthread;
+		else
+			ENQUEUE_MUTEX(curthread, m);
+	}
 	return (ret);
 }
 
@@ -420,12 +451,20 @@ mutex_lock_common(struct pthread_mutex *
 
 	if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
 		THR_CRITICAL_ENTER(curthread);
-	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
-		ENQUEUE_MUTEX(curthread, m);
-		ret = 0;
+	if (__predict_true(is_user_mutex(&m->m_lock))) {
+		ret = user_mutex_trylock(&m->m_lock);
+		if (ret == 0) {
+			m->m_owner = curthread;
+			return (0);
+		}
 	} else {
-		ret = mutex_lock_sleep(curthread, m, abstime);
+		ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread));
+		if (ret == 0) {
+			ENQUEUE_MUTEX(curthread, m);
+			return (0);
+		}
 	}
+	ret = mutex_lock_sleep(curthread, m, abstime);
 	if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
 		THR_CRITICAL_LEAVE(curthread);
 	return (ret);
@@ -658,8 +697,13 @@ mutex_unlock_common(struct pthread_mutex
         	} else
                 	defered = 0;
 
-		DEQUEUE_MUTEX(curthread, m);
-		_thr_umutex_unlock(&m->m_lock, id);
+		if (is_user_mutex(&m->m_lock)) {
+			m->m_owner = NULL;
+			user_mutex_unlock(&m->m_lock);
+		} else {
+			DEQUEUE_MUTEX(curthread, m);
+			_thr_umutex_unlock(&m->m_lock, id);
+		}
 
 		if (curthread->will_sleep == 0 && defered)  {
 			_thr_wake_all(curthread->defer_waiters,

Modified: user/davidxu/pth_objdestroy/lib/libthr/thread/thr_private.h
==============================================================================
--- user/davidxu/pth_objdestroy/lib/libthr/thread/thr_private.h	Sun Mar 25 01:00:31 2012	(r233440)
+++ user/davidxu/pth_objdestroy/lib/libthr/thread/thr_private.h	Sun Mar 25 02:01:17 2012	(r233441)
@@ -592,6 +592,14 @@ do {							\
 	_thr_ast(thrd);					\
 } while (0)
 
+#define	THR_LOCK_RELEASE_OPT(thrd, lck)			\
+do {							\
+	THR_ASSERT_LOCKLEVEL(thrd);			\
+	_thr_umutex_unlock_opt((lck), TID(thrd));	\
+	(thrd)->locklevel--;				\
+	_thr_ast(thrd);					\
+} while (0)
+
 #define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
 #define	THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
 #define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
@@ -888,6 +896,15 @@ _sleepq_first(struct sleepqueue *sq)
 	return TAILQ_FIRST(&sq->sq_blocked);
 }
 
+static inline int
+is_user_mutex(struct umutex *m)
+{
+	return ((m->m_flags &
+		 (UMUTEX_PRIO_PROTECT|
+                  UMUTEX_PRIO_INHERIT|
+                  USYNC_PROCESS_SHARED)) == 0);
+}
+
 void	_sleepq_init(void) __hidden;
 struct sleepqueue *_sleepq_alloc(void) __hidden;
 void	_sleepq_free(struct sleepqueue *) __hidden;

Modified: user/davidxu/pth_objdestroy/lib/libthr/thread/thr_umtx.c
==============================================================================
--- user/davidxu/pth_objdestroy/lib/libthr/thread/thr_umtx.c	Sun Mar 25 01:00:31 2012	(r233440)
+++ user/davidxu/pth_objdestroy/lib/libthr/thread/thr_umtx.c	Sun Mar 25 02:01:17 2012	(r233441)
@@ -157,6 +157,21 @@ __thr_umutex_unlock(struct umutex *mtx, 
 	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
 }
 
+/* Unlock a never freed mutex */
+int
+__thr_umutex_unlock_opt(struct umutex *mtx, uint32_t id)
+{
+#ifndef __ia64__
+	/* XXX this logic has a race-condition on ia64. */
+	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
+		if (atomic_cmpset_rel_32(&mtx->m_owner, 
+		    id | UMUTEX_CONTESTED, UMUTEX_CONTESTED))
+			return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE, 0, 0, 0);
+	}
+#endif
+	return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
+}
+
 int
 __thr_umutex_trylock(struct umutex *mtx)
 {
@@ -340,3 +355,36 @@ _thr_rwl_unlock(struct urwlock *rwlock)
 	if (_thr_rwlock_unlock(rwlock))
 		PANIC("unlock error");
 }
+
+int 
+__thr_mtx_timedlock(volatile uint32_t *m, const struct timespec *abstime)
+{
+	uint32_t owner;
+	int ret = 0, error;
+
+	/* contested case */
+	do {
+		owner = *m;
+		if (owner == 2 || atomic_cmpset_acq_32(m, 1, 2)) {
+			error = _thr_umtx_timedwait_uint(m, 2,
+				CLOCK_REALTIME, abstime, 0);
+			if (error != 0 && error != EINTR) {
+				if (atomic_cmpset_acq_32(m, 0, 2))
+					ret = 0;
+				else
+					ret = error;
+				break;
+			}
+		}
+	} while (!atomic_cmpset_acq_32(m, 0, 2));
+	return (ret);
+}
+
+int
+__thr_mtx_unlock(volatile uint32_t *m)
+{
+	*m = 0;
+	wmb();
+	(void)_thr_umtx_wake(m, 1, 0);
+	return (0);
+}

Modified: user/davidxu/pth_objdestroy/lib/libthr/thread/thr_umtx.h
==============================================================================
--- user/davidxu/pth_objdestroy/lib/libthr/thread/thr_umtx.h	Sun Mar 25 01:00:31 2012	(r233440)
+++ user/davidxu/pth_objdestroy/lib/libthr/thread/thr_umtx.h	Sun Mar 25 02:01:17 2012	(r233441)
@@ -40,9 +40,12 @@ int __thr_umutex_lock_spin(struct umutex
 int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
 	const struct timespec *timeout) __hidden;
 int __thr_umutex_unlock(struct umutex *mtx, uint32_t id) __hidden;
+int __thr_umutex_unlock_opt(struct umutex *mtx, uint32_t id) __hidden;
 int __thr_umutex_trylock(struct umutex *mtx) __hidden;
 int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
 	uint32_t *oldceiling) __hidden;
+int __thr_mtx_timedlock(volatile uint32_t *, const struct timespec *);
+int __thr_mtx_unlock(volatile uint32_t *);
 
 void _thr_umutex_init(struct umutex *mtx) __hidden;
 void _thr_urwlock_init(struct urwlock *rwl) __hidden;
@@ -127,6 +130,14 @@ _thr_umutex_unlock(struct umutex *mtx, u
 }
 
 static inline int
+_thr_umutex_unlock_opt(struct umutex *mtx, uint32_t id)
+{
+    if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED))
+	return (0);
+    return (__thr_umutex_unlock_opt(mtx, id));
+}
+
+static inline int
 _thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
 {
 	int32_t state;
@@ -206,4 +217,29 @@ _thr_rwlock_unlock(struct urwlock *rwloc
     	}
     	return (__thr_rwlock_unlock(rwlock));
 }
+
+/* simple mutex lock/unlock */
+static inline int
+_thr_mtx_trylock(volatile uint32_t *m)
+{
+	if (atomic_cmpset_acq_32(m, 0, 1))
+		return (0);
+	return (EBUSY);
+}
+
+static inline int
+_thr_mtx_timedlock(volatile uint32_t *m, const struct timespec *abstime)
+{
+	if (atomic_cmpset_acq_32(m, 0, 1))
+		return (0);
+	return __thr_mtx_timedlock(m, abstime);
+}
+
+static inline int
+_thr_mtx_unlock(volatile uint32_t *m)
+{
+	if (atomic_cmpset_rel_32(m, 1, 0))
+		return (0);
+	return __thr_mtx_unlock(m);
+}
 #endif


More information about the svn-src-user mailing list