svn commit: r214814 - user/davidxu/libthr/lib/libthr/thread

David Xu davidxu at FreeBSD.org
Fri Nov 5 00:07:56 UTC 2010


Author: davidxu
Date: Fri Nov  5 00:07:56 2010
New Revision: 214814
URL: http://svn.freebsd.org/changeset/base/214814

Log:
  Merge condition variable work from the earlier work on the
  libthr.user_requeue branch. If the current thread is using a simple
  mutex, use the userlevel cv; otherwise use the kernel-based cv.
  The userlevel cv is faster. There may be other cases in which a
  thread should use the kernel-based cv.

Modified:
  user/davidxu/libthr/lib/libthr/thread/thr_cond.c
  user/davidxu/libthr/lib/libthr/thread/thr_mutex.c
  user/davidxu/libthr/lib/libthr/thread/thr_private.h

Modified: user/davidxu/libthr/lib/libthr/thread/thr_cond.c
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_cond.c	Thu Nov  4 22:05:50 2010	(r214813)
+++ user/davidxu/libthr/lib/libthr/thread/thr_cond.c	Fri Nov  5 00:07:56 2010	(r214814)
@@ -80,6 +80,7 @@ cond_init(pthread_cond_t *cond, const pt
 				pcond->c_kerncv.c_flags |= USYNC_PROCESS_SHARED;
 			pcond->c_kerncv.c_clockid = (*cond_attr)->c_clockid;
 		}
+		_thr_umutex_init(&pcond->c_lock);
 		pcond->c_kerncv.c_flags |= UCOND_BIND_MUTEX;
 		*cond = pcond;
 	}
@@ -137,6 +138,8 @@ _pthread_cond_destroy(pthread_cond_t *co
 		rval = EINVAL;
 	else {
 		cv = *cond;
+		if (cv->c_waiters != 0)
+			return (EBUSY);
 		_thr_ucond_broadcast(&cv->c_kerncv);
 		*cond = THR_COND_DESTROYED;
 
@@ -158,7 +161,7 @@ _pthread_cond_destroy(pthread_cond_t *co
  *   to be lost.
  */
 static int
-cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
+cond_wait_kernel(pthread_cond_t *cond, pthread_mutex_t *mutex,
 	const struct timespec *abstime, int cancel)
 {
 	struct pthread	*curthread = _get_curthread();
@@ -167,12 +170,6 @@ cond_wait_common(pthread_cond_t *cond, p
 	int	recurse;
 	int	ret;
 
-	/*
-	 * If the condition variable is statically initialized,
-	 * perform the dynamic initialization:
-	 */
-	CHECK_AND_INIT_COND
-
 	cv = *cond;
 	ret = _mutex_cv_detach(mutex, &recurse);
 	if (__predict_false(ret != 0))
@@ -206,6 +203,102 @@ cond_wait_common(pthread_cond_t *cond, p
 	return (ret);
 }
 
+static int
+cond_wait_user(pthread_cond_t *cond, pthread_mutex_t *mutex,
+	const struct timespec *abstime, int cancel)
+{
+	struct pthread	*curthread = _get_curthread();
+	struct timespec ts, ts2, *tsp;
+	int		recurse;
+	pthread_cond_t  cv;
+	int		ret;
+	uint64_t	seq, bseq;
+
+	cv = *cond;
+	THR_UMUTEX_LOCK(curthread, &cv->c_lock);
+	cv->c_waiters++;
+	ret = _mutex_cv_unlock(mutex, &recurse);
+	if (__predict_false(ret != 0)) {
+		cv->c_waiters--;
+		THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
+		return (ret);
+	}
+
+	if (abstime != NULL) {
+		clock_gettime(cv->c_kerncv.c_clockid, &ts);
+		TIMESPEC_SUB(&ts2, abstime, &ts);
+		tsp = &ts2;
+	} else
+		tsp = NULL;
+
+	bseq = cv->c_broadcast_seq;
+	for(;;) {
+		seq = cv->c_seq;
+		THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
+
+		if (cancel) {
+			_thr_cancel_enter2(curthread, 0);
+			ret = _thr_umtx_wait_uint((u_int *)&cv->c_seq,
+				(u_int)seq, tsp, 0);
+			_thr_cancel_leave(curthread, 0);
+		} else {
+			ret = _thr_umtx_wait_uint((u_int *)&cv->c_seq,
+				(u_int)seq, tsp, 0);
+		}
+
+		THR_UMUTEX_LOCK(curthread, &cv->c_lock);
+		if (cv->c_broadcast_seq != bseq) {
+			ret = 0;
+			break;
+		}
+		if (cv->c_signaled > 0) {
+			cv->c_signaled--;
+			ret = 0;
+			break;
+		} else if (cancel && SHOULD_CANCEL(curthread) &&
+			   !THR_IN_CRITICAL(curthread)) {
+				THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
+				_pthread_exit(PTHREAD_CANCELED);
+		}
+	}
+	THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
+
+	if (ret == EINTR)
+		ret = 0;
+	_mutex_cv_lock(mutex, recurse);
+	return (ret);
+}
+
+
+static int
+cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
+	const struct timespec *abstime, int cancel)
+{
+	struct pthread	*curthread = _get_curthread();
+	struct pthread_mutex *m;
+	pthread_cond_t  cv;
+	int err;
+
+	/*
+	 * If the condition variable is statically initialized,
+	 * perform the dynamic initialization:
+	 */
+	CHECK_AND_INIT_COND
+
+	if ((err = _mutex_owned(curthread, mutex)) != 0)
+		return (err);
+
+	m = *mutex;
+	if ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) !=
+	    (cv->c_kerncv.c_flags & USYNC_PROCESS_SHARED))
+		return (EINVAL);
+
+	if (m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT)) 
+		return cond_wait_kernel(cond, mutex, abstime, cancel);
+	else
+		return cond_wait_user(cond, mutex, abstime, cancel);
+}
+
 int
 _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
@@ -247,8 +340,8 @@ __pthread_cond_timedwait(pthread_cond_t 
 static int
 cond_signal_common(pthread_cond_t *cond, int broadcast)
 {
+	struct pthread	*curthread = _get_curthread();
 	pthread_cond_t	cv;
-	int		ret = 0;
 
 	/*
 	 * If the condition variable is statically initialized, perform dynamic
@@ -257,10 +350,30 @@ cond_signal_common(pthread_cond_t *cond,
 	CHECK_AND_INIT_COND
 
 	if (!broadcast)
-		ret = _thr_ucond_signal(&cv->c_kerncv);
+		_thr_ucond_signal(&cv->c_kerncv);
 	else
-		ret = _thr_ucond_broadcast(&cv->c_kerncv);
-	return (ret);
+		_thr_ucond_broadcast(&cv->c_kerncv);
+
+	if (cv->c_waiters == 0)
+		return (0);
+
+	THR_UMUTEX_LOCK(curthread, &cv->c_lock);
+	if (cv->c_waiters > 0) {
+		if (!broadcast) {
+			cv->c_seq++;
+			cv->c_signaled++;
+			cv->c_waiters--;
+			_thr_umtx_wake(&cv->c_seq, 1, 0);
+		} else {
+			cv->c_seq++;
+			cv->c_broadcast_seq++;
+			cv->c_waiters = 0;
+			cv->c_signaled = 0;
+			_thr_umtx_wake(&cv->c_seq, INT_MAX, 0);
+		}
+	}
+	THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
+	return (0);
 }
 
 int

Modified: user/davidxu/libthr/lib/libthr/thread/thr_mutex.c
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_mutex.c	Thu Nov  4 22:05:50 2010	(r214813)
+++ user/davidxu/libthr/lib/libthr/thread/thr_mutex.c	Fri Nov  5 00:07:56 2010	(r214814)
@@ -548,12 +548,10 @@ mutex_self_lock(struct pthread_mutex *m,
 	return (ret);
 }
 
-static int
-mutex_unlock_common(pthread_mutex_t *mutex)
+int
+_mutex_owned(struct pthread *curthread, const pthread_mutex_t *mutex)
 {
-	struct pthread *curthread = _get_curthread();
 	struct pthread_mutex *m;
-	uint32_t id;
 
 	m = *mutex;
 	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
@@ -561,12 +559,26 @@ mutex_unlock_common(pthread_mutex_t *mut
 			return (EINVAL);
 		return (EPERM);
 	}
-
 	/*
 	 * Check if the running thread is not the owner of the mutex.
 	 */
 	if (__predict_false(m->m_owner != curthread))
 		return (EPERM);
+	return (0);
+}
+
+static int
+mutex_unlock_common(pthread_mutex_t *mutex)
+{
+	struct pthread *curthread = _get_curthread();
+	struct pthread_mutex *m;
+	uint32_t id;
+	int err;
+
+	if ((err = _mutex_owned(curthread, mutex)) != 0)
+		return (err);
+
+	m = *mutex;
 
 	id = TID(curthread);
 	if (__predict_false(
@@ -607,6 +619,41 @@ _mutex_cv_lock(pthread_mutex_t *mutex, i
 }
 
 int
+_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
+{
+	struct pthread *curthread = _get_curthread();
+	struct pthread_mutex *m;
+	int err;
+
+	if ((err = _mutex_owned(curthread, mutex)) != 0)
+		return (err);
+
+	m = *mutex;
+
+	/*
+	 * Clear the count in case this is a recursive mutex.
+	 */
+	*count = m->m_count;
+	m->m_refcount++;
+	m->m_count = 0;
+	m->m_owner = NULL;
+	/* Remove the mutex from the threads queue. */
+	MUTEX_ASSERT_IS_OWNED(m);
+	if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
+		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
+	else {
+		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
+		set_inherited_priority(curthread, m);
+	}
+	MUTEX_INIT_LINK(m);
+	_thr_umutex_unlock(&m->m_lock, TID(curthread));
+
+	if (m->m_private)
+		THR_CRITICAL_LEAVE(curthread);
+	return (0);
+}
+
+int
 _mutex_cv_attach(pthread_mutex_t *mutex, int count)
 {
 	struct pthread *	curthread = _get_curthread();
@@ -625,19 +672,12 @@ _mutex_cv_detach(pthread_mutex_t *mutex,
 {
 	struct pthread *curthread = _get_curthread();
 	struct pthread_mutex *m;
+	int err;
 
-	m = *mutex;
-	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
-		if (m == THR_MUTEX_DESTROYED)
-			return (EINVAL);
-		return (EPERM);
-	}
+	if ((err = _mutex_owned(curthread, mutex)) != 0)
+		return (err);
 
-	/*
-	 * Check if the running thread is not the owner of the mutex.
-	 */
-	if (__predict_false(m->m_owner != curthread))
-		return (EPERM);
+	m = *mutex;
 
 	/*
 	 * Clear the count in case this is a recursive mutex.

Modified: user/davidxu/libthr/lib/libthr/thread/thr_private.h
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_private.h	Thu Nov  4 22:05:50 2010	(r214813)
+++ user/davidxu/libthr/lib/libthr/thread/thr_private.h	Fri Nov  5 00:07:56 2010	(r214814)
@@ -169,6 +169,15 @@ struct pthread_mutex_attr {
 
 struct pthread_cond {
 	struct ucond	c_kerncv;
+	/*
+	 * The following is a userlevel condition variable, which is
+	 * used for time-sharing scheduling; it is a bit faster.
+	 */
+	struct umutex	c_lock;
+	int		c_waiters;
+	int		c_signaled;
+	uint32_t	c_seq;
+	uint64_t	c_broadcast_seq;
 };
 
 struct pthread_cond_attr {
@@ -674,8 +683,10 @@ extern struct umutex	_thr_event_lock __h
 __BEGIN_DECLS
 int	_thr_setthreaded(int) __hidden;
 int	_mutex_cv_lock(pthread_mutex_t *, int count) __hidden;
+int	_mutex_cv_unlock(pthread_mutex_t *, int *count) __hidden;
 int	_mutex_cv_attach(pthread_mutex_t *, int count) __hidden;
 int	_mutex_cv_detach(pthread_mutex_t *, int *count) __hidden;
+int	_mutex_owned(struct pthread *, const pthread_mutex_t *) __hidden;
 int	_mutex_reinit(pthread_mutex_t *) __hidden;
 void	_mutex_fork(struct pthread *curthread) __hidden;
 void	_libpthread_init(struct pthread *) __hidden;


More information about the svn-src-user mailing list