svn commit: r214661 - user/davidxu/libthr/lib/libthr/thread

David Xu davidxu at FreeBSD.org
Tue Nov 2 07:49:21 UTC 2010


Author: davidxu
Date: Tue Nov  2 07:49:21 2010
New Revision: 214661
URL: http://svn.freebsd.org/changeset/base/214661

Log:
  Remove the link field from pthread_mutex: it cannot be used for a
  process-shared mutex, since another process could corrupt the owning
  thread's mutex link list while the mutex is held. Instead, allocate
  the mutex link entry from the heap and keep it private.
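
A minimal sketch of the idea, for context only. It is not the libthr code in
the diff below: the names shared_mutex, link_list, track_owned and
untrack_owned are placeholders, and it uses plain malloc/free where the
commit uses an mmap-backed free list. The point it illustrates is that the
TAILQ pointers live in a process-private link rather than inside the
(potentially shared) mutex, so another process mapping the mutex has nothing
it can scribble on.

#include <sys/queue.h>
#include <stdlib.h>

struct shared_mutex {			/* may live in shared memory */
	long	owner_tid;		/* no list pointers in here */
};

struct mutex_link {			/* always process-private */
	TAILQ_ENTRY(mutex_link)	qe;
	struct shared_mutex	*mutexp;
};
TAILQ_HEAD(link_list, mutex_link);

/* Record ownership of 'm' on the caller's private list. */
void
track_owned(struct link_list *owned, struct shared_mutex *m)
{
	struct mutex_link *ml = malloc(sizeof(*ml));

	if (ml == NULL)
		abort();
	ml->mutexp = m;
	TAILQ_INSERT_TAIL(owned, ml, qe);
}

/* Forget ownership of 'm'; only private memory is touched. */
void
untrack_owned(struct link_list *owned, struct shared_mutex *m)
{
	struct mutex_link *ml;

	TAILQ_FOREACH(ml, owned, qe) {
		if (ml->mutexp == m) {
			TAILQ_REMOVE(owned, ml, qe);
			free(ml);
			return;
		}
	}
}

The committed change below follows the same pattern, but instead of calling
malloc/free on every lock operation it recycles links through a
lock-protected free list carved out of mmap(2)'d pages
(_thr_mutex_link_alloc()/_thr_mutex_link_free() in thr_kern.c).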

Modified:
  user/davidxu/libthr/lib/libthr/thread/thr_kern.c
  user/davidxu/libthr/lib/libthr/thread/thr_mutex.c
  user/davidxu/libthr/lib/libthr/thread/thr_private.h

Modified: user/davidxu/libthr/lib/libthr/thread/thr_kern.c
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_kern.c	Tue Nov  2 07:46:31 2010	(r214660)
+++ user/davidxu/libthr/lib/libthr/thread/thr_kern.c	Tue Nov  2 07:49:21 2010	(r214661)
@@ -45,6 +45,8 @@
 static struct umutex addr_lock;
 static struct wake_addr *wake_addr_head;
 static struct wake_addr default_wake_addr;
+static struct umutex mutex_link_lock;
+static struct mutex_queue mutex_link_freeq;
 
 struct wake_addr *
 _thr_alloc_wake_addr(void)
@@ -89,6 +91,44 @@ _thr_release_wake_addr(struct wake_addr 
 	THR_UMUTEX_UNLOCK(curthread, &addr_lock);
 }
 
+void
+_thr_mutex_link_init(void)
+{
+	TAILQ_INIT(&mutex_link_freeq);
+	_thr_umutex_init(&mutex_link_lock);
+}
+
+struct mutex_link *
+_thr_mutex_link_alloc(void)
+{
+	struct pthread *curthread = _get_curthread();
+	struct mutex_link *p;
+
+	THR_LOCK_ACQUIRE(curthread, &mutex_link_lock);
+	p = TAILQ_FIRST(&mutex_link_freeq);
+	if (p == NULL) {
+		unsigned i;
+		unsigned pagesize = getpagesize();
+		struct mutex_link *pp = (struct mutex_link *)mmap(NULL, getpagesize(),
+			 PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
+		for (i = 1; i < pagesize/sizeof(struct mutex_link); ++i)
+			TAILQ_INSERT_TAIL(&mutex_link_freeq, &pp[i], qe);
+		p = &pp[0];
+	}
+	THR_LOCK_RELEASE(curthread, &mutex_link_lock);
+	return (p);
+}
+
+void
+_thr_mutex_link_free(struct mutex_link *p)
+{
+	struct pthread *curthread = _get_curthread();
+
+	THR_LOCK_ACQUIRE(curthread, &mutex_link_lock);
+	TAILQ_INSERT_TAIL(&mutex_link_freeq, p, qe);
+	THR_LOCK_RELEASE(curthread, &mutex_link_lock);
+}
+
 /*
  * This is called when the first thread (other than the initial
  * thread) is created.

Modified: user/davidxu/libthr/lib/libthr/thread/thr_mutex.c
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_mutex.c	Tue Nov  2 07:46:31 2010	(r214660)
+++ user/davidxu/libthr/lib/libthr/thread/thr_mutex.c	Tue Nov  2 07:49:21 2010	(r214661)
@@ -49,26 +49,6 @@
 #define UMUTEX_TIDMASK	(~UMUTEX_CONTESTED)
 #endif
 
-#if defined(_PTHREADS_INVARIANTS)
-#define MUTEX_INIT_LINK(m) 		do {		\
-	(m)->m_qe.tqe_prev = NULL;			\
-	(m)->m_qe.tqe_next = NULL;			\
-} while (0)
-#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
-	if (__predict_false((m)->m_qe.tqe_prev == NULL))\
-		PANIC("mutex is not on list");		\
-} while (0)
-#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
-	if (__predict_false((m)->m_qe.tqe_prev != NULL ||\
-	    (m)->m_qe.tqe_next != NULL))		\
-		PANIC("mutex is on list");		\
-} while (0)
-#else
-#define MUTEX_INIT_LINK(m)
-#define MUTEX_ASSERT_IS_OWNED(m)
-#define MUTEX_ASSERT_NOT_OWNED(m)
-#endif
-
 /*
  * For adaptive mutexes, how many times to spin doing trylock2
  * before entering the kernel to block
@@ -153,7 +133,6 @@ mutex_init(pthread_mutex_t *mutex,
 	pmutex->m_refcount = 0;
 	pmutex->m_spinloops = 0;
 	pmutex->m_yieldloops = 0;
-	MUTEX_INIT_LINK(pmutex);
 	switch(attr->m_protocol) {
 	case PTHREAD_PRIO_NONE:
 		pmutex->m_lock.m_owner = 0;
@@ -200,15 +179,53 @@ init_static(struct pthread *thread, pthr
 static void
 set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
 {
-	struct pthread_mutex *m2;
+	struct mutex_link *ml2;
 
-	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
-	if (m2 != NULL)
-		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
+	ml2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
+	if (ml2 != NULL)
+		m->m_lock.m_ceilings[1] = ml2->mutexp->m_lock.m_ceilings[0];
 	else
 		m->m_lock.m_ceilings[1] = -1;
 }
 
+static void
+enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
+{
+	m->m_owner = curthread;
+	struct mutex_link *ml = _thr_mutex_link_alloc();
+	ml->mutexp = m;
+	if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
+		TAILQ_INSERT_TAIL(&curthread->mutexq, ml, qe);
+	else
+		TAILQ_INSERT_TAIL(&curthread->pp_mutexq, ml, qe);
+}
+
+static void
+dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
+{
+	struct mutex_link *ml;
+
+	if ((((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))	 {
+		TAILQ_FOREACH(ml, &curthread->mutexq, qe) {
+			if (ml->mutexp == m) {
+				TAILQ_REMOVE(&curthread->mutexq, ml, qe);
+				_thr_mutex_link_free(ml);
+				break;
+			}
+		}
+	} else {
+		TAILQ_FOREACH(ml, &curthread->pp_mutexq, qe) {
+			if (ml->mutexp == m) {
+				TAILQ_REMOVE(&curthread->mutexq, ml, qe);
+				set_inherited_priority(curthread, m);
+				_thr_mutex_link_free(ml);
+				break;
+			}
+		}
+	}
+	m->m_owner = NULL;
+}
+
 int
 __pthread_mutex_init(pthread_mutex_t *mutex,
     const pthread_mutexattr_t *mutex_attr)
@@ -237,7 +254,7 @@ _pthread_mutex_init_calloc_cb(pthread_mu
 void
 _mutex_fork(struct pthread *curthread)
 {
-	struct pthread_mutex *m;
+	struct mutex_link *ml;
 
 	/*
 	 * Fix mutex ownership for child process.
@@ -248,11 +265,10 @@ _mutex_fork(struct pthread *curthread)
 	 * process shared mutex is not supported, so I
 	 * am not worried.
 	 */
-	TAILQ_FOREACH(m, &curthread->mutexq, m_qe) {
-		m->m_lock.m_owner = TID(curthread);
-	}
-	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
-		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
+	TAILQ_FOREACH(ml, &curthread->mutexq, qe)
+		ml->mutexp->m_lock.m_owner = TID(curthread);
+	TAILQ_FOREACH(ml, &curthread->pp_mutexq, qe)
+		ml->mutexp->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
 }
 
 int
@@ -271,7 +287,6 @@ _pthread_mutex_destroy(pthread_mutex_t *
 			ret = EBUSY;
 		} else {
 			*mutex = THR_MUTEX_DESTROYED;
-			MUTEX_ASSERT_NOT_OWNED(m);
 			free(m);
 			ret = 0;
 		}
@@ -280,29 +295,6 @@ _pthread_mutex_destroy(pthread_mutex_t *
 	return (ret);
 }
 
-#define ENQUEUE_MUTEX(curthread, m)  					\
-	do {								\
-		(m)->m_owner = curthread;				\
-		/* Add to the list of owned mutexes: */			\
-		MUTEX_ASSERT_NOT_OWNED((m));				\
-		if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))	\
-			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
-		else							\
-			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
-	} while (0)
-
-#define DEQUEUE_MUTEX(curthread, m)					\
-	(m)->m_owner = NULL;						\
-	/* Remove the mutex from the threads queue. */			\
-	MUTEX_ASSERT_IS_OWNED(m);					\
-	if (__predict_true(((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))	\
-		TAILQ_REMOVE(&curthread->mutexq, (m), m_qe);		\
-	else {								\
-		TAILQ_REMOVE(&curthread->pp_mutexq, (m), m_qe);		\
-		set_inherited_priority(curthread, (m));			\
-	}								\
-	MUTEX_INIT_LINK(m);
-
 #define CHECK_AND_INIT_MUTEX						\
 	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
 		if (m == THR_MUTEX_DESTROYED)				\
@@ -354,7 +346,7 @@ mutex_trylock_common(pthread_mutex_t *mu
 		id = TID(curthread);
 		error = _thr_umutex_trylock(&m->m_lock, id);
 		if (__predict_true(error == 0)) {
-			ENQUEUE_MUTEX(curthread, m);
+			enqueue_mutex(curthread, m);
 		} else if (m->m_owner == curthread) {
 			error = mutex_self_trylock(m);
 		} /* else {} */
@@ -485,7 +477,7 @@ mutex_lock_kernel(struct pthread_mutex *
 		error = __thr_umutex_timedlock(&m->m_lock, id, abstime);
 	}
 	if (error == 0)
-		ENQUEUE_MUTEX(curthread, m);
+		enqueue_mutex(curthread, m);
 	return (error);
 }
 
@@ -502,7 +494,7 @@ _mutex_lock_common(struct pthread_mutex 
 		return mutex_lock_queued(m, abstime);
 	} else {
 		if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
-			ENQUEUE_MUTEX(curthread, m);
+			enqueue_mutex(curthread, m);
 			return (0);
 		}
 		return mutex_lock_kernel(m, abstime);
@@ -718,7 +710,7 @@ _mutex_unlock_common(struct pthread *cur
 	if (__predict_true(IS_SIMPLE_MUTEX(m)))
 		mutex_unlock_queued(curthread, m);
 	else {
-		DEQUEUE_MUTEX(curthread, m);
+		dequeue_mutex(curthread, m);
 		_thr_umutex_unlock(&m->m_lock, tid);
 	}
 	return (0);
@@ -800,7 +792,7 @@ _mutex_cv_detach(pthread_mutex_t *mutex,
 	*recurse = m->m_recurse;
 	m->m_recurse = 0;
 	m->m_refcount++;
-	DEQUEUE_MUTEX(curthread, m);
+	dequeue_mutex(curthread, m);
 	return (0);
 }
 
@@ -828,7 +820,8 @@ _pthread_mutex_setprioceiling(pthread_mu
 			      int ceiling, int *old_ceiling)
 {
 	struct pthread *curthread = _get_curthread();
-	struct pthread_mutex *m, *m1, *m2;
+	struct mutex_link *ml, *ml1, *ml2;
+	struct pthread_mutex *m;
 	int ret;
 
 	m = *mutex;
@@ -839,21 +832,25 @@ _pthread_mutex_setprioceiling(pthread_mu
 	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
 	if (ret != 0)
 		return (ret);
-
 	if (m->m_owner == curthread) {
-		MUTEX_ASSERT_IS_OWNED(m);
-		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
-		m2 = TAILQ_NEXT(m, m_qe);
-		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
-		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
-			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
-			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
-				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
-					TAILQ_INSERT_BEFORE(m2, m, m_qe);
+		TAILQ_FOREACH(ml, &curthread->pp_mutexq, qe) {
+			if (ml->mutexp == m)
+				break;
+		}
+		if (ml == NULL) /* howto ? */
+			return (0);
+		ml1 = TAILQ_PREV(ml, mutex_queue, qe);
+		ml2 = TAILQ_NEXT(ml, qe);
+		if ((ml1 != NULL && ml1->mutexp->m_lock.m_ceilings[0] > (u_int)ceiling) ||
+		    (ml2 != NULL && ml2->mutexp->m_lock.m_ceilings[0] < (u_int)ceiling)) {
+			TAILQ_REMOVE(&curthread->pp_mutexq, ml, qe);
+			TAILQ_FOREACH(ml2, &curthread->pp_mutexq, qe) {
+				if (ml2->mutexp->m_lock.m_ceilings[0] > (u_int)ceiling) {
+					TAILQ_INSERT_BEFORE(ml2, ml, qe);
 					return (0);
 				}
 			}
-			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
+			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, ml, qe);
 		}
 	}
 	return (0);

Modified: user/davidxu/libthr/lib/libthr/thread/thr_private.h
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_private.h	Tue Nov  2 07:46:31 2010	(r214660)
+++ user/davidxu/libthr/lib/libthr/thread/thr_private.h	Tue Nov  2 07:49:21 2010	(r214661)
@@ -77,7 +77,13 @@
 
 typedef TAILQ_HEAD(pthreadlist, pthread) pthreadlist;
 typedef TAILQ_HEAD(atfork_head, pthread_atfork) atfork_head;
-TAILQ_HEAD(mutex_queue, pthread_mutex);
+
+struct mutex_link {
+	TAILQ_ENTRY(mutex_link)	qe;
+	struct pthread_mutex	*mutexp;
+};
+
+TAILQ_HEAD(mutex_queue, mutex_link);
 
 /* Signal to do cancellation */
 #define	SIGCANCEL		32
@@ -161,10 +167,6 @@ struct pthread_mutex {
 	int				m_spinloops;
 	int				m_yieldloops;
 	int				m_private;
-	/*
-	 * Link for all mutexes a thread currently owns.
-	 */
-	TAILQ_ENTRY(pthread_mutex)	m_qe;
 };
 
 
@@ -845,6 +847,9 @@ void _thr_sigact_unload(struct dl_phdr_i
 struct wake_addr *_thr_alloc_wake_addr(void);
 void	_thr_release_wake_addr(struct wake_addr *);
 int	_thr_sleep(struct pthread *, const struct timespec *, int);
+void	_thr_mutex_link_init(void);
+struct mutex_link	*_thr_mutex_link_alloc(void);
+void	_thr_mutex_link_free(struct mutex_link *);
 
 void			_sleepq_init(void);
 struct sleepqueue *	_sleepq_alloc(void);
