PERFORCE change 69096 for review

David Xu davidxu at FreeBSD.org
Sat Jan 15 16:59:57 PST 2005


http://perforce.freebsd.org/chv.cgi?CH=69096

Change 69096 by davidxu at davidxu_tiger on 2005/01/16 00:59:51

	Revert some code; some tests show that using a static lock is better.
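
	For reference, the two styles differ roughly as sketched below; the
	names obj_t, obj_init() and _obj_static_lock are placeholders rather
	than the exact libthread identifiers.  The reverted approach
	published a freshly allocated object with an atomic compare-and-set
	and freed the loser's copy:

		obj = calloc(1, sizeof(*obj));
		if (obj == NULL)
			return (ENOMEM);
		/* Whichever thread installs the pointer first wins the race. */
		if (!atomic_cmpset_acq_ptr(objp, NULL, obj))
			free(obj);

	What this change restores is a single static spinlock that
	serializes the first-time initialization instead:

		static int
		init_static(struct pthread *thread, obj_t **objp)
		{
			int ret;

			THR_LOCK_ACQUIRE(thread, &_obj_static_lock);
			if (*objp == NULL)
				ret = obj_init(objp);	/* allocates and stores *objp */
			else
				ret = 0;	/* another thread already initialized it */
			THR_LOCK_RELEASE(thread, &_obj_static_lock);
			return (ret);
		}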

Affected files ...

.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_cond.c#11 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_mutex.c#15 edit
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_rwlock.c#4 edit

Differences ...

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_cond.c#11 (text+ko) ====

@@ -73,13 +73,29 @@
 		pcond->c_waiters = 0;
 		pcond->c_wakeups = 0;
 		pcond->c_flags = 0;
-		if (!atomic_cmpset_acq_ptr(cond, NULL, pcond))
-			free(pcond);
+		*cond = pcond;
 	}
 	/* Return the completion status: */
 	return (rval);
 }
 
+static int
+init_static(struct pthread *thread, pthread_cond_t *cond)
+{
+	int ret;
+
+	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
+
+	if (*cond == NULL)
+		ret = cond_init(cond, NULL);
+	else
+		ret = 0;
+
+	THR_LOCK_RELEASE(thread, &_cond_static_lock);
+
+	return (ret);
+}
+
 int
 _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
 {
@@ -98,9 +114,9 @@
 		rval = EINVAL;
 	else {
 		/* Lock the condition variable structure: */
-		THR_UMTX_LOCK(curthread, &(*cond)->c_lock);
+		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
 		if ((*cond)->c_waiters + (*cond)->c_wakeups != 0) {
-			THR_UMTX_UNLOCK(curthread, &(*cond)->c_lock);
+			THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
 			return (EBUSY);
 		}
 
@@ -112,7 +128,7 @@
 		*cond = NULL;
 
 		/* Unlock the condition variable structure: */
-		THR_UMTX_UNLOCK(curthread, &cv->c_lock);
+		THR_LOCK_RELEASE(curthread, &cv->c_lock);
 
 		/* Free the cond lock structure: */
 
@@ -142,7 +158,7 @@
 	pthread_cond_t cv;
 
 	cv = *(cci->cond);
-	THR_UMTX_LOCK(curthread, &cv->c_lock);
+	THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
 	if (cv->c_seqno != cci->seqno && cv->c_wakeups != 0) {
 		if (cv->c_waiters > 0) {
 			cv->c_seqno++;
@@ -152,7 +168,7 @@
 	} else {
 		cv->c_waiters--;
 	}
-	THR_UMTX_UNLOCK(curthread, &cv->c_lock);
+	THR_LOCK_RELEASE(curthread, &cv->c_lock);
 
 	_mutex_cv_lock(cci->mutex);
 }
@@ -173,14 +189,14 @@
 	 * perform the dynamic initialization:
 	 */
 	if (__predict_false(*cond == NULL &&
-	    (ret = cond_init(cond, NULL)) != 0))
+	    (ret = init_static(curthread, cond)) != 0))
 		return (ret);
 
 	cv = *cond;
-	THR_UMTX_LOCK(curthread, &cv->c_lock);
+	THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
 	ret = _mutex_cv_unlock(mutex);
 	if (ret) {
-		THR_UMTX_UNLOCK(curthread, &cv->c_lock);
+		THR_LOCK_RELEASE(curthread, &cv->c_lock);
 		return (ret);
 	}
 	oldseq = seq = cv->c_seqno;
@@ -192,18 +208,18 @@
 	do {
 		if (cancel) {
 			THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &cci);
-			THR_UMTX_UNLOCK(curthread, &cv->c_lock);
+			THR_LOCK_RELEASE(curthread, &cv->c_lock);
 			oldcancel = _thr_cancel_enter(curthread);
 			ret = umtx_timedwait((struct umtx *)&cv->c_seqno,
 				seq, abstime);
 			_thr_cancel_leave(curthread, oldcancel);
 			THR_CLEANUP_POP(curthread, 0);
 		} else {
-			THR_UMTX_UNLOCK(curthread, &cv->c_lock);
+			THR_LOCK_RELEASE(curthread, &cv->c_lock);
 			ret = umtx_timedwait((struct umtx *)&cv->c_seqno,
 				seq, abstime);
 		}
-		THR_UMTX_LOCK(curthread, &cv->c_lock);
+		THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
 		seq = cv->c_seqno;
 		if (abstime != NULL && ret != 0) {
 			if (ret == EINTR)
@@ -222,7 +238,7 @@
 	} else {
 		cv->c_waiters--;
 	}
-	THR_UMTX_UNLOCK(curthread, &cv->c_lock);
+	THR_LOCK_RELEASE(curthread, &cv->c_lock);
 	_mutex_cv_lock(mutex);
 	return (ret);
 }
@@ -280,12 +296,12 @@
 	 * initialization.
 	 */
 	if (__predict_false(*cond == NULL &&
-	    (ret = cond_init(cond, NULL)) != 0))
+	    (ret = init_static(curthread, cond)) != 0))
 		return (ret);
 
 	cv = *cond;
 	/* Lock the condition variable structure. */
-	THR_UMTX_LOCK(curthread, &cv->c_lock);
+	THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
 	if (cv->c_waiters) {
 		if (!broadcast) {
 			cv->c_wakeups++;
@@ -299,7 +315,7 @@
 			umtx_wake((struct umtx *)&cv->c_seqno, INT_MAX);
 		}
 	}
-	THR_UMTX_UNLOCK(curthread, &cv->c_lock);
+	THR_LOCK_RELEASE(curthread, &cv->c_lock);
 	return (ret);
 }
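
The case the new init_static() helper serializes is the first use of a
statically initialized condition variable, which may come from several
threads at once.  A minimal caller-side illustration of that pattern,
assuming ordinary application code (not part of this change):

#include <pthread.h>
#include <stdio.h>

/* In libthread these statically initialized objects start out as NULL. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready;

static void *
waiter(void *arg)
{
	pthread_mutex_lock(&lock);	/* first use: init under _mutex_static_lock */
	while (!ready)
		pthread_cond_wait(&cond, &lock); /* first use: init under _cond_static_lock */
	pthread_mutex_unlock(&lock);
	return (arg);
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, waiter, NULL);
	pthread_mutex_lock(&lock);
	ready = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(td, NULL);
	printf("done\n");
	return (0);
}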
 

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_mutex.c#15 (text+ko) ====

@@ -84,7 +84,6 @@
 static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
 static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
 static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
-static void		mutex_lock_backout(void *arg);
 
 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
@@ -182,8 +181,7 @@
 					pmutex->m_prio = -1;
 				pmutex->m_saved_prio = 0;
 				MUTEX_INIT_LINK(pmutex);
-				if (!atomic_cmpset_acq_ptr(mutex, NULL, pmutex))
-					MUTEX_DESTROY(pmutex);
+				*mutex = pmutex;
 			} else {
 				/* Free the mutex lock structure: */
 				MUTEX_DESTROY(pmutex);
@@ -195,11 +193,44 @@
 	return (ret);
 }
 
+static int
+init_static(struct pthread *thread, pthread_mutex_t *mutex)
+{
+	int ret;
+
+	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
+
+	if (*mutex == NULL)
+		ret = mutex_init(mutex, NULL, 0);
+	else
+		ret = 0;
+
+	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
+
+	return (ret);
+}
+
+static int
+init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
+{
+	int ret;
+
+	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
+
+	if (*mutex == NULL)
+		ret = mutex_init(mutex, NULL, 1);
+	else
+		ret = 0;
+
+	THR_LOCK_RELEASE(thread, &_mutex_static_lock);
+
+	return (ret);
+}
+
 int
 _pthread_mutex_init(pthread_mutex_t *mutex,
     const pthread_mutexattr_t *mutex_attr)
 {
-	*mutex = NULL;
 	return mutex_init(mutex, mutex_attr, 1);
 }
 
@@ -207,11 +238,9 @@
 __pthread_mutex_init(pthread_mutex_t *mutex,
     const pthread_mutexattr_t *mutex_attr)
 {
-	*mutex = NULL;
 	return mutex_init(mutex, mutex_attr, 0);
 }
 
-/* used to reinitialize mutex after fork() */
 int
 _mutex_reinit(pthread_mutex_t *mutex)
 {
@@ -302,7 +331,7 @@
 	int ret = 0;
 
 	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
-	    "Uninitialized mutex in pthread_mutex_trylock");
+	    "Uninitialized mutex in mutex_trylock_common");
 
 	/* Short cut for simple mutex. */
 	if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
@@ -426,15 +455,12 @@
 	struct pthread *curthread = _get_curthread();
 	int ret = 0;
 
-	if (mutex == NULL)
-		ret = EINVAL;
-
 	/*
 	 * If the mutex is statically initialized, perform the dynamic
 	 * initialization:
 	 */
-	else if ((*mutex != NULL) ||
-	    ((ret = mutex_init(mutex, NULL, 0)) == 0))
+	if ((*mutex != NULL) ||
+	    ((ret = init_static(curthread, mutex)) == 0))
 		ret = mutex_trylock_common(curthread, mutex);
 
 	return (ret);
@@ -446,15 +472,12 @@
 	struct pthread	*curthread = _get_curthread();
 	int	ret = 0;
 
-	if (mutex == NULL)
-		ret = EINVAL;
-
 	/*
 	 * If the mutex is statically initialized, perform the dynamic
 	 * initialization marking the mutex private (delete safe):
 	 */
-	else if ((*mutex != NULL) ||
-	    ((ret = mutex_init(mutex, NULL, 1)) == 0))
+	if ((*mutex != NULL) ||
+	    ((ret = init_static_private(curthread, mutex)) == 0))
 		ret = mutex_trylock_common(curthread, mutex);
 
 	return (ret);
@@ -470,6 +493,10 @@
 	THR_ASSERT((m != NULL) && (*m != NULL),
 	    "Uninitialized mutex in mutex_lock_common");
 
+	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+	    abstime->tv_nsec >= 1000000000))
+		return (EINVAL);
+
 	/* Short cut for simple mutex. */
 
 	if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
@@ -510,10 +537,6 @@
 
 	/* Code for priority mutex */
 
-	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
-	    abstime->tv_nsec >= 1000000000))
-		return (EINVAL);
-
 	/*
 	 * Enter a loop waiting to become the mutex owner.  We need a
 	 * loop in case the waiting thread is interrupted by a signal
@@ -580,7 +603,6 @@
 				 */
 				mutex_queue_enq(*m, curthread);
 				curthread->data.mutex = *m;
-				curthread->sigbackout = mutex_lock_backout;
 
 				if (curthread->active_priority > (*m)->m_prio)
 					/* Adjust priorities: */
@@ -612,7 +634,6 @@
 				 * thread is dequeued.
 				 */
 				curthread->data.mutex = NULL;
-				curthread->sigbackout = NULL;
 			}
 			break;
 
@@ -668,7 +689,6 @@
 				 */
 				mutex_queue_enq(*m, curthread);
 				curthread->data.mutex = *m;
-				curthread->sigbackout = mutex_lock_backout;
 
 				/* Clear any previous error: */
 				curthread->error = 0;
@@ -700,7 +720,6 @@
 				 * thread is dequeued.
 				 */
 				curthread->data.mutex = NULL;
-				curthread->sigbackout = NULL;
 
 				/*
 				 * The threads priority may have changed while
@@ -737,14 +756,12 @@
 	_thr_check_init();
 
 	curthread = _get_curthread();
-	if (m == NULL)
-		ret = EINVAL;
 
 	/*
 	 * If the mutex is statically initialized, perform the dynamic
 	 * initialization:
 	 */
-	else if ((*m != NULL) || ((ret = mutex_init(m, NULL, 0)) == 0))
+	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
 		ret = mutex_lock_common(curthread, m, NULL);
 
 	return (ret);
@@ -762,15 +779,12 @@
 
 	curthread = _get_curthread();
 
-	if (m == NULL)
-		ret = EINVAL;
-
 	/*
 	 * If the mutex is statically initialized, perform the dynamic
 	 * initialization marking it private (delete safe):
 	 */
-	else if ((*m != NULL) ||
-	    ((ret = mutex_init(m, NULL, 1)) == 0))
+	if ((*m != NULL) ||
+	    ((ret = init_static_private(curthread, m)) == 0))
 		ret = mutex_lock_common(curthread, m, NULL);
 
 	return (ret);
@@ -786,14 +800,12 @@
 	_thr_check_init();
 
 	curthread = _get_curthread();
-	if (m == NULL)
-		ret = EINVAL;
 
 	/*
 	 * If the mutex is statically initialized, perform the dynamic
 	 * initialization:
 	 */
-	else if ((*m != NULL) || ((ret = mutex_init(m, NULL, 0)) == 0))
+	if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
 		ret = mutex_lock_common(curthread, m, abs_timeout);
 
 	return (ret);
@@ -810,15 +822,12 @@
 
 	curthread = _get_curthread();
 
-	if (m == NULL)
-		ret = EINVAL;
-
 	/*
 	 * If the mutex is statically initialized, perform the dynamic
 	 * initialization marking it private (delete safe):
 	 */
-	else if ((*m != NULL) ||
-	    ((ret = mutex_init(m, NULL, 1)) == 0))
+	if ((*m != NULL) ||
+	    ((ret = init_static_private(curthread, m)) == 0))
 		ret = mutex_lock_common(curthread, m, abs_timeout);
 
 	return (ret);
@@ -889,11 +898,18 @@
 	switch (m->m_type) {
 	/* case PTHREAD_MUTEX_DEFAULT: */
 	case PTHREAD_MUTEX_ERRORCHECK:
-		/*
-		 * POSIX specifies that mutexes should return EDEADLK if a
-		 * recursive lock is detected.
-		 */
-		ret = EDEADLK; 
+		if (abstime) {
+			clock_gettime(CLOCK_REALTIME, &ts1);
+			TIMESPEC_SUB(&ts2, abstime, &ts1);
+			__sys_nanosleep(&ts2, NULL);
+			ret = ETIMEDOUT;
+		} else {
+			/*
+			 * POSIX specifies that mutexes should return EDEADLK if a
+			 * recursive lock is detected.
+			 */
+			ret = EDEADLK; 
+		}
 		break;
 
 	case PTHREAD_MUTEX_NORMAL:
@@ -970,6 +986,11 @@
 				 */
 				(*m)->m_count = 0;
 				(*m)->m_owner = NULL;
+				/*
+				 * XXX there should be a separate list
+				 * for owned mutexes, kept apart from
+				 * the priority mutex list
+				 */
 				/* Remove the mutex from the threads queue. */
 				MUTEX_ASSERT_IS_OWNED(*m);
 				TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
@@ -1476,58 +1497,6 @@
 }
 
 /*
- * This is called by the current thread when it wants to back out of a
- * mutex_lock in order to run a signal handler.
- */
-static void
-mutex_lock_backout(void *arg)
-{
-	struct pthread *curthread = (struct pthread *)arg;
-	struct pthread_mutex *m;
-
-	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
-		/*
-		 * Any other thread may clear the "in sync queue flag",
-		 * but only the current thread can clear the pointer
-		 * to the mutex.  So if the flag is set, we can
-		 * guarantee that the pointer to the mutex is valid.
-		 * The only problem may be if the mutex is destroyed
-		 * out from under us, but that should be considered
-		 * an application bug.
-		 */
-		m = curthread->data.mutex;
-
-		/* Lock the mutex structure: */
-		THR_LOCK_ACQUIRE(curthread, &m->m_lock);
-		curthread->data.mutex = NULL;
-
-		/*
-		 * Check to make sure this thread doesn't already own
-		 * the mutex.  Since mutexes are unlocked with direct
-		 * handoffs, it is possible the previous owner gave it
-		 * to us after we checked the sync queue flag and before
-		 * we locked the mutex structure.
-		 */
-		if (m->m_owner == curthread) {
-			THR_LOCK_RELEASE(curthread, &m->m_lock);
-			mutex_unlock_common(&m, /* add_reference */ 0);
-		} else {
-			/*
-			 * Remove ourselves from the mutex queue and
-			 * clear the pointer to the mutex.  We may no
-			 * longer be in the mutex queue, but the removal
-			 * function will DTRT.
-			 */
-			mutex_queue_remove(m, curthread);
-			curthread->data.mutex = NULL;
-			THR_LOCK_RELEASE(curthread, &m->m_lock);
-		}
-	}
-	/* No need to call this again. */
-	curthread->sigbackout = NULL;
-}
-
-/*
  * Dequeue a waiting thread from the head of a mutex queue in descending
  * priority order.
  *
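
One behavioural detail in the hunk that handles a thread relocking a mutex
it already owns: for an error-checking mutex, the timed path now sleeps out
the remaining time and reports ETIMEDOUT, while the untimed path still
reports EDEADLK.  A minimal caller-side sketch of the visible effect,
assuming an explicitly created PTHREAD_MUTEX_ERRORCHECK mutex (application
code, not part of this change):

#include <pthread.h>
#include <time.h>

/* Returns the error from relocking an error-checking mutex we already own. */
int
relock_with_deadline(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	struct timespec abstime;
	int ret;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&m);		/* first lock succeeds */

	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 1;		/* give up about one second from now */

	/*
	 * Blocks until abstime and returns ETIMEDOUT with this change;
	 * a plain pthread_mutex_lock() here would return EDEADLK
	 * immediately.
	 */
	ret = pthread_mutex_timedlock(&m, &abstime);

	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	return (ret);
}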

==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_rwlock.c#4 (text+ko) ====

@@ -86,12 +86,7 @@
 				/* success */
 				prwlock->state = 0;
 				prwlock->blocked_writers = 0;
-				if (!atomic_cmpset_acq_ptr(rwlock, NULL, prwlock)) {
-					/* we lost a race, it was already initialized */
-					_pthread_cond_destroy(&prwlock->read_signal);
-					_pthread_mutex_destroy(&prwlock->lock);
-					free(prwlock);
-				}
+				*rwlock = prwlock;
 			}
 		}
 	}
@@ -123,6 +118,23 @@
 	return (ret);
 }
 
+static int
+init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
+{
+	int ret;
+
+	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
+
+	if (*rwlock == NULL)
+		ret = rwlock_init(rwlock, NULL);
+	else
+		ret = 0;
+
+	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
+
+	return (ret);
+}
+
 int
 _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
 {
@@ -133,8 +145,8 @@
 static int
 rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
 {
+	struct pthread *curthread = _get_curthread();
 	pthread_rwlock_t prwlock;
-	struct pthread *curthread;
 	int ret;
 
 	if (rwlock == NULL)
@@ -144,7 +156,7 @@
 
 	/* check for static initialization */
 	if (prwlock == NULL) {
-		if ((ret = rwlock_init(rwlock, NULL)) != 0)
+		if ((ret = init_static(curthread, rwlock)) != 0)
 			return (ret);
 
 		prwlock = *rwlock;
@@ -225,7 +237,7 @@
 int
 _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
 {
-	struct pthread *curthread;
+	struct pthread *curthread = _get_curthread();
 	pthread_rwlock_t prwlock;
 	int ret;
 
@@ -236,7 +248,7 @@
 
 	/* check for static initialization */
 	if (prwlock == NULL) {
-		if ((ret = rwlock_init(rwlock, NULL)) != 0)
+		if ((ret = init_static(curthread, rwlock)) != 0)
 			return (ret);
 
 		prwlock = *rwlock;
@@ -271,6 +283,7 @@
 int
 _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
 {
+	struct pthread *curthread = _get_curthread();
 	pthread_rwlock_t prwlock;
 	int ret;
 
@@ -281,7 +294,7 @@
 
 	/* check for static initialization */
 	if (prwlock == NULL) {
-		if ((ret = rwlock_init(rwlock, NULL)) != 0)
+		if ((ret = init_static(curthread, rwlock)) != 0)
 			return (ret);
 
 		prwlock = *rwlock;
@@ -349,6 +362,7 @@
 static int
 rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
 {
+	struct pthread *curthread = _get_curthread();
 	pthread_rwlock_t prwlock;
 	int ret;
 
@@ -359,7 +373,7 @@
 
 	/* check for static initialization */
 	if (prwlock == NULL) {
-		if ((ret = rwlock_init(rwlock, NULL)) != 0)
+		if ((ret = init_static(curthread, rwlock)) != 0)
 			return (ret);
 
 		prwlock = *rwlock;
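
The rwlock changes follow the same pattern: curthread is now fetched up
front so the first lock on a statically initialized rwlock (a NULL
*rwlock) can be completed under _rwlock_static_lock.  A minimal
caller-side sketch, assuming application code that relies on this
library's treatment of a zero-filled rwlock as statically initialized
(portable code would call pthread_rwlock_init() explicitly):

#include <pthread.h>

static pthread_rwlock_t maplock;	/* zero-filled, so it starts out as a NULL pointer */
static int map_value;

int
read_value(void)
{
	int v;

	pthread_rwlock_rdlock(&maplock);	/* may init under _rwlock_static_lock */
	v = map_value;
	pthread_rwlock_unlock(&maplock);
	return (v);
}

void
write_value(int v)
{
	pthread_rwlock_wrlock(&maplock);
	map_value = v;
	pthread_rwlock_unlock(&maplock);
}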

