svn commit: r214768 - in user/davidxu/libthr/lib:
libthr.user_requeue libthr/thread
David Xu
davidxu at FreeBSD.org
Thu Nov 4 02:09:36 UTC 2010
Author: davidxu
Date: Thu Nov 4 02:09:36 2010
New Revision: 214768
URL: http://svn.freebsd.org/changeset/base/214768
Log:
Save previous work in libthr.user_requeue.
Follow the change in kernel, use kernel's requeue function.
Added:
user/davidxu/libthr/lib/libthr.user_requeue/
- copied from r214766, user/davidxu/libthr/lib/libthr/
Modified:
user/davidxu/libthr/lib/libthr/thread/Makefile.inc
user/davidxu/libthr/lib/libthr/thread/thr_barrier.c
user/davidxu/libthr/lib/libthr/thread/thr_cond.c
user/davidxu/libthr/lib/libthr/thread/thr_init.c
user/davidxu/libthr/lib/libthr/thread/thr_kern.c
user/davidxu/libthr/lib/libthr/thread/thr_list.c
user/davidxu/libthr/lib/libthr/thread/thr_mutex.c
user/davidxu/libthr/lib/libthr/thread/thr_private.h
user/davidxu/libthr/lib/libthr/thread/thr_umtx.c
user/davidxu/libthr/lib/libthr/thread/thr_umtx.h
Modified: user/davidxu/libthr/lib/libthr/thread/Makefile.inc
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/Makefile.inc Thu Nov 4 02:03:26 2010 (r214767)
+++ user/davidxu/libthr/lib/libthr/thread/Makefile.inc Thu Nov 4 02:09:36 2010 (r214768)
@@ -45,7 +45,6 @@ SRCS+= \
thr_setschedparam.c \
thr_sig.c \
thr_single_np.c \
- thr_sleepq.c \
thr_spec.c \
thr_spinlock.c \
thr_stack.c \
Modified: user/davidxu/libthr/lib/libthr/thread/thr_barrier.c
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_barrier.c Thu Nov 4 02:03:26 2010 (r214767)
+++ user/davidxu/libthr/lib/libthr/thread/thr_barrier.c Thu Nov 4 02:09:36 2010 (r214768)
@@ -102,7 +102,7 @@ _pthread_barrier_wait(pthread_barrier_t
} else {
cycle = bar->b_cycle;
do {
- _thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, 0);
+ _thr_ucond_wait(&bar->b_cv, &bar->b_lock, NULL, CVWAIT_BIND_MUTEX);
THR_UMUTEX_LOCK(curthread, &bar->b_lock);
/* test cycle to avoid bogus wakeup */
} while (cycle == bar->b_cycle);
Modified: user/davidxu/libthr/lib/libthr/thread/thr_cond.c
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_cond.c Thu Nov 4 02:03:26 2010 (r214767)
+++ user/davidxu/libthr/lib/libthr/thread/thr_cond.c Thu Nov 4 02:09:36 2010 (r214768)
@@ -45,10 +45,7 @@ int __pthread_cond_timedwait(pthread_con
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime, int cancel);
-static int cond_signal_common(pthread_cond_t *cond);
-static int cond_broadcast_common(pthread_cond_t *cond);
-
-#define CV_PSHARED(cv) (((cv)->c_kerncv.c_flags & USYNC_PROCESS_SHARED) != 0)
+static int cond_signal_common(pthread_cond_t *cond, int broadcast);
/*
* Double underscore versions are cancellation points. Single underscore
@@ -77,10 +74,10 @@ cond_init(pthread_cond_t *cond, const pt
* Initialise the condition variable structure:
*/
if (cond_attr == NULL || *cond_attr == NULL) {
+ pcond->c_pshared = 0;
pcond->c_clockid = CLOCK_REALTIME;
} else {
- if ((*cond_attr)->c_pshared)
- pcond->c_kerncv.c_flags |= USYNC_PROCESS_SHARED;
+ pcond->c_pshared = (*cond_attr)->c_pshared;
pcond->c_clockid = (*cond_attr)->c_clockid;
}
*cond = pcond;
@@ -139,10 +136,9 @@ _pthread_cond_destroy(pthread_cond_t *co
rval = EINVAL;
else {
cv = *cond;
- if (cv->c_mutex != NULL)
- return (EBUSY);
_thr_ucond_broadcast(&cv->c_kerncv);
*cond = THR_COND_DESTROYED;
+
/*
* Free the memory allocated for the condition
* variable structure:
@@ -152,43 +148,36 @@ _pthread_cond_destroy(pthread_cond_t *co
return (rval);
}
-struct cond_cancel_info
-{
- pthread_mutex_t *mutex;
- pthread_cond_t *cond;
- int recurse;
-};
-
-static void
-cond_cancel_handler(void *arg)
-{
- struct cond_cancel_info *info = (struct cond_cancel_info *)arg;
-
- _mutex_cv_lock(info->mutex, info->recurse, 1);
-}
-
/*
- * Wait on kernel based condition variable.
+ * Cancellation behavior:
+ * Thread may be canceled at start, if thread is canceled, it means it
+ * did not get a wakeup from pthread_cond_signal(), otherwise, it is
+ * not canceled.
+ * Thread cancellation never causes a wakeup
+ * to be lost.
*/
static int
-cond_wait_kernel(pthread_cond_t *cond, pthread_mutex_t *mutex,
+cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime, int cancel)
{
struct pthread *curthread = _get_curthread();
struct timespec ts, ts2, *tsp;
- struct pthread_mutex *m;
- struct cond_cancel_info info;
pthread_cond_t cv;
- int error, error2;
+ struct pthread_mutex *m;
+ int recurse;
+ int ret;
+
+ /*
+ * If the condition variable is statically initialized,
+ * perform the dynamic initialization:
+ */
+ CHECK_AND_INIT_COND
cv = *cond;
+ ret = _mutex_cv_detach(mutex, &recurse);
+ if (__predict_false(ret != 0))
+ return (ret);
m = *mutex;
- error = _mutex_cv_detach(mutex, &info.recurse);
- if (__predict_false(error != 0))
- return (error);
-
- info.mutex = mutex;
- info.cond = cond;
if (abstime != NULL) {
clock_gettime(cv->c_clockid, &ts);
@@ -198,131 +187,23 @@ cond_wait_kernel(pthread_cond_t *cond, p
tsp = NULL;
if (cancel) {
- THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &info);
_thr_cancel_enter2(curthread, 0);
- error = _thr_ucond_wait(&cv->c_kerncv, &m->m_lock, tsp, 1);
- info.cond = NULL;
- _thr_cancel_leave(curthread, (error != 0));
- THR_CLEANUP_POP(curthread, 0);
+ ret = _thr_ucond_wait(&cv->c_kerncv, &m->m_lock, tsp, CVWAIT_BIND_MUTEX);
+ _thr_cancel_leave(curthread, 0);
} else {
- error = _thr_ucond_wait(&cv->c_kerncv, &m->m_lock, tsp, 0);
- }
- if (error == EINTR)
- error = 0;
- error2 = _mutex_cv_lock(mutex, info.recurse, 1);
- return (error || error2);
-}
-
-/*
- * Cancellation behaivor:
- * Thread may be canceled at start, if thread is canceled, it means it
- * did not get a wakeup from pthread_cond_signal(), otherwise, it is
- * not canceled.
- * Thread cancellation never cause wakeup from pthread_cond_signal()
- * to be lost.
- */
-static int
-cond_wait_queue(pthread_cond_t *cond, pthread_mutex_t *mutex,
- const struct timespec *abstime, int cancel)
-{
- struct pthread *curthread = _get_curthread();
- struct pthread_mutex *m;
- struct sleepqueue *sq;
- pthread_cond_t cv;
- int recurse;
- int error;
-
- cv = *cond;
- /*
- * Enqueue thread before unlocking mutex, so we can avoid
- * sleep lock in pthread_cond_signal whenever possible.
- */
- if ((error = _mutex_owned(curthread, mutex)) != 0)
- return (error);
- sq = _sleepq_lock(cv, CV);
- if (cv->c_mutex != NULL && cv->c_mutex != mutex) {
- _sleepq_unlock(sq);
- return (EINVAL);
- }
- cv->c_mutex = mutex;
- _sleepq_add(sq, curthread);
- _thr_clear_wake(curthread);
- _sleepq_unlock(sq);
- (void)_mutex_cv_unlock(mutex, &recurse);
- m = *mutex;
- for (;;) {
- if (cancel) {
- _thr_cancel_enter2(curthread, 0);
- error = _thr_sleep(curthread, abstime, cv->c_clockid);
- _thr_cancel_leave(curthread, 0);
- } else {
- error = _thr_sleep(curthread, abstime, cv->c_clockid);
- }
- _thr_clear_wake(curthread);
-
- sq = _sleepq_lock(cv, CV);
- if (curthread->wchan == NULL) {
- /*
- * This must be signaled by mutex unlocking,
- * they remove us from mutex queue.
- */
- _sleepq_unlock(sq);
- error = 0;
- break;
- } if (curthread->wchan == m) {
- _sleepq_unlock(sq);
- /*
- * This must be signaled by cond_signal and there
- * is no owner for the mutex.
- */
- sq = _sleepq_lock(m, MX);
- if (curthread->wchan == m)
- _sleepq_remove(sq, curthread);
- _sleepq_unlock(sq);
- error = 0;
- break;
- } if (abstime != NULL && error == ETIMEDOUT) {
- _sleepq_remove(sq, curthread);
- if (_sleepq_empty(sq))
- cv->c_mutex = NULL;
- _sleepq_unlock(sq);
- break;
- } else if (SHOULD_CANCEL(curthread)) {
- _sleepq_remove(sq, curthread);
- if (_sleepq_empty(sq))
- cv->c_mutex = NULL;
- _sleepq_unlock(sq);
- (void)_mutex_cv_lock(mutex, recurse, 0);
- _pthread_exit(PTHREAD_CANCELED);
- }
- _sleepq_unlock(sq);
+ ret = _thr_ucond_wait(&cv->c_kerncv, &m->m_lock, tsp, CVWAIT_BIND_MUTEX);
}
- _mutex_cv_lock(mutex, recurse, 0);
- return (error);
-}
-
-static int
-cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
- const struct timespec *abstime, int cancel)
-{
- pthread_cond_t cv;
- struct pthread_mutex *m;
-
- /*
- * If the condition variable is statically initialized,
- * perform the dynamic initialization:
- */
- CHECK_AND_INIT_COND
- if ((m = *mutex) == NULL || m < THR_MUTEX_DESTROYED)
- return (EINVAL);
- if (IS_SIMPLE_MUTEX(m)) {
- if (!CV_PSHARED(cv))
- return cond_wait_queue(cond, mutex, abstime, cancel);
- else
- return (EINVAL);
- } else {
- return cond_wait_kernel(cond, mutex, abstime, cancel);
+ if (ret == EINTR)
+ ret = 0;
+ if (ret == 0 || ret == ETIMEDOUT)
+ return _mutex_cv_lock(mutex, recurse);
+ else {
+ /* We know that it didn't unlock the mutex. */
+ _mutex_cv_attach(mutex, recurse);
+ if (cancel)
+ _thr_testcancel(curthread);
}
+ return (ret);
}
int
@@ -364,74 +245,10 @@ __pthread_cond_timedwait(pthread_cond_t
}
static int
-cond_signal_common(pthread_cond_t *cond)
+cond_signal_common(pthread_cond_t *cond, int broadcast)
{
- pthread_mutex_t *mutex;
- struct pthread_mutex *m;
- struct pthread *td;
- struct pthread_cond *cv;
- struct sleepqueue *cv_sq, *mx_sq;
- unsigned *waddr = NULL;
-
- /*
- * If the condition variable is statically initialized, perform dynamic
- * initialization.
- */
- CHECK_AND_INIT_COND
-
- _thr_ucond_signal(&cv->c_kerncv);
-
- if (CV_PSHARED(cv))
- return (0);
-
- /* There is no waiter. */
- if (cv->c_mutex == NULL)
- return (0);
-
- cv_sq = _sleepq_lock(cv, CV);
- if (_sleepq_empty(cv_sq)) {
- _sleepq_unlock(cv_sq);
- return (0);
- }
- /*
- * Check if we owned the temporarily binding mutex,
- * if owned, we can migrate thread to mutex wait
- * queue without waking up thread.
- */
- if ((mutex = cv->c_mutex) != NULL)
- m = *mutex;
- else {
- _sleepq_unlock(cv_sq);
- PANIC("mutex == NULL");
- }
-
- td = _sleepq_first(cv_sq);
- if (m->m_owner == NULL)
- waddr = WAKE_ADDR(td);
- _sleepq_remove(cv_sq, td);
- mx_sq = _sleepq_lock(m, MX);
- _sleepq_add(mx_sq, td);
- _mutex_set_contested(m);
- _sleepq_unlock(mx_sq);
- if (_sleepq_empty(cv_sq))
- cv->c_mutex = NULL;
- _sleepq_unlock(cv_sq);
- if (waddr != NULL) {
- _thr_set_wake(waddr);
- _thr_umtx_wake(waddr, INT_MAX, 0);
- }
- return (0);
-}
-
-static int
-cond_broadcast_common(pthread_cond_t *cond)
-{
- pthread_mutex_t *mutex;
- struct pthread_mutex *m;
- struct pthread *td;
- struct pthread_cond *cv;
- struct sleepqueue *cv_sq, *mx_sq;
- unsigned *waddr = NULL;
+ pthread_cond_t cv;
+ int ret = 0;
/*
* If the condition variable is statically initialized, perform dynamic
@@ -439,58 +256,23 @@ cond_broadcast_common(pthread_cond_t *co
*/
CHECK_AND_INIT_COND
- _thr_ucond_broadcast(&cv->c_kerncv);
-
- if (CV_PSHARED(cv))
- return (0);
-
- /* There is no waiter. */
- if (cv->c_mutex == NULL)
- return (0);
-
- cv_sq = _sleepq_lock(cv, CV);
- if (_sleepq_empty(cv_sq)) {
- _sleepq_unlock(cv_sq);
- return (0);
- }
- /*
- * Check if we owned the temporarily binding mutex,
- * if owned, we can migrate thread to mutex wait
- * queue without waking up thread.
- */
- if ((mutex = cv->c_mutex) != NULL)
- m = *mutex;
- else {
- _sleepq_unlock(cv_sq);
- PANIC("mutex == NULL");
- }
-
- td = _sleepq_first(cv_sq);
- if (m->m_owner == NULL)
- waddr = WAKE_ADDR(td);
- mx_sq = _sleepq_lock(m, MX);
- _sleepq_concat(mx_sq, cv_sq);
- _mutex_set_contested(m);
- _sleepq_unlock(mx_sq);
- cv->c_mutex = NULL;
- _sleepq_unlock(cv_sq);
- if (waddr != NULL) {
- _thr_set_wake(waddr);
- _thr_umtx_wake(waddr, INT_MAX, 0);
- }
- return (0);
+ if (!broadcast)
+ ret = _thr_ucond_signal(&cv->c_kerncv);
+ else
+ ret = _thr_ucond_broadcast(&cv->c_kerncv);
+ return (ret);
}
int
_pthread_cond_signal(pthread_cond_t * cond)
{
- return (cond_signal_common(cond));
+ return (cond_signal_common(cond, 0));
}
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
- return (cond_broadcast_common(cond));
+ return (cond_signal_common(cond, 1));
}
Modified: user/davidxu/libthr/lib/libthr/thread/thr_init.c
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_init.c Thu Nov 4 02:03:26 2010 (r214767)
+++ user/davidxu/libthr/lib/libthr/thread/thr_init.c Thu Nov 4 02:09:36 2010 (r214768)
@@ -444,7 +444,6 @@ init_private(void)
_thr_once_init();
_thr_spinlock_init();
_thr_list_init();
- _sleepq_init();
/*
* Avoid reinitializing some things if they don't need to be,
Modified: user/davidxu/libthr/lib/libthr/thread/thr_kern.c
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_kern.c Thu Nov 4 02:03:26 2010 (r214767)
+++ user/davidxu/libthr/lib/libthr/thread/thr_kern.c Thu Nov 4 02:09:36 2010 (r214768)
@@ -31,7 +31,6 @@
#include <sys/signalvar.h>
#include <sys/rtprio.h>
#include <pthread.h>
-#include <sys/mman.h>
#include "thr_private.h"
@@ -42,93 +41,6 @@
#define DBG_MSG(x...)
#endif
-static struct umutex addr_lock;
-static struct wake_addr *wake_addr_head;
-static struct wake_addr default_wake_addr;
-static struct umutex mutex_link_lock;
-static struct mutex_queue mutex_link_freeq;
-
-struct wake_addr *
-_thr_alloc_wake_addr(void)
-{
- struct pthread *curthread;
- struct wake_addr *p;
-
- if (_thr_initial == NULL) {
- return &default_wake_addr;
- }
-
- curthread = _get_curthread();
-
- THR_UMUTEX_LOCK(curthread, &addr_lock);
- if (wake_addr_head == NULL) {
- unsigned i;
- unsigned pagesize = getpagesize();
- struct wake_addr *pp = (struct wake_addr *)mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
- for (i = 1; i < pagesize/sizeof(struct wake_addr); ++i)
- pp[i].link = &pp[i+1];
- pp[i-1].link = NULL;
- wake_addr_head = &pp[1];
- p = &pp[0];
- } else {
- p = wake_addr_head;
- wake_addr_head = p->link;
- }
- THR_UMUTEX_UNLOCK(curthread, &addr_lock);
- return (p);
-}
-
-void
-_thr_release_wake_addr(struct wake_addr *wa)
-{
- struct pthread *curthread = _get_curthread();
-
- if (wa == &default_wake_addr)
- return;
- THR_UMUTEX_LOCK(curthread, &addr_lock);
- wa->link = wake_addr_head;
- wake_addr_head = wa;
- THR_UMUTEX_UNLOCK(curthread, &addr_lock);
-}
-
-void
-_thr_mutex_link_init(void)
-{
- TAILQ_INIT(&mutex_link_freeq);
- _thr_umutex_init(&mutex_link_lock);
-}
-
-struct mutex_link *
-_thr_mutex_link_alloc(void)
-{
- struct pthread *curthread = _get_curthread();
- struct mutex_link *p;
-
- THR_LOCK_ACQUIRE(curthread, &mutex_link_lock);
- p = TAILQ_FIRST(&mutex_link_freeq);
- if (p == NULL) {
- unsigned i;
- unsigned pagesize = getpagesize();
- struct mutex_link *pp = (struct mutex_link *)mmap(NULL, getpagesize(),
- PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
- for (i = 1; i < pagesize/sizeof(struct mutex_link); ++i)
- TAILQ_INSERT_TAIL(&mutex_link_freeq, &pp[i], qe);
- p = &pp[0];
- }
- THR_LOCK_RELEASE(curthread, &mutex_link_lock);
- return (p);
-}
-
-void
-_thr_mutex_link_free(struct mutex_link *p)
-{
- struct pthread *curthread = _get_curthread();
-
- THR_LOCK_ACQUIRE(curthread, &mutex_link_lock);
- TAILQ_INSERT_TAIL(&mutex_link_freeq, p, qe);
- THR_LOCK_RELEASE(curthread, &mutex_link_lock);
-}
-
/*
* This is called when the first thread (other than the initial
* thread) is created.
@@ -218,29 +130,3 @@ _thr_setscheduler(lwpid_t lwpid, int pol
_schedparam_to_rtp(policy, param, &rtp);
return (rtprio_thread(RTP_SET, lwpid, &rtp));
}
-
-/* Sleep on thread wakeup address */
-int
-_thr_sleep(struct pthread *curthread, const struct timespec *abstime, int clockid)
-{
- struct timespec *tsp, ts, ts2;
- int error;
-
- if (abstime != NULL) {
- if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
- abstime->tv_nsec >= 1000000000) {
- return (EINVAL);
- }
- clock_gettime(clockid, &ts);
- TIMESPEC_SUB(&ts2, abstime, &ts);
- if (ts2.tv_sec < 0 || ts2.tv_nsec <= 0)
- return (ETIMEDOUT);
- tsp = &ts2;
- } else {
- tsp = NULL;
- }
-
- error = _thr_umtx_wait_uint(&curthread->wake_addr->value,
- 0, tsp, 0);
- return (error);
-}
Modified: user/davidxu/libthr/lib/libthr/thread/thr_list.c
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_list.c Thu Nov 4 02:03:26 2010 (r214767)
+++ user/davidxu/libthr/lib/libthr/thread/thr_list.c Thu Nov 4 02:09:36 2010 (r214768)
@@ -165,8 +165,6 @@ _thr_alloc(struct pthread *curthread)
if (tcb != NULL) {
memset(thread, 0, sizeof(*thread));
thread->tcb = tcb;
- thread->wake_addr = _thr_alloc_wake_addr();
- thread->sleepqueue = _sleepq_alloc();
} else {
thr_destroy(curthread, thread);
atomic_fetchadd_int(&total_threads, -1);
@@ -195,8 +193,6 @@ _thr_free(struct pthread *curthread, str
thread->tcb = NULL;
if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
thr_destroy(curthread, thread);
- _thr_release_wake_addr(thread->wake_addr);
- _sleepq_free(thread->sleepqueue);
atomic_fetchadd_int(&total_threads, -1);
} else {
/*
Modified: user/davidxu/libthr/lib/libthr/thread/thr_mutex.c
==============================================================================
--- user/davidxu/libthr/lib/libthr/thread/thr_mutex.c Thu Nov 4 02:03:26 2010 (r214767)
+++ user/davidxu/libthr/lib/libthr/thread/thr_mutex.c Thu Nov 4 02:09:36 2010 (r214768)
@@ -45,8 +45,24 @@
#include "thr_private.h"
-#ifndef UMUTEX_TIDMASK
-#define UMUTEX_TIDMASK (~UMUTEX_CONTESTED)
+#if defined(_PTHREADS_INVARIANTS)
+#define MUTEX_INIT_LINK(m) do { \
+ (m)->m_qe.tqe_prev = NULL; \
+ (m)->m_qe.tqe_next = NULL; \
+} while (0)
+#define MUTEX_ASSERT_IS_OWNED(m) do { \
+ if (__predict_false((m)->m_qe.tqe_prev == NULL))\
+ PANIC("mutex is not on list"); \
+} while (0)
+#define MUTEX_ASSERT_NOT_OWNED(m) do { \
+ if (__predict_false((m)->m_qe.tqe_prev != NULL || \
+ (m)->m_qe.tqe_next != NULL)) \
+ PANIC("mutex is on list"); \
+} while (0)
+#else
+#define MUTEX_INIT_LINK(m)
+#define MUTEX_ASSERT_IS_OWNED(m)
+#define MUTEX_ASSERT_NOT_OWNED(m)
#endif
/*
@@ -77,6 +93,8 @@ static int mutex_self_trylock(pthread_mu
static int mutex_self_lock(pthread_mutex_t,
const struct timespec *abstime);
static int mutex_unlock_common(pthread_mutex_t *);
+static int mutex_lock_sleep(struct pthread *, pthread_mutex_t,
+ const struct timespec *);
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
@@ -129,13 +147,14 @@ mutex_init(pthread_mutex_t *mutex,
pmutex->m_type = attr->m_type;
pmutex->m_owner = NULL;
- pmutex->m_recurse = 0;
+ pmutex->m_count = 0;
pmutex->m_refcount = 0;
pmutex->m_spinloops = 0;
pmutex->m_yieldloops = 0;
+ MUTEX_INIT_LINK(pmutex);
switch(attr->m_protocol) {
case PTHREAD_PRIO_NONE:
- pmutex->m_lock.m_owner = 0;
+ pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
pmutex->m_lock.m_flags = 0;
break;
case PTHREAD_PRIO_INHERIT:
@@ -154,6 +173,7 @@ mutex_init(pthread_mutex_t *mutex,
_thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
pmutex->m_yieldloops = _thr_yieldloops;
}
+
*mutex = pmutex;
return (0);
}
@@ -161,71 +181,33 @@ mutex_init(pthread_mutex_t *mutex,
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
- int error;
+ int ret;
THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
if (*mutex == THR_MUTEX_INITIALIZER)
- error = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
+ ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
- error = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc);
+ ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default, calloc);
else
- error = 0;
+ ret = 0;
THR_LOCK_RELEASE(thread, &_mutex_static_lock);
- return (error);
+ return (ret);
}
static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
- struct mutex_link *ml2;
+ struct pthread_mutex *m2;
- ml2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
- if (ml2 != NULL)
- m->m_lock.m_ceilings[1] = ml2->mutexp->m_lock.m_ceilings[0];
+ m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
+ if (m2 != NULL)
+ m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
else
m->m_lock.m_ceilings[1] = -1;
}
-static void
-enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
-{
- m->m_owner = curthread;
- struct mutex_link *ml = _thr_mutex_link_alloc();
- ml->mutexp = m;
- if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
- TAILQ_INSERT_TAIL(&curthread->mutexq, ml, qe);
- else
- TAILQ_INSERT_TAIL(&curthread->pp_mutexq, ml, qe);
-}
-
-static void
-dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
-{
- struct mutex_link *ml;
-
- if ((((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)) {
- TAILQ_FOREACH(ml, &curthread->mutexq, qe) {
- if (ml->mutexp == m) {
- TAILQ_REMOVE(&curthread->mutexq, ml, qe);
- _thr_mutex_link_free(ml);
- break;
- }
- }
- } else {
- TAILQ_FOREACH(ml, &curthread->pp_mutexq, qe) {
- if (ml->mutexp == m) {
- TAILQ_REMOVE(&curthread->mutexq, ml, qe);
- set_inherited_priority(curthread, m);
- _thr_mutex_link_free(ml);
- break;
- }
- }
- }
- m->m_owner = NULL;
-}
-
int
__pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
@@ -243,18 +225,18 @@ _pthread_mutex_init_calloc_cb(pthread_mu
.m_protocol = PTHREAD_PRIO_NONE,
.m_ceiling = 0
};
- int error;
+ int ret;
- error = mutex_init(mutex, &attr, calloc_cb);
- if (error == 0)
+ ret = mutex_init(mutex, &attr, calloc_cb);
+ if (ret == 0)
(*mutex)->m_private = 1;
- return (error);
+ return (ret);
}
void
_mutex_fork(struct pthread *curthread)
{
- struct mutex_link *ml;
+ struct pthread_mutex *m;
/*
* Fix mutex ownership for child process.
@@ -265,10 +247,11 @@ _mutex_fork(struct pthread *curthread)
* process shared mutex is not supported, so I
* am not worried.
*/
- TAILQ_FOREACH(ml, &curthread->mutexq, qe)
- ml->mutexp->m_lock.m_owner = TID(curthread);
- TAILQ_FOREACH(ml, &curthread->pp_mutexq, qe)
- ml->mutexp->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
+
+ TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
+ m->m_lock.m_owner = TID(curthread);
+ TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
+ m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}
int
@@ -287,6 +270,7 @@ _pthread_mutex_destroy(pthread_mutex_t *
ret = EBUSY;
} else {
*mutex = THR_MUTEX_DESTROYED;
+ MUTEX_ASSERT_NOT_OWNED(m);
free(m);
ret = 0;
}
@@ -295,66 +279,48 @@ _pthread_mutex_destroy(pthread_mutex_t *
return (ret);
}
+#define ENQUEUE_MUTEX(curthread, m) \
+ do { \
+ (m)->m_owner = curthread; \
+ /* Add to the list of owned mutexes: */ \
+ MUTEX_ASSERT_NOT_OWNED((m)); \
+ if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) \
+ TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
+ else \
+ TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
+ } while (0)
+
#define CHECK_AND_INIT_MUTEX \
if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) { \
if (m == THR_MUTEX_DESTROYED) \
return (EINVAL); \
- int error; \
- error = init_static(_get_curthread(), mutex); \
- if (error) \
- return (error); \
+ int ret; \
+ ret = init_static(_get_curthread(), mutex); \
+ if (ret) \
+ return (ret); \
m = *mutex; \
}
-static inline int
-set_lockword(struct pthread_mutex *m)
-{
- uint32_t old;
-
- if (atomic_cmpset_acq_32(&m->m_lockword, 0, MTX_LOCKED))
- return (0);
- old = m->m_lockword;
- while ((old & MTX_LOCKED) == 0) {
- if (atomic_cmpset_acq_32(&m->m_lockword, old, old|MTX_LOCKED))
- return (0);
- old = m->m_lockword;
- }
- return (EBUSY);
-}
-
static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
struct pthread *curthread = _get_curthread();
struct pthread_mutex *m = *mutex;
- int error;
-
+ uint32_t id;
+ int ret;
+
+ id = TID(curthread);
if (m->m_private)
THR_CRITICAL_ENTER(curthread);
- if (IS_SIMPLE_MUTEX(m)) {
- if (set_lockword(m) == 0) {
- m->m_owner = curthread;
- return (0);
- }
-
- if (m->m_owner == curthread)
- error = mutex_self_trylock(m);
- else
- error = EBUSY;
- } else {
- uint32_t id;
-
- id = TID(curthread);
- error = _thr_umutex_trylock(&m->m_lock, id);
- if (__predict_true(error == 0)) {
- enqueue_mutex(curthread, m);
- } else if (m->m_owner == curthread) {
- error = mutex_self_trylock(m);
- } /* else {} */
- }
- if (error != 0 && m->m_private)
+ ret = _thr_umutex_trylock(&m->m_lock, id);
+ if (__predict_true(ret == 0)) {
+ ENQUEUE_MUTEX(curthread, m);
+ } else if (m->m_owner == curthread) {
+ ret = mutex_self_trylock(m);
+ } /* else {} */
+ if (ret && m->m_private)
THR_CRITICAL_LEAVE(curthread);
- return (error);
+ return (ret);
}
int
@@ -367,157 +333,92 @@ __pthread_mutex_trylock(pthread_mutex_t
return (mutex_trylock_common(mutex));
}
-/* Lock user-mode queue based mutex. */
static int
-mutex_lock_queued(struct pthread_mutex *m,
+mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
const struct timespec *abstime)
{
- struct pthread *curthread = _get_curthread();
- struct sleepqueue *sq;
- uint32_t old;
- int error = 0;
- int spin;
+ uint32_t id, owner;
+ int count;
+ int ret;
if (m->m_owner == curthread)
return mutex_self_lock(m, abstime);
- if (__predict_false(abstime != NULL &&
- (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
- abstime->tv_nsec >= 1000000000)))
- return (EINVAL);
-
- spin = m->m_spinloops;
- for (;;) {
- if (!_thr_is_smp)
- goto sleep;
- while (spin-- > 0) {
- /*
- * For adaptive mutexes, spin for a bit in the expectation
- * that if the application requests this mutex type then
- * the lock is likely to be released quickly and it is
- * faster than entering the kernel
- */
- old = m->m_lockword;
- if ((old & MTX_LOCKED) == 0) {
- if (set_lockword(m) == 0) {
- m->m_owner = curthread;
- error = 0;
- goto out;
- }
+ id = TID(curthread);
+ /*
+ * For adaptive mutexes, spin for a bit in the expectation
+ * that if the application requests this mutex type then
+ * the lock is likely to be released quickly and it is
+ * faster than entering the kernel
+ */
+ if (__predict_false(
+ (m->m_lock.m_flags &
+ (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
+ goto sleep_in_kernel;
+
+ if (!_thr_is_smp)
+ goto yield_loop;
+
+ count = m->m_spinloops;
+ while (count--) {
+ owner = m->m_lock.m_owner;
+ if ((owner & ~UMUTEX_CONTESTED) == 0) {
+ if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
+ ret = 0;
+ goto done;
}
- CPU_SPINWAIT;
}
-sleep:
- _thr_clear_wake(curthread);
+ CPU_SPINWAIT;
+ }
- sq = _sleepq_lock(m, MX);
- if (curthread->wchan == NULL)
- _sleepq_add(sq, curthread);
- _sleepq_unlock(sq);
- old = m->m_lockword;
- /* Set contested bit. */
- while ((old & MTX_LOCKED) != 0 && (old & MTX_CONTESTED) == 0) {
- if (atomic_cmpset_acq_32(&m->m_lockword,
- old, old|MTX_CONTESTED))
- break;
- old = m->m_lockword;
- }
- if ((old & MTX_LOCKED) != 0) {
- error = _thr_sleep(curthread, abstime, CLOCK_REALTIME);
- if (error != EINTR) {
- if (curthread->wchan != NULL) {
- sq = _sleepq_lock(m, MX);
- if (curthread->wchan != NULL)
- _sleepq_remove(sq, curthread);
- _sleepq_unlock(sq);
- }
- } else
- error = 0;
- old = m->m_lockword;
- }
- if (set_lockword(m) == 0) {
- m->m_owner = curthread;
- error = 0;
- break;
+yield_loop:
+ count = m->m_yieldloops;
+ while (count--) {
+ _sched_yield();
+ owner = m->m_lock.m_owner;
+ if ((owner & ~UMUTEX_CONTESTED) == 0) {
+ if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
+ ret = 0;
+ goto done;
+ }
}
- if (error != 0)
- break;
- spin = m->m_spinloops;
}
-out:
- if (curthread->wchan != NULL) {
- sq = _sleepq_lock(m, MX);
- if (curthread->wchan != NULL)
- _sleepq_remove(sq, curthread);
- _sleepq_unlock(sq);
- }
- return (error);
-}
-/* Enter kernel and lock mutex */
-static int
-mutex_lock_kernel(struct pthread_mutex *m,
- const struct timespec *abstime)
-{
- struct pthread *curthread = _get_curthread();
- uint32_t id;
- int error;
-
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-src-user
mailing list