PERFORCE change 65255 for review
David Xu
davidxu at freebsd.org
Tue Nov 16 05:28:15 PST 2004
Ouch, why is the diff so big?
It really should be:
@@ -155,14 +155,16 @@
}
static void
-umtx_remove(struct umtx_q *uq, struct thread *td)
+umtx_remove(struct umtx_q *uq, struct thread *td, struct umtx *umtx)
{
TAILQ_REMOVE(&uq->uq_tdq, td, td_umtx);
if (TAILQ_EMPTY(&uq->uq_tdq)) {
LIST_REMOVE(uq, uq_next);
+ UMTX_UNLOCK(td, umtx);
free(uq, M_UMTX);
- }
+ } else
+ UMTX_UNLOCK(td, umtx);
}
int
@@ -237,8 +239,7 @@
/* The address was invalid. */
if (old == -1) {
UMTX_LOCK(td, umtx);
- umtx_remove(uq, td);
- UMTX_UNLOCK(td, umtx);
+ umtx_remove(uq, td, umtx);
return (EFAULT);
}
@@ -253,8 +254,7 @@
td->td_priority | PCATCH, "umtx", 0);
else
error = 0;
- umtx_remove(uq, td);
- UMTX_UNLOCK(td, umtx);
+ umtx_remove(uq, td, umtx);
if (td->td_flags & TDF_UMTXWAKEUP) {
/*
David Xu wrote:
>http://perforce.freebsd.org/chv.cgi?CH=65255
>
>Change 65255 by davidxu at davidxu_alona on 2004/11/16 13:16:46
>
> 	Don't free memory while holding the mutex.
>
>Affected files ...
>
>.. //depot/projects/davidxu_ksedbg/sys/kern/kern_umtx.c#2 edit
>
>Differences ...
>
>==== //depot/projects/davidxu_ksedbg/sys/kern/kern_umtx.c#2 (text+ko) ====
>
>@@ -49,25 +49,48 @@
> pid_t uq_pid; /* Pid key component. */
> };
>
>+LIST_HEAD(umtx_head, umtx_q);
>+struct umtxq_chain {
>+ struct umtx_head uc_queues; /* List of sleep queues. */
>+ struct mtx uc_lock; /* lock for this chain. */
>+};
>+
> #define UMTX_QUEUES 128
>-#define UMTX_HASH(pid, umtx) \
>- (((uintptr_t)pid + ((uintptr_t)umtx & ~65535)) % UMTX_QUEUES)
>+#define UMTX_HASH(pid, umtx) \
>+ ((((uintptr_t)pid << 16) + (((uintptr_t)umtx >> 4) & 65535)) % UMTX_QUEUES)
>
>-LIST_HEAD(umtx_head, umtx_q);
>-static struct umtx_head queues[UMTX_QUEUES];
>+static struct umtxq_chain umtxq_chains[UMTX_QUEUES];
> static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
>
>-static struct mtx umtx_lock;
>-MTX_SYSINIT(umtx, &umtx_lock, "umtx", MTX_DEF);
>-
>-#define UMTX_LOCK() mtx_lock(&umtx_lock);
>-#define UMTX_UNLOCK() mtx_unlock(&umtx_lock);
>+#define UMTX_LOCK(td, umtx) \
>+ mtx_lock(&umtxq_chains[UMTX_HASH((td)->td_proc->p_pid, \
>+ (umtx))].uc_lock)
>+#define UMTX_UNLOCK(td, umtx) \
>+ mtx_unlock(&umtxq_chains[UMTX_HASH((td)->td_proc->p_pid, \
>+ (umtx))].uc_lock);
>+#define UMTX_MUTEX(td, umtx) \
>+ (&umtxq_chains[UMTX_HASH(td->td_proc->p_pid, umtx)].uc_lock)
>
> #define UMTX_CONTESTED LONG_MIN
>
>+static void umtx_queues_init(void *);
> static struct umtx_q *umtx_lookup(struct thread *, struct umtx *umtx);
> static struct umtx_q *umtx_insert(struct thread *, struct umtx *umtx);
>
>+SYSINIT(umtx, SI_SUB_LOCK, SI_ORDER_MIDDLE, umtx_queues_init, NULL);
>+
>+static void
>+umtx_queues_init(void *arg)
>+{
>+ int i;
>+
>+ for (i = 0; i < UMTX_QUEUES; ++i) {
>+ LIST_INIT(&umtxq_chains[i].uc_queues);
>+ mtx_init(&umtxq_chains[i].uc_lock, "umtxq_lock", NULL,
>+ MTX_DEF | MTX_DUPOK);
>+ }
>+}
>+
> static struct umtx_q *
> umtx_lookup(struct thread *td, struct umtx *umtx)
> {
>@@ -75,9 +98,11 @@
> struct umtx_q *uq;
> pid_t pid;
>
>+	mtx_assert(UMTX_MUTEX(td, umtx), MA_OWNED);
>+
> pid = td->td_proc->p_pid;
>
>- head = &queues[UMTX_HASH(td->td_proc->p_pid, umtx)];
>+ head = &umtxq_chains[UMTX_HASH(td->td_proc->p_pid, umtx)].uc_queues;
>
> LIST_FOREACH(uq, head, uq_next) {
> if (uq->uq_pid == pid && uq->uq_umtx == umtx)
>@@ -102,16 +127,16 @@
> if ((uq = umtx_lookup(td, umtx)) == NULL) {
> struct umtx_q *ins;
>
>- UMTX_UNLOCK();
>+ UMTX_UNLOCK(td, umtx);
> ins = malloc(sizeof(*uq), M_UMTX, M_ZERO | M_WAITOK);
>- UMTX_LOCK();
>+ UMTX_LOCK(td, umtx);
>
> /*
> * Some one else could have succeeded while we were blocked
> * waiting on memory.
> */
> if ((uq = umtx_lookup(td, umtx)) == NULL) {
>- head = &queues[UMTX_HASH(pid, umtx)];
>+ head = &umtxq_chains[UMTX_HASH(pid, umtx)].uc_queues;
> uq = ins;
> uq->uq_pid = pid;
> uq->uq_umtx = umtx;
>@@ -130,14 +155,16 @@
> }
>
> static void
>-umtx_remove(struct umtx_q *uq, struct thread *td)
>+umtx_remove(struct umtx_q *uq, struct thread *td, struct umtx *umtx)
> {
> TAILQ_REMOVE(&uq->uq_tdq, td, td_umtx);
>
> if (TAILQ_EMPTY(&uq->uq_tdq)) {
> LIST_REMOVE(uq, uq_next);
>+ UMTX_UNLOCK(td, umtx);
> free(uq, M_UMTX);
>- }
>+ } else
>+ UMTX_UNLOCK(td, umtx);
> }
>
> int
>@@ -148,7 +175,7 @@
> struct umtx *umtx;
> intptr_t owner;
> intptr_t old;
>- int error;
>+ int error = 0;
>
> uq = NULL;
>
>@@ -165,34 +192,40 @@
> owner = casuptr((intptr_t *)&umtx->u_owner,
> UMTX_UNOWNED, td->td_tid);
>
>+ /* The acquire succeeded. */
>+ if (owner == UMTX_UNOWNED)
>+ return (0);
>+
> /* The address was invalid. */
> if (owner == -1)
> return (EFAULT);
>
>- /* The acquire succeeded. */
>- if (owner == UMTX_UNOWNED)
>- return (0);
>-
> /* If no one owns it but it is contested try to acquire it. */
> if (owner == UMTX_CONTESTED) {
> owner = casuptr((intptr_t *)&umtx->u_owner,
> UMTX_CONTESTED, td->td_tid | UMTX_CONTESTED);
>
>+ if (owner == UMTX_CONTESTED)
>+ return (0);
>+
> /* The address was invalid. */
> if (owner == -1)
> return (EFAULT);
>
>- if (owner == UMTX_CONTESTED)
>- return (0);
>-
> /* If this failed the lock has changed, restart. */
> continue;
> }
>
>+ /*
>+ * If we caught a signal, we have retried and now
>+ * exit immediately.
>+ */
>+ if (error)
>+ return (error);
>
>- UMTX_LOCK();
>+ UMTX_LOCK(td, umtx);
> uq = umtx_insert(td, umtx);
>- UMTX_UNLOCK();
>+ UMTX_UNLOCK(td, umtx);
>
> /*
> * Set the contested bit so that a release in user space
>@@ -205,9 +238,8 @@
>
> /* The address was invalid. */
> if (old == -1) {
>- UMTX_LOCK();
>- umtx_remove(uq, td);
>- UMTX_UNLOCK();
>+ UMTX_LOCK(td, umtx);
>+ umtx_remove(uq, td, umtx);
> return (EFAULT);
> }
>
>@@ -216,24 +248,28 @@
> * and we need to retry or we lost a race to the thread
> * unlocking the umtx.
> */
>- PROC_LOCK(td->td_proc);
>+ UMTX_LOCK(td, umtx);
> if (old == owner && (td->td_flags & TDF_UMTXWAKEUP) == 0)
>- error = msleep(td, &td->td_proc->p_mtx,
>+ error = msleep(td, UMTX_MUTEX(td, umtx),
> td->td_priority | PCATCH, "umtx", 0);
> else
> error = 0;
>- mtx_lock_spin(&sched_lock);
>- td->td_flags &= ~TDF_UMTXWAKEUP;
>- mtx_unlock_spin(&sched_lock);
>- PROC_UNLOCK(td->td_proc);
>+ umtx_remove(uq, td, umtx);
>
>- UMTX_LOCK();
>- umtx_remove(uq, td);
>- UMTX_UNLOCK();
>+ if (td->td_flags & TDF_UMTXWAKEUP) {
>+ /*
>+		 * If we were resumed by umtx_unlock, we should retry
>+ * to avoid a race.
>+ */
>+ mtx_lock_spin(&sched_lock);
>+ td->td_flags &= ~TDF_UMTXWAKEUP;
>+ mtx_unlock_spin(&sched_lock);
>+ continue;
>+ }
>
> /*
>- * If we caught a signal we might have to retry or exit
>- * immediately.
>+		 * If we caught a signal without being resumed by umtx_unlock,
>+ * exit immediately.
> */
> if (error)
> return (error);
>@@ -246,7 +282,7 @@
> _umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
> /* struct umtx *umtx */
> {
>- struct thread *blocked;
>+ struct thread *blocked, *blocked2;
> struct umtx *umtx;
> struct umtx_q *uq;
> intptr_t owner;
>@@ -269,63 +305,69 @@
> /* We should only ever be in here for contested locks */
> if ((owner & UMTX_CONTESTED) == 0)
> return (EINVAL);
>- blocked = NULL;
>
> /*
> * When unlocking the umtx, it must be marked as unowned if
> * there is zero or one thread only waiting for it.
> * Otherwise, it must be marked as contested.
> */
>- UMTX_LOCK();
>+ old = casuptr((intptr_t *)&umtx->u_owner, owner, UMTX_UNOWNED);
>+ if (old == -1)
>+ return (EFAULT);
>+ if (old != owner)
>+ return (EINVAL);
>+	/*
>+	 * At this point, a new thread can lock the umtx before we
>+	 * reach here, so the contested bit will not be set; if there
>+	 * are two or more threads on the wait queue, we should set
>+	 * the contested bit for them.
>+	 */
>+ blocked = NULL;
>+ UMTX_LOCK(td, umtx);
> uq = umtx_lookup(td, umtx);
>- if (uq == NULL ||
>- (uq != NULL && (blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
>- TAILQ_NEXT(blocked, td_umtx) == NULL)) {
>- UMTX_UNLOCK();
>- old = casuptr((intptr_t *)&umtx->u_owner, owner,
>- UMTX_UNOWNED);
>- if (old == -1)
>- return (EFAULT);
>- if (old != owner)
>- return (EINVAL);
>+ if (uq == NULL) {
>+ UMTX_UNLOCK(td, umtx);
>+ return (0);
>+ }
>+ blocked2 = NULL;
>+ if ((blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL) {
>+ mtx_lock_spin(&sched_lock);
>+ blocked->td_flags |= TDF_UMTXWAKEUP;
>+ mtx_unlock_spin(&sched_lock);
>+ blocked2 = TAILQ_NEXT(blocked, td_umtx);
>+ }
>+ UMTX_UNLOCK(td, umtx);
>
>+	/*
>+	 * If there is a second thread waiting on the umtx, set the
>+	 * contested bit; if they are resumed before we reach here, it
>+	 * is harmless, just a bit inefficient.
>+	 */
>+ if (blocked2 != NULL) {
>+ owner = UMTX_UNOWNED;
>+ for (;;) {
>+ old = casuptr((intptr_t *)&umtx->u_owner, owner,
>+ owner | UMTX_CONTESTED);
>+ if (old == -1)
>+ return (EFAULT);
>+ if (old == owner)
>+ break;
>+ owner = old;
>+ }
> /*
>- * Recheck the umtx queue to make sure another thread
>- * didn't put itself on it after it was unlocked.
>+ * Another thread locked the umtx before us, so don't bother
>+ * to wake more threads, that thread will do it when it unlocks
>+ * the umtx.
> */
>- UMTX_LOCK();
>- uq = umtx_lookup(td, umtx);
>- if (uq != NULL &&
>- ((blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
>- TAILQ_NEXT(blocked, td_umtx) != NULL)) {
>- UMTX_UNLOCK();
>- old = casuptr((intptr_t *)&umtx->u_owner,
>- UMTX_UNOWNED, UMTX_CONTESTED);
>- } else {
>- UMTX_UNLOCK();
>- }
>- } else {
>- UMTX_UNLOCK();
>- old = casuptr((intptr_t *)&umtx->u_owner,
>- owner, UMTX_CONTESTED);
>- if (old != -1 && old != owner)
>- return (EINVAL);
>+ if ((owner & ~UMTX_CONTESTED) != 0)
>+ return (0);
> }
>
>- if (old == -1)
>- return (EFAULT);
>-
> /*
> * If there is a thread waiting on the umtx, wake it up.
> */
>- if (blocked != NULL) {
>- PROC_LOCK(blocked->td_proc);
>- mtx_lock_spin(&sched_lock);
>- blocked->td_flags |= TDF_UMTXWAKEUP;
>- mtx_unlock_spin(&sched_lock);
>- PROC_UNLOCK(blocked->td_proc);
>+ if (blocked != NULL)
> wakeup(blocked);
>- }
>
> return (0);
> }
>
>
>
>
More information about the p4-projects
mailing list