PERFORCE change 65074 for review

David Xu davidxu at FreeBSD.org
Sat Nov 13 21:13:25 PST 2004


http://perforce.freebsd.org/chv.cgi?CH=65074

Change 65074 by davidxu at davidxu_alona on 2004/11/14 05:12:40

	1. Fix a race between signal delivery and umtx_unlock: a waiter
	   may be resumed by a signal and have already left the queue or
	   exited, and a heavily loaded test crashes the kernel (a sketch
	   of the wakeup handshake follows the diff below).
	2. Use distributed per-chain queue locks instead of a single
	   giant lock, as sketched below.

Affected files ...

.. //depot/projects/davidxu_ksedbg/src/sys/kern/kern_umtx.c#4 edit

Differences ...

==== //depot/projects/davidxu_ksedbg/src/sys/kern/kern_umtx.c#4 (text+ko) ====

@@ -49,25 +49,48 @@
 	pid_t		uq_pid;		/* Pid key component. */
 };
 
+LIST_HEAD(umtx_head, umtx_q);
+struct umtxq_chain {
+	struct umtx_head uc_queues;	/* List of sleep queues. */
+	struct mtx uc_lock;		/* lock for this chain. */
+};
+
 #define	UMTX_QUEUES	128
 #define	UMTX_HASH(pid, umtx)						\
-    (((uintptr_t)pid + ((uintptr_t)umtx & ~65535)) % UMTX_QUEUES)
+    ((((uintptr_t)pid << 16) + ((uintptr_t)umtx & 65535)) % UMTX_QUEUES)
 
-LIST_HEAD(umtx_head, umtx_q);
-static struct umtx_head queues[UMTX_QUEUES];
+static struct umtxq_chain umtxq_chains[UMTX_QUEUES];
 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
 
-static struct mtx umtx_lock;
-MTX_SYSINIT(umtx, &umtx_lock, "umtx", MTX_DEF);
-
-#define	UMTX_LOCK()	mtx_lock(&umtx_lock);
-#define	UMTX_UNLOCK()	mtx_unlock(&umtx_lock);
+#define	UMTX_LOCK(td, umtx)						\
+	mtx_lock(&umtxq_chains[UMTX_HASH((td)->td_proc->p_pid,		\
+		 (umtx))].uc_lock)
+#define	UMTX_UNLOCK(td, umtx)						\
+	mtx_unlock(&umtxq_chains[UMTX_HASH((td)->td_proc->p_pid,	\
+		 (umtx))].uc_lock);
+#define UMTX_MUTEX(td, umtx)						\
+	(&umtxq_chains[UMTX_HASH(td->td_proc->p_pid, umtx)].uc_lock)
 
 #define	UMTX_CONTESTED	LONG_MIN
 
+static void umtx_sysinit(void *);
 static struct umtx_q *umtx_lookup(struct thread *, struct umtx *umtx);
 static struct umtx_q *umtx_insert(struct thread *, struct umtx *umtx);
 
+SYSINIT(umtx, SI_SUB_LOCK, SI_ORDER_MIDDLE, umtx_sysinit, NULL);
+
+static void
+umtx_sysinit(void *arg)
+{
+	int i;
+
+	for (i = 0; i < UMTX_QUEUES; ++i) {
+		LIST_INIT(&umtxq_chains[i].uc_queues);
+		mtx_init(&umtxq_chains[i].uc_lock, "umtxq_lock", NULL,
+			 MTX_DEF | MTX_DUPOK);
+	}
+}
+
 static struct umtx_q *
 umtx_lookup(struct thread *td, struct umtx *umtx)
 {
@@ -75,9 +98,11 @@
 	struct umtx_q *uq;
 	pid_t pid;
 
+	mtx_assert(UMTX_MUTEX(td, umtx), MA_OWNED);
+
 	pid = td->td_proc->p_pid;
 
-	head = &queues[UMTX_HASH(td->td_proc->p_pid, umtx)];
+	head = &umtxq_chains[UMTX_HASH(td->td_proc->p_pid, umtx)].uc_queues;
 
 	LIST_FOREACH(uq, head, uq_next) {
 		if (uq->uq_pid == pid && uq->uq_umtx == umtx)
@@ -102,16 +127,16 @@
 	if ((uq = umtx_lookup(td, umtx)) == NULL) {
 		struct umtx_q *ins;
 
-		UMTX_UNLOCK();
+		UMTX_UNLOCK(td, umtx);
 		ins = malloc(sizeof(*uq), M_UMTX, M_ZERO | M_WAITOK);
-		UMTX_LOCK();
+		UMTX_LOCK(td, umtx);
 
 		/*
 		 * Some one else could have succeeded while we were blocked
 		 * waiting on memory.
 		 */
 		if ((uq = umtx_lookup(td, umtx)) == NULL) {
-			head = &queues[UMTX_HASH(pid, umtx)];
+			head = &umtxq_chains[UMTX_HASH(pid, umtx)].uc_queues;
 			uq = ins;
 			uq->uq_pid = pid;
 			uq->uq_umtx = umtx;
@@ -148,7 +173,7 @@
 	struct umtx *umtx;
 	intptr_t owner;
 	intptr_t old;
-	int error;
+	int error = 0;
 
 	uq = NULL;
 
@@ -189,10 +214,16 @@
 			continue;
 		}
 
+		/*
+		 * If we caught a signal on the previous iteration, we have
+		 * already retried once; exit now instead of sleeping again.
+		 */
+		if (error)
+			return (error);
 
-		UMTX_LOCK();
+		UMTX_LOCK(td, umtx);
 		uq = umtx_insert(td, umtx);
-		UMTX_UNLOCK();
+		UMTX_UNLOCK(td, umtx);
 
 		/*
 		 * Set the contested bit so that a release in user space
@@ -205,9 +236,9 @@
 
 		/* The address was invalid. */
 		if (old == -1) {
-			UMTX_LOCK();
+			UMTX_LOCK(td, umtx);
 			umtx_remove(uq, td);
-			UMTX_UNLOCK();
+			UMTX_UNLOCK(td, umtx);
 			return (EFAULT);
 		}
 
@@ -216,24 +247,29 @@
 		 * and we need to retry or we lost a race to the thread
 		 * unlocking the umtx.
 		 */
-		PROC_LOCK(td->td_proc);
+		UMTX_LOCK(td, umtx);
 		if (old == owner && (td->td_flags & TDF_UMTXWAKEUP) == 0)
-			error = msleep(td, &td->td_proc->p_mtx,
+			error = msleep(td, UMTX_MUTEX(td, umtx),
 			    td->td_priority | PCATCH, "umtx", 0);
 		else
 			error = 0;
-		mtx_lock_spin(&sched_lock);
-		td->td_flags &= ~TDF_UMTXWAKEUP;
-		mtx_unlock_spin(&sched_lock);
-		PROC_UNLOCK(td->td_proc);
+		umtx_remove(uq, td);
+		UMTX_UNLOCK(td, umtx);
 
-		UMTX_LOCK();
-		umtx_remove(uq, td);
-		UMTX_UNLOCK();
+		if (td->td_flags & TDF_UMTXWAKEUP) {
+			/*
+			 * if we were resumed by umtx_unlock, we should retry
+			 * to avoid a race.
+			 */
+			mtx_lock_spin(&sched_lock);
+			td->td_flags &= ~TDF_UMTXWAKEUP;
+			mtx_unlock_spin(&sched_lock);
+			continue;
+		}
 
 		/*
-		 * If we caught a signal we might have to retry or exit 
-		 * immediately.
+		 * If we caught a signal without being resumed by umtx_unlock,
+		 * exit immediately.
 		 */
 		if (error)
 			return (error);
@@ -246,7 +282,7 @@
 _umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
     /* struct umtx *umtx */
 {
-	struct thread *blocked;
+	struct thread *blocked, *blocked2;
 	struct umtx *umtx;
 	struct umtx_q *uq;
 	intptr_t owner;
@@ -276,12 +312,17 @@
 	 * there is zero or one thread only waiting for it.
 	 * Otherwise, it must be marked as contested.
 	 */
-	UMTX_LOCK();
+	UMTX_LOCK(td, umtx);
 	uq = umtx_lookup(td, umtx);
 	if (uq == NULL ||
 	    (uq != NULL && (blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
 	    TAILQ_NEXT(blocked, td_umtx) == NULL)) {
-		UMTX_UNLOCK();
+		if (blocked) {
+			mtx_lock_spin(&sched_lock);
+			blocked->td_flags |= TDF_UMTXWAKEUP;
+			mtx_unlock_spin(&sched_lock);
+		}
+		UMTX_UNLOCK(td, umtx);
 		old = casuptr((intptr_t *)&umtx->u_owner, owner,
 		    UMTX_UNOWNED);
 		if (old == -1)
@@ -293,19 +334,25 @@
 		 * Recheck the umtx queue to make sure another thread
 		 * didn't put itself on it after it was unlocked.
 		 */
-		UMTX_LOCK();
+		UMTX_LOCK(td, umtx);
 		uq = umtx_lookup(td, umtx);
 		if (uq != NULL &&
-		    ((blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
+		    ((blocked2 = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
-		    TAILQ_NEXT(blocked, td_umtx) != NULL)) {
+		    TAILQ_NEXT(blocked2, td_umtx) != NULL)) {
-			UMTX_UNLOCK();
+			if (blocked == NULL) {
+				mtx_lock_spin(&sched_lock);
+				blocked2->td_flags |= TDF_UMTXWAKEUP;
+				mtx_unlock_spin(&sched_lock);
+				blocked = blocked2;
+			}
+			UMTX_UNLOCK(td, umtx);
 			old = casuptr((intptr_t *)&umtx->u_owner,
 			    UMTX_UNOWNED, UMTX_CONTESTED);
 		} else {
-			UMTX_UNLOCK();
+			UMTX_UNLOCK(td, umtx);
 		}
 	} else {
-		UMTX_UNLOCK();
+		UMTX_UNLOCK(td, umtx);
 		old = casuptr((intptr_t *)&umtx->u_owner,
 		    owner, UMTX_CONTESTED);
 		if (old != -1 && old != owner)
@@ -318,14 +365,7 @@
 	/*
 	 * If there is a thread waiting on the umtx, wake it up.
 	 */
-	if (blocked != NULL) {
-		PROC_LOCK(blocked->td_proc);
-		mtx_lock_spin(&sched_lock);
-		blocked->td_flags |= TDF_UMTXWAKEUP;
-		mtx_unlock_spin(&sched_lock);
-		PROC_UNLOCK(blocked->td_proc);
+	if (blocked != NULL)
 		wakeup(blocked);
-	}
-
 	return (0);
 }
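
On the race in item 1: umtx_unlock now sets TDF_UMTXWAKEUP on the chosen waiter while the chain lock is still held, and the waiter rechecks that flag after msleep() returns, retrying the acquisition when it is set and acting on a caught signal only when it is clear. The fragment below is a userland sketch of just that handshake, with hypothetical names (waiter_wake, waiter_sleep, w_woken) and pthreads standing in for msleep(9)/wakeup(9); it is not the kernel code above.

#include <pthread.h>
#include <stdbool.h>

/* One sleeping waiter; w_lock plays the role of the per-chain umtx lock. */
struct waiter {
	pthread_mutex_t	w_lock;
	pthread_cond_t	w_cv;		/* stands in for the msleep()/wakeup() channel */
	bool		w_woken;	/* analogue of TDF_UMTXWAKEUP */
};

static void
waiter_init(struct waiter *w)
{

	pthread_mutex_init(&w->w_lock, NULL);
	pthread_cond_init(&w->w_cv, NULL);
	w->w_woken = false;
}

/*
 * Unlocker side: mark the waiter as woken before signalling, under the same
 * lock the waiter sleeps on, so the waiter can always distinguish a real
 * handoff from a sleep that was merely interrupted.
 */
static void
waiter_wake(struct waiter *w)
{

	pthread_mutex_lock(&w->w_lock);
	w->w_woken = true;
	pthread_cond_signal(&w->w_cv);
	pthread_mutex_unlock(&w->w_lock);
}

/*
 * Waiter side: one wait, mirroring a single msleep() call; whatever the
 * reason for waking, the flag decides what happens next.  Returns true if
 * the unlocker handed us the wakeup and we should retry the lock, false if
 * we woke for another reason and the caller may act on a pending signal.
 */
static bool
waiter_sleep(struct waiter *w)
{
	bool handoff;

	pthread_mutex_lock(&w->w_lock);
	if (!w->w_woken)
		pthread_cond_wait(&w->w_cv, &w->w_lock);
	handoff = w->w_woken;
	w->w_woken = false;	/* consume the flag, like clearing TDF_UMTXWAKEUP */
	pthread_mutex_unlock(&w->w_lock);
	return (handoff);
}

Setting the flag under the same lock that protects the sleep queue is what closes the window: the waiter cannot dequeue itself and go away between being chosen by the unlocker and being flagged, which is how the old code could end up touching a thread that had already been resumed by a signal and exited.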

