PERFORCE change 65088 for review

David Xu davidxu at FreeBSD.org
Sun Nov 14 03:30:10 PST 2004


http://perforce.freebsd.org/chv.cgi?CH=65088

Change 65088 by davidxu at davidxu_alona on 2004/11/14 11:29:43

	1. Fix another race in umtx_unlock: after the current thread sets
	   UMTX_UNOWNED, a new thread can come in and lock the umtx, so
	   the current thread can no longer set the UMTX_CONTESTED bit.
	   Although the current thread will wake a thread on the wait
	   queue, that thread may receive a signal and break out of its
	   loop, so the contested bit would never be set (see the sketch
	   below).
	
	2. Micro-optimization in umtx_lock: test the success case before
	   the failure case.
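
To illustrate item 1, here is a minimal userland sketch of the retry pattern
the change introduces in umtx_unlock: instead of a single CAS from
UMTX_UNOWNED to UMTX_CONTESTED (which a newly arrived locker can defeat), the
contested bit is OR'ed into whatever value the lock word currently holds,
retrying until the CAS succeeds. This is only an analogy in C11 atomics, not
the kernel code; the names word, UNOWNED, CONTESTED and mark_contested are
invented for the example.

/*
 * Userland sketch (C11 atomics) of the retry pattern; names are
 * invented for the example and are not the kernel's API.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

#define UNOWNED   0L
#define CONTESTED LONG_MIN	/* same encoding idea as UMTX_CONTESTED */

static _Atomic long word = UNOWNED;	/* stand-in for umtx->u_owner */

/*
 * OR the contested bit into whatever value the lock word holds now,
 * instead of a single CAS against UNOWNED that a new locker can defeat.
 * Returns the value that was in the word before the bit was set.
 */
static long
mark_contested(void)
{
	long owner = UNOWNED;

	for (;;) {
		if (atomic_compare_exchange_weak(&word, &owner,
		    owner | CONTESTED))
			return (owner);
		/* owner now holds the current value; retry against it. */
	}
}

int
main(void)
{
	long old;

	/* Simulate a new locker sneaking in after UNOWNED was stored. */
	atomic_store(&word, 42L /* fake thread id */);

	old = mark_contested();
	printf("word was %#lx, is now %#lx\n",
	    (unsigned long)old, (unsigned long)atomic_load(&word));

	/*
	 * In the kernel change, if the previous value had a non-zero
	 * owner, the new locker handles further wakeups when it
	 * unlocks, hence the early return when
	 * (owner & ~UMTX_CONTESTED) != 0.
	 */
	return (0);
}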

Affected files ...

.. //depot/projects/davidxu_ksedbg/src/sys/kern/kern_umtx.c#7 edit

Differences ...

==== //depot/projects/davidxu_ksedbg/src/sys/kern/kern_umtx.c#7 (text+ko) ====

@@ -56,8 +56,8 @@
 };
 
 #define	UMTX_QUEUES	128
-#define	UMTX_HASH(pid, umtx)						\
-    ((((uintptr_t)pid << 16) + ((uintptr_t)umtx & 65535)) % UMTX_QUEUES)
+#define	UMTX_HASH(pid, umtx)	\
+    ((((uintptr_t)pid << 16) + (((uintptr_t)umtx >> 4) & 65535)) % UMTX_QUEUES)
 
 static struct umtxq_chain umtxq_chains[UMTX_QUEUES];
 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
@@ -73,14 +73,14 @@
 
 #define	UMTX_CONTESTED	LONG_MIN
 
-static void umtx_sysinit(void *);
+static void umtx_queues_init(void *);
 static struct umtx_q *umtx_lookup(struct thread *, struct umtx *umtx);
 static struct umtx_q *umtx_insert(struct thread *, struct umtx *umtx);
 
-SYSINIT(umtx, SI_SUB_LOCK, SI_ORDER_MIDDLE, umtx_sysinit, NULL);
+SYSINIT(umtx, SI_SUB_LOCK, SI_ORDER_MIDDLE, umtx_queues_init, NULL);
 
 static void
-umtx_sysinit(void *arg)
+umtx_queues_init(void *arg)
 {
 	int i;
 
@@ -190,26 +190,26 @@
 		owner = casuptr((intptr_t *)&umtx->u_owner,
 		    UMTX_UNOWNED, td->td_tid);
 
+		/* The acquire succeeded. */
+		if (owner == UMTX_UNOWNED)
+			return (0);
+
 		/* The address was invalid. */
 		if (owner == -1)
 			return (EFAULT);
 
-		/* The acquire succeeded. */
-		if (owner == UMTX_UNOWNED)
-			return (0);
-
 		/* If no one owns it but it is contested try to acquire it. */
 		if (owner == UMTX_CONTESTED) {
 			owner = casuptr((intptr_t *)&umtx->u_owner,
 			    UMTX_CONTESTED, td->td_tid | UMTX_CONTESTED);
 
+			if (owner == UMTX_CONTESTED)
+				return (0);
+
 			/* The address was invalid. */
 			if (owner == -1)
 				return (EFAULT);
 
-			if (owner == UMTX_CONTESTED)
-				return (0);
-
 			/* If this failed the lock has changed, restart. */
 			continue;
 		}
@@ -305,72 +305,69 @@
 	/* We should only ever be in here for contested locks */
 	if ((owner & UMTX_CONTESTED) == 0)
 		return (EINVAL);
-	blocked = NULL;
 
 	/*
 	 * When unlocking the umtx, it must be marked as unowned if
 	 * there is zero or one thread only waiting for it.
 	 * Otherwise, it must be marked as contested.
 	 */
+	old = casuptr((intptr_t *)&umtx->u_owner, owner, UMTX_UNOWNED);
+	if (old == -1)
+		return (EFAULT);
+	if (old != owner)
+		return (EINVAL);
+	/*
+	 * At this point, a new thread may have locked the umtx before
+	 * we got here, so the contested bit will not be set.  If there
+	 * are two or more threads on the wait queue, we must set the
+	 * contested bit for them.
+	 */
+	blocked = NULL;
 	UMTX_LOCK(td, umtx);
 	uq = umtx_lookup(td, umtx);
-	if (uq == NULL ||
-	    (uq != NULL && (blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
-	    TAILQ_NEXT(blocked, td_umtx) == NULL)) {
-		if (blocked) {
-			mtx_lock_spin(&sched_lock);
-			blocked->td_flags |= TDF_UMTXWAKEUP;
-			mtx_unlock_spin(&sched_lock);
-		}
+	if (uq == NULL) {
 		UMTX_UNLOCK(td, umtx);
-		old = casuptr((intptr_t *)&umtx->u_owner, owner,
-		    UMTX_UNOWNED);
-		if (old == -1)
-			return (EFAULT);
-		if (old != owner)
-			return (EINVAL);
+		return (0);
+	}
+	blocked2 = NULL;
+	if ((blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL) {
+		mtx_lock_spin(&sched_lock);
+		blocked->td_flags |= TDF_UMTXWAKEUP;
+		mtx_unlock_spin(&sched_lock);
+		blocked2 = TAILQ_NEXT(blocked, td_umtx);
+	}
+	UMTX_UNLOCK(td, umtx);
 
+	/*
+	 * If there is a second thread waiting on the umtx, set the
+	 * contested bit.  If the waiters are resumed before we reach
+	 * here, that is harmless, just slightly inefficient.
+	 */
+	if (blocked2 != NULL) {
+		owner = UMTX_UNOWNED;
+		for (;;) {
+			old = casuptr((intptr_t *)&umtx->u_owner, owner,
+				    owner | UMTX_CONTESTED);
+			if (old == -1)
+				return (EFAULT);
+			if (old == owner)
+				break;
+			owner = old;
+		}
 		/*
-		 * Recheck the umtx queue to make sure another thread
-		 * didn't put itself on it after it was unlocked.
+		 * Another thread locked the umtx before us, so don't bother
+		 * to wake more threads; that thread will do it when it
+		 * unlocks the umtx.
 		 */
-		UMTX_LOCK(td, umtx);
-		uq = umtx_lookup(td, umtx);
-		if (uq != NULL &&
-		    ((blocked2 = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
-		    TAILQ_NEXT(blocked2, td_umtx) != NULL)) {
-			if (blocked == NULL) {
-				mtx_lock_spin(&sched_lock);
-				blocked2->td_flags |= TDF_UMTXWAKEUP;
-				mtx_unlock_spin(&sched_lock);
-				blocked = blocked2;
-			}
-			UMTX_UNLOCK(td, umtx);
-			old = casuptr((intptr_t *)&umtx->u_owner,
-			    UMTX_UNOWNED, UMTX_CONTESTED);
-		} else {
-			UMTX_UNLOCK(td, umtx);
-		}
-	} else {
-		if (blocked != NULL) {
-			mtx_lock_spin(&sched_lock);
-			blocked->td_flags |= TDF_UMTXWAKEUP;
-			mtx_unlock_spin(&sched_lock);
-		}
-		UMTX_UNLOCK(td, umtx);
-		old = casuptr((intptr_t *)&umtx->u_owner,
-		    owner, UMTX_CONTESTED);
-		if (old != -1 && old != owner)
-			return (EINVAL);
+		if ((owner & ~UMTX_CONTESTED) != 0)
+			return (0);
 	}
 
-	if (old == -1)
-		return (EFAULT);
-
 	/*
 	 * If there is a thread waiting on the umtx, wake it up.
 	 */
 	if (blocked != NULL)
 		wakeup(blocked);
+
 	return (0);
 }

