PERFORCE change 95513 for review

John Baldwin jhb at FreeBSD.org
Tue Apr 18 18:51:42 UTC 2006


http://perforce.freebsd.org/chv.cgi?CH=95513

Change 95513 by jhb at jhb_slimer on 2006/04/18 18:50:52

	IFC @95511 (integrate from CURRENT as of change 95511).

Affected files ...

.. //depot/projects/smpng/sys/kern/kern_rwlock.c#5 integrate
.. //depot/projects/smpng/sys/kern/subr_turnstile.c#26 integrate
.. //depot/projects/smpng/sys/sys/turnstile.h#9 integrate

Differences ...

==== //depot/projects/smpng/sys/kern/kern_rwlock.c#5 (text+ko) ====

@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_rwlock.c,v 1.6 2006/04/17 21:11:01 jhb Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_rwlock.c,v 1.7 2006/04/18 18:27:54 jhb Exp $");
 
 #include "opt_ddb.h"
 
@@ -135,6 +135,7 @@
 void
 _rw_rlock(struct rwlock *rw, const char *file, int line)
 {
+	volatile struct thread *owner;
 	uintptr_t x;
 
 	KASSERT(rw_wowner(rw) != curthread,
@@ -182,6 +183,7 @@
 					    (void *)(x + RW_ONE_READER));
 				break;
 			}
+			cpu_spinwait();
 			continue;
 		}
 
@@ -200,6 +202,7 @@
 		x = rw->rw_lock;
 		if (x & RW_LOCK_READ) {
 			turnstile_release(&rw->rw_object);
+			cpu_spinwait();
 			continue;
 		}
 
@@ -221,7 +224,26 @@
 				    __func__, rw);
 		}
 
+#ifdef SMP
 		/*
+		 * If the owner is running on another CPU, spin until
+		 * the owner stops running or the state of the lock
+		 * changes.
+		 */
+		owner = (struct thread *)RW_OWNER(x);
+		if (TD_IS_RUNNING(owner)) {
+			turnstile_release(&rw->rw_object);
+			if (LOCK_LOG_TEST(&rw->rw_object, 0))
+				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
+				    __func__, rw, owner);
+			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
+			    TD_IS_RUNNING(owner))
+				cpu_spinwait();
+			continue;
+		}
+#endif
+
+		/*
 		 * We were unable to acquire the lock and the read waiters
 		 * flag is set, so we must block on the turnstile.
 		 */
@@ -351,6 +373,7 @@
 		 * release the lock.
 		 */
 		ts = turnstile_lookup(&rw->rw_object);
+		MPASS(ts != NULL);
 		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
 		turnstile_unpend(ts, TS_SHARED_LOCK);
 		break;
@@ -365,6 +388,7 @@
 void
 _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
 {
+	volatile struct thread *owner;
 	uintptr_t v;
 
 	if (LOCK_LOG_TEST(&rw->rw_object, 0))
@@ -426,7 +450,24 @@
 				    __func__, rw);
 		}
 
-		/* XXX: Adaptively spin if current wlock owner on another CPU? */
+#ifdef SMP
+		/*
+		 * If the lock is write locked and the owner is
+		 * running on another CPU, spin until the owner stops
+		 * running or the state of the lock changes.
+		 */
+		owner = (struct thread *)RW_OWNER(v);
+		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
+			turnstile_release(&rw->rw_object);
+			if (LOCK_LOG_TEST(&rw->rw_object, 0))
+				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
+				    __func__, rw, owner);
+			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
+			    TD_IS_RUNNING(owner))
+				cpu_spinwait();
+			continue;
+		}
+#endif
 
 		/*
 		 * We were unable to acquire the lock and the write waiters
@@ -464,8 +505,22 @@
 	turnstile_lock(&rw->rw_object);
 	ts = turnstile_lookup(&rw->rw_object);
 
-	/* XXX: Adaptive fixup would be required here. */
+#ifdef SMP
+	/*
+	 * There might not be a turnstile for this lock if all of
+	 * the waiters are adaptively spinning.  In that case, just
+	 * reset the lock to the unlocked state and return.
+	 */
+	if (ts == NULL) {
+		atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
+		if (LOCK_LOG_TEST(&rw->rw_object, 0))
+			CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
+		turnstile_release(&rw->rw_object);
+		return;
+	}
+#else
 	MPASS(ts != NULL);
+#endif
 
 	/*
 	 * Use the same algo as sx locks for now.  Prefer waking up shared
@@ -482,19 +537,45 @@
 	 * above.  There is probably a potential priority inversion in
 	 * there that could be worked around either by waking both queues
 	 * of waiters or doing some complicated lock handoff gymnastics.
+	 *
+	 * Note that in the SMP case, if both flags are set, there might
+	 * not be any actual writers on the turnstile as they might all
+	 * be spinning.  In that case, we don't want to preserve the
+	 * RW_LOCK_WRITE_WAITERS flag as the turnstile is going to go
+	 * away once we wake up all the readers.
 	 */
+	v = RW_UNLOCKED;
 	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
 		queue = TS_SHARED_QUEUE;
-		v = RW_UNLOCKED | (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
-	} else {
+#ifdef SMP
+		if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
+		    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
+			v |= RW_LOCK_WRITE_WAITERS;
+#else
+		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
+#endif
+	} else
 		queue = TS_EXCLUSIVE_QUEUE;
-		v = RW_UNLOCKED;
+
+#ifdef SMP
+	/*
+	 * We have to make sure that we actually have waiters to
+	 * wake up.  If they are all spinning, then we just need to
+	 * disown the turnstile and return.
+	 */
+	if (turnstile_empty(ts, queue)) {
+		if (LOCK_LOG_TEST(&rw->rw_object, 0))
+			CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
+		atomic_store_rel_ptr(&rw->rw_lock, v);
+		turnstile_disown(ts);
+		return;
 	}
+#endif
+
+	/* Wake up all waiters for the specific queue. */
 	if (LOCK_LOG_TEST(&rw->rw_object, 0))
 		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
 		    queue == TS_SHARED_QUEUE ? "read" : "write");
-
-	/* Wake up all waiters for the specific queue. */
 	turnstile_broadcast(ts, queue);
 	atomic_store_rel_ptr(&rw->rw_lock, v);
 	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

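The two adaptive-spinning hunks above share one pattern: if the lock
holder is currently executing on another CPU, it will likely release
the lock soon, so a waiter busy-waits instead of paying for a sleep
and a context switch, and falls back to the turnstile as soon as the
holder goes to sleep.  Below is a minimal userland sketch of that
pattern, not the kernel code itself: td_is_running(),
block_on_turnstile(), and struct rwlock_sketch are hypothetical
stand-ins for TD_IS_RUNNING(), the turnstile sleep path, and struct
rwlock.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct thread;

    /* Hypothetical stand-ins for the kernel primitives. */
    extern bool td_is_running(struct thread *);  /* TD_IS_RUNNING() */
    extern void cpu_spinwait(void);              /* e.g. PAUSE on x86 */
    extern void block_on_turnstile(void *);      /* sleep until woken */

    struct rwlock_sketch {
            _Atomic(struct thread *) owner;      /* NULL when unlocked */
    };

    static bool
    try_acquire(struct rwlock_sketch *rw, struct thread *self)
    {
            struct thread *expected = NULL;

            return (atomic_compare_exchange_strong(&rw->owner, &expected,
                self));
    }

    static void
    wlock_adaptive(struct rwlock_sketch *rw, struct thread *self)
    {
            struct thread *owner;

            for (;;) {
                    if (try_acquire(rw, self))   /* fast path */
                            return;
                    owner = atomic_load(&rw->owner);
                    if (owner != NULL && td_is_running(owner)) {
                            /*
                             * The owner is running on another CPU, so
                             * spin until it releases the lock or is
                             * switched out; either way, re-evaluate.
                             */
                            while (atomic_load(&rw->owner) == owner &&
                                td_is_running(owner))
                                    cpu_spinwait();
                            continue;
                    }
                    /* The owner is asleep; block instead of spinning. */
                    block_on_turnstile(rw);
            }
    }

Note that the owner pointer is re-read on every spin iteration, exactly
as in the loops above, so a spinner notices both a release and a
handoff to a new owner.
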
==== //depot/projects/smpng/sys/kern/subr_turnstile.c#26 (text+ko) ====

@@ -57,7 +57,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/subr_turnstile.c,v 1.157 2006/03/29 23:24:55 jhb Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/subr_turnstile.c,v 1.158 2006/04/18 18:16:53 jhb Exp $");
 
 #include "opt_ddb.h"
 #include "opt_turnstile_profiling.h"
@@ -879,6 +879,56 @@
 }
 
 /*
+ * Give up ownership of a turnstile.  This must be called with the
+ * turnstile chain locked.
+ */
+void
+turnstile_disown(struct turnstile *ts)
+{
+	struct turnstile_chain *tc;
+	struct thread *td;
+	u_char cp, pri;
+
+	MPASS(ts != NULL);
+	MPASS(ts->ts_owner == curthread);
+	tc = TC_LOOKUP(ts->ts_lockobj);
+	mtx_assert(&tc->tc_lock, MA_OWNED);
+	MPASS(TAILQ_EMPTY(&ts->ts_pending));
+	MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) ||
+	    !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
+
+	/*
+	 * Remove the turnstile from this thread's list of contested locks
+	 * since this thread doesn't own it anymore.  New threads will
+	 * not be blocking on the turnstile until it is claimed by a new
+	 * owner.
+	 */
+	mtx_lock_spin(&td_contested_lock);
+	ts->ts_owner = NULL;
+	LIST_REMOVE(ts, ts_link);
+	mtx_unlock_spin(&td_contested_lock);
+	mtx_unlock_spin(&tc->tc_lock);
+
+	/*
+	 * Adjust the priority of curthread based on other contested
+	 * locks it owns.  Don't lower the priority below the base
+	 * priority however.
+	 */
+	td = curthread;
+	pri = PRI_MAX;
+	mtx_lock_spin(&sched_lock);
+	mtx_lock_spin(&td_contested_lock);
+	LIST_FOREACH(ts, &td->td_contested, ts_link) {
+		cp = turnstile_first_waiter(ts)->td_priority;
+		if (cp < pri)
+			pri = cp;
+	}
+	mtx_unlock_spin(&td_contested_lock);
+	sched_unlend_prio(td, pri);
+	mtx_unlock_spin(&sched_lock);
+}
+
+/*
  * Return the first thread in a turnstile.
  */
 struct thread *
@@ -895,6 +945,23 @@
 	return (TAILQ_FIRST(&ts->ts_blocked[queue]));
 }
 
+/*
+ * Returns true if a sub-queue of a turnstile is empty.
+ */
+int
+turnstile_empty(struct turnstile *ts, int queue)
+{
+#ifdef INVARIANTS
+	struct turnstile_chain *tc;
+
+	MPASS(ts != NULL);
+	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
+	tc = TC_LOOKUP(ts->ts_lockobj);
+	mtx_assert(&tc->tc_lock, MA_OWNED);
+#endif
+	return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
+}
+
 #ifdef DDB
 static void
 print_thread(struct thread *td, const char *prefix)

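Taken together, turnstile_empty() and turnstile_disown() let a lock's
release path handle the case where every contending thread is spinning
rather than sleeping on the turnstile.  Below is a condensed sketch of
the unlock slow path that the kern_rwlock.c changes above implement;
clear_lock() is a hypothetical stand-in for resetting the lock cookie
(atomic_store_rel_ptr() in the real code), and the queue choice is
hard-coded for brevity where the real code picks it from the waiter
flags.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/turnstile.h>

    extern void clear_lock(struct lock_object *);  /* hypothetical */

    void
    unlock_hard_sketch(struct lock_object *lo)
    {
            struct turnstile *ts;

            turnstile_lock(lo);
            ts = turnstile_lookup(lo);
            if (ts == NULL) {
                    /*
                     * Every waiter is adaptively spinning, so there is
                     * nobody to wake up: reset the lock and let one of
                     * the spinners win the race for it.
                     */
                    clear_lock(lo);
                    turnstile_release(lo);
                    return;
            }
            if (turnstile_empty(ts, TS_EXCLUSIVE_QUEUE)) {
                    /*
                     * A turnstile exists, but the queue we intend to
                     * wake is empty because those threads are spinning;
                     * give up ownership without a wakeup.  Note that
                     * turnstile_disown() drops the chain lock itself.
                     */
                    clear_lock(lo);
                    turnstile_disown(ts);
                    return;
            }
            turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
            clear_lock(lo);
            turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
    }
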
==== //depot/projects/smpng/sys/sys/turnstile.h#9 (text+ko) ====

@@ -26,7 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: src/sys/sys/turnstile.h,v 1.9 2006/01/27 22:42:12 jhb Exp $
+ * $FreeBSD: src/sys/sys/turnstile.h,v 1.11 2006/04/18 18:21:38 jhb Exp $
  */
 
 #ifndef _SYS_TURNSTILE_H_
@@ -34,7 +34,10 @@
 
 /*
  * Turnstile interface.  Non-sleepable locks use a turnstile for the
- * queue of threads blocked on them when they are contested.
+ * queue of threads blocked on them when they are contested.  Each
+ * turnstile contains two sub-queues: one for threads waiting for a
+ * shared, or read, lock, and one for threads waiting for an
+ * exclusive, or write, lock.
  *
  * A thread calls turnstile_lock() to lock the turnstile chain associated
  * with a given lock.  A thread calls turnstile_wait() when the lock is
@@ -50,7 +53,10 @@
  * blocked threads.  The turnstile_signal() function returns true if the
  * turnstile became empty as a result.  After the higher level code finishes
  * releasing the lock, turnstile_unpend() must be called to wake up the
- * pending thread(s).
+ * pending thread(s) and give up ownership of the turnstile.
+ *
+ * Alternatively, if a thread wishes to relinquish ownership of a turnstile
+ * without waking up any waiters, it may call turnstile_disown().
  *
  * When a lock is acquired that already has at least one thread contested
  * on it, the new owner of the lock must claim ownership of the turnstile
@@ -62,8 +68,9 @@
  * released at thread destruction may not be the same turnstile that the
  * thread allocated when it was created.
  *
- * The highest priority thread blocked on a turnstile can be obtained via
- * turnstile_head().
+ * The highest priority thread blocked on a specified queue of a
+ * turnstile can be obtained via turnstile_head().  A given queue can
+ * also be queried to see if it is empty via turnstile_empty().
  */
 
 struct lock_object;
@@ -85,6 +92,8 @@
 struct turnstile *turnstile_alloc(void);
 void	turnstile_broadcast(struct turnstile *, int);
 void	turnstile_claim(struct lock_object *);
+void	turnstile_disown(struct turnstile *);
+int	turnstile_empty(struct turnstile *ts, int queue);
 void	turnstile_free(struct turnstile *);
 struct thread *turnstile_head(struct turnstile *, int);
 void	turnstile_lock(struct lock_object *);

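For completeness, the blocking side of the interface described in the
updated header comment looks roughly like the sketch below.
try_acquire() and lock_owner() are hypothetical stand-ins for a lock's
cmpset fast path and owner lookup; the turnstile_*() calls are meant to
match the interface as of this change.  The real acquire paths also set
a waiters flag in the lock cookie before sleeping, which is omitted
here.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/turnstile.h>

    extern int try_acquire(struct lock_object *);            /* hypothetical */
    extern struct thread *lock_owner(struct lock_object *);  /* hypothetical */

    void
    acquire_contested_sketch(struct lock_object *lo)
    {
            for (;;) {
                    if (try_acquire(lo))        /* uncontested fast path */
                            return;
                    turnstile_lock(lo);         /* lock the chain */
                    if (try_acquire(lo)) {
                            /* The lock was dropped while we raced here. */
                            turnstile_release(lo);
                            return;
                    }
                    /* Sleep on the exclusive queue until woken. */
                    turnstile_wait(lo, lock_owner(lo), TS_EXCLUSIVE_QUEUE);
            }
    }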
