svn commit: r327477 - stable/11/sys/kern

Mateusz Guzik mjg at FreeBSD.org
Mon Jan 1 23:45:10 UTC 2018


Author: mjg
Date: Mon Jan  1 23:45:09 2018
New Revision: 327477
URL: https://svnweb.freebsd.org/changeset/base/327477

Log:
  MFC r327394,r327395:
  
      mtx: pre-read the lock value in thread_lock_flags_
  
      Since this function is effectively a slow path, if we get here the lock
      is most likely already taken, in which case it is cheaper not to blindly
      attempt the atomic op (see the sketch below the log).
  
      While here, move the hwpmc probe out of the loop to match other primitives.
  
  =============
  
      mtx: deduplicate indefinite wait check in spinlocks and thread lock
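
A minimal standalone sketch (not part of the commit, written against C11
atomics) of the pre-read-then-CAS pattern the first change applies in
thread_lock_flags_: the lock word is read first and the atomic op is only
attempted when the lock looks free, avoiding a needless exclusive
cache-line acquisition on a contended path. The names spin_lock_acquire()
and cpu_relax() are illustrative only, not FreeBSD API:

#include <stdatomic.h>
#include <stdint.h>

#define LOCK_UNOWNED	((uintptr_t)0)

struct spin_lock {
	_Atomic uintptr_t owner;	/* 0 when free, owner tid otherwise */
};

static inline void
cpu_relax(void)
{
	/* Stand-in for cpu_spinwait(); would be a pause hint on real HW. */
}

static void
spin_lock_acquire(struct spin_lock *sl, uintptr_t tid)
{
	uintptr_t v;

	/*
	 * Pre-read: callers reach the slow path because the lock is most
	 * likely held, so do not start with a blind atomic attempt.
	 */
	v = atomic_load_explicit(&sl->owner, memory_order_relaxed);
	for (;;) {
		if (v == LOCK_UNOWNED) {
			/*
			 * The lock looks free; only now issue the CAS.
			 * On failure, v is refreshed with the current value.
			 */
			if (atomic_compare_exchange_weak_explicit(&sl->owner,
			    &v, tid, memory_order_acquire,
			    memory_order_relaxed))
				return;
			continue;
		}
		cpu_relax();
		v = atomic_load_explicit(&sl->owner, memory_order_relaxed);
	}
}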

Modified:
  stable/11/sys/kern/kern_mutex.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/kern/kern_mutex.c
==============================================================================
--- stable/11/sys/kern/kern_mutex.c	Mon Jan  1 22:33:57 2018	(r327476)
+++ stable/11/sys/kern/kern_mutex.c	Mon Jan  1 23:45:09 2018	(r327477)
@@ -168,6 +168,8 @@ LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
 struct mtx blocked_lock;
 struct mtx Giant;
 
+static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);
+
 void
 assert_mtx(const struct lock_object *lock, int what)
 {
@@ -672,25 +674,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
 #endif
 }
 
-static void
-_mtx_lock_spin_failed(struct mtx *m)
-{
-	struct thread *td;
-
-	td = mtx_owner(m);
-
-	/* If the mutex is unlocked, try again. */
-	if (td == NULL)
-		return;
-
-	printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
-	    m, m->lock_object.lo_name, td, td->td_tid);
-#ifdef WITNESS
-	witness_display_spinlock(&m->lock_object, td, printf);
-#endif
-	panic("spin lock held too long");
-}
-
 #ifdef SMP
 /*
  * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
@@ -762,16 +745,10 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t
 		/* Give interrupts a chance while we spin. */
 		spinlock_exit();
 		do {
-			if (lda.spin_cnt < 10000000) {
+			if (__predict_true(lda.spin_cnt < 10000000)) {
 				lock_delay(&lda);
 			} else {
-				lda.spin_cnt++;
-				if (lda.spin_cnt < 60000000 || kdb_active ||
-				    panicstr != NULL)
-					DELAY(1);
-				else
-					_mtx_lock_spin_failed(m);
-				cpu_spinwait();
+				_mtx_lock_indefinite_check(m, &lda);
 			}
 			v = MTX_READ_VALUE(m);
 		} while (v != MTX_UNOWNED);
@@ -897,6 +874,10 @@ thread_lock_flags_(struct thread *td, int opts, const 
 
 	lock_delay_arg_init(&lda, &mtx_spin_delay);
 
+#ifdef HWPMC_HOOKS
+	PMC_SOFT_CALL( , , lock, failed);
+#endif
+
 #ifdef LOCK_PROFILING
 	doing_lockprof = 1;
 #elif defined(KDTRACE_HOOKS)
@@ -906,37 +887,29 @@ thread_lock_flags_(struct thread *td, int opts, const 
 #endif
 	for (;;) {
 retry:
-		v = MTX_UNOWNED;
 		spinlock_enter();
 		m = td->td_lock;
 		thread_lock_validate(m, opts, file, line);
+		v = MTX_READ_VALUE(m);
 		for (;;) {
-			if (_mtx_obtain_lock_fetch(m, &v, tid))
-				break;
-			if (v == MTX_UNOWNED)
+			if (v == MTX_UNOWNED) {
+				if (_mtx_obtain_lock_fetch(m, &v, tid))
+					break;
 				continue;
+			}
 			if (v == tid) {
 				m->mtx_recurse++;
 				break;
 			}
-#ifdef HWPMC_HOOKS
-			PMC_SOFT_CALL( , , lock, failed);
-#endif
 			lock_profile_obtain_lock_failed(&m->lock_object,
 			    &contested, &waittime);
 			/* Give interrupts a chance while we spin. */
 			spinlock_exit();
 			do {
-				if (lda.spin_cnt < 10000000) {
+				if (__predict_true(lda.spin_cnt < 10000000)) {
 					lock_delay(&lda);
 				} else {
-					lda.spin_cnt++;
-					if (lda.spin_cnt < 60000000 ||
-					    kdb_active || panicstr != NULL)
-						DELAY(1);
-					else
-						_mtx_lock_spin_failed(m);
-					cpu_spinwait();
+					_mtx_lock_indefinite_check(m, &lda);
 				}
 				if (m != td->td_lock)
 					goto retry;
@@ -1225,6 +1198,31 @@ mutex_init(void)
 	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
 	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
 	mtx_lock(&Giant);
+}
+
+static void __noinline
+_mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
+{
+	struct thread *td;
+
+	ldap->spin_cnt++;
+	if (ldap->spin_cnt < 60000000 || kdb_active || panicstr != NULL)
+		DELAY(1);
+	else {
+		td = mtx_owner(m);
+
+		/* If the mutex is unlocked, try again. */
+		if (td == NULL)
+			return;
+
+		printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
+		    m, m->lock_object.lo_name, td, td->td_tid);
+#ifdef WITNESS
+		witness_display_spinlock(&m->lock_object, td, printf);
+#endif
+		panic("spin lock held too long");
+	}
+	cpu_spinwait();
 }
 
 #ifdef DDB

