svn commit: r324335 - in head/sys: kern sys

Mateusz Guzik <mjg at FreeBSD.org>
Thu Oct 5 19:18:04 UTC 2017


Author: mjg
Date: Thu Oct  5 19:18:02 2017
New Revision: 324335
URL: https://svnweb.freebsd.org/changeset/base/324335

Log:
  locks: take the number of readers into account when waiting
  
  Previous code would always spin just once before re-checking the lock. But
  a lock with e.g. 6 readers is not going to become free in the duration of
  one spin even if they start draining immediately.
  
  Conservatively perform one spin for each reader instead. Since the loop
  counter advances by the number of spins performed, the total spin budget
  per attempt is unchanged.
  
  Note that the total number of allowed spins is still extremely small and is
  subject to change later.
  
  MFC after:	1 week
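
  An illustrative, standalone sketch of the pattern follows (not the kernel
  code itself: the lock-word encoding, the DEMO_* names and the budget are
  made up for the example; lock_delay_spin() is the helper added to
  sys/sys/lock.h below):

	#include <stdint.h>

	typedef unsigned int u_int;

	static void
	cpu_spinwait(void)	/* stand-in for the MD pause hint */
	{
	}

	/* Copied from the sys/sys/lock.h hunk below. */
	#define lock_delay_spin(n)	do {	\
		u_int _i;			\
						\
		for (_i = (n); _i > 0; _i--)	\
			cpu_spinwait();		\
	} while (0)

	/* Hypothetical lock-word layout: reader count in the upper bits. */
	#define	DEMO_READERS(v)		((u_int)((v) >> 4))
	#define	DEMO_SPIN_BUDGET	2000	/* role of rowner_loops */

	/*
	 * Wait for the readers to drain.  Each pass pauses once per
	 * reader observed and advances i by the same amount, so at most
	 * DEMO_SPIN_BUDGET pauses are issued in total regardless of the
	 * reader count.
	 */
	static int
	demo_spin_for_drain(volatile uintptr_t *lockword)
	{
		uintptr_t v;
		u_int i, n;

		v = *lockword;
		for (i = 0; i < DEMO_SPIN_BUDGET; i += n) {
			n = DEMO_READERS(v);
			if (n == 0)
				n = 1;	/* guarantee forward progress */
			lock_delay_spin(n);
			v = *lockword;
			if (DEMO_READERS(v) == 0)
				return (1);	/* drained in time */
		}
		return (0);	/* budget exhausted; block instead */
	}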

Modified:
  head/sys/kern/kern_rwlock.c
  head/sys/kern/kern_sx.c
  head/sys/sys/lock.h

Modified: head/sys/kern/kern_rwlock.c
==============================================================================
--- head/sys/kern/kern_rwlock.c	Thu Oct  5 19:11:25 2017	(r324334)
+++ head/sys/kern/kern_rwlock.c	Thu Oct  5 19:18:02 2017	(r324335)
@@ -414,7 +414,7 @@ __rw_rlock_hard(volatile uintptr_t *c, struct thread *
 #ifdef ADAPTIVE_RWLOCKS
 	volatile struct thread *owner;
 	int spintries = 0;
-	int i;
+	int i, n;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -488,8 +488,9 @@ __rw_rlock_hard(volatile uintptr_t *c, struct thread *
 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
 			    "spinning", "lockname:\"%s\"",
 			    rw->lock_object.lo_name);
-			for (i = 0; i < rowner_loops; i++) {
-				cpu_spinwait();
+			for (i = 0; i < rowner_loops; i += n) {
+				n = RW_READERS(v);
+				lock_delay_spin(n);
 				v = RW_READ_VALUE(rw);
 				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(td, v))
 					break;
@@ -830,7 +831,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, ui
 #ifdef ADAPTIVE_RWLOCKS
 	volatile struct thread *owner;
 	int spintries = 0;
-	int i;
+	int i, n;
 #endif
 	uintptr_t x;
 #ifdef LOCK_PROFILING
@@ -928,8 +929,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, ui
 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
 			    "spinning", "lockname:\"%s\"",
 			    rw->lock_object.lo_name);
-			for (i = 0; i < rowner_loops; i++) {
-				cpu_spinwait();
+			for (i = 0; i < rowner_loops; i += n) {
+				n = RW_READERS(v);
+				lock_delay_spin(n);
 				v = RW_READ_VALUE(rw);
 				if ((v & RW_LOCK_WRITE_SPINNER) == 0)
 					break;

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c	Thu Oct  5 19:11:25 2017	(r324334)
+++ head/sys/kern/kern_sx.c	Thu Oct  5 19:18:02 2017	(r324335)
@@ -502,7 +502,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t t
 	GIANT_DECLARE;
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
-	u_int i, spintries = 0;
+	u_int i, n, spintries = 0;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -600,12 +600,13 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t t
 				    "lockname:\"%s\"", sx->lock_object.lo_name);
 				GIANT_SAVE();
 				spintries++;
-				for (i = 0; i < asx_loops; i++) {
+				for (i = 0; i < asx_loops; i += n) {
 					if (LOCK_LOG_TEST(&sx->lock_object, 0))
 						CTR4(KTR_LOCK,
 				    "%s: shared spinning on %p with %u and %u",
 						    __func__, sx, spintries, i);
-					cpu_spinwait();
+					n = SX_SHARERS(x);
+					lock_delay_spin(n);
 					x = SX_READ_VALUE(sx);
 					if ((x & SX_LOCK_SHARED) == 0 ||
 					    SX_SHARERS(x) == 0)

Modified: head/sys/sys/lock.h
==============================================================================
--- head/sys/sys/lock.h	Thu Oct  5 19:11:25 2017	(r324334)
+++ head/sys/sys/lock.h	Thu Oct  5 19:18:02 2017	(r324335)
@@ -226,6 +226,13 @@ lock_delay_arg_init(struct lock_delay_arg *la, struct 
 	la->spin_cnt = 0;
 }
 
+#define lock_delay_spin(n)	do {	\
+	u_int _i;			\
+					\
+	for (_i = (n); _i > 0; _i--)	\
+		cpu_spinwait();		\
+} while (0)
+
 #define	LOCK_DELAY_SYSINIT(func) \
 	SYSINIT(func##_ld, SI_SUB_LOCK, SI_ORDER_ANY, func, NULL)
 

