svn commit: r357355 - in head/sys: kern sys

Jeff Roberson jeff at FreeBSD.org
Fri Jan 31 22:21:17 UTC 2020


Author: jeff
Date: Fri Jan 31 22:21:15 2020
New Revision: 357355
URL: https://svnweb.freebsd.org/changeset/base/357355

Log:
  Add two missing fences with comments describing them.  The missing fences
  were found by inspection and after a lengthy discussion with jhb and kib;
  they have not produced test failures.
  
  Don't pointer chase through cpu0's smr.  Use the CPU-correct smr even when
  not in a critical section to reduce the likelihood of false sharing.
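
The per-CPU change is easier to see with a small model.  The sketch below is
a userland approximation, not the kernel code: smr_model, shared_via_self()
and NCPU_MODEL are invented names, and the real lookup is done by
zpcpu_get(), which resolves the calling CPU's own copy of the per-CPU smr
structure.  Reading c_shared through the base (cpu0) copy drags in a cache
line that cpu0 also writes on every enter/exit via c_seq; reading it through
the caller's own copy does not.

#include <stdatomic.h>

#define NCPU_MODEL	4

/*
 * Simplified per-CPU layout: one slot per CPU, slot 0 standing in for
 * cpu0's copy, each slot padded out to its own cache line.
 */
struct smr_shared_model {
	atomic_uint	s_wr_seq;
	atomic_uint	s_rd_seq;
};

struct smr_model {
	struct smr_shared_model	*c_shared;
	atomic_uint		 c_seq;	/* written on every enter/exit */
} __attribute__((aligned(64)));

static struct smr_model pcpu_model[NCPU_MODEL];

/* Before: always chase slot 0, whose cache line cpu0 keeps dirtying. */
static inline struct smr_shared_model *
shared_via_cpu0(void)
{
	return (pcpu_model[0].c_shared);
}

/* After: read c_shared from the caller's own slot, as zpcpu_get() does. */
static inline struct smr_shared_model *
shared_via_self(int mycpu)
{
	return (pcpu_model[mycpu].c_shared);
}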

Modified:
  head/sys/kern/subr_smr.c
  head/sys/sys/smr.h

Modified: head/sys/kern/subr_smr.c
==============================================================================
--- head/sys/kern/subr_smr.c	Fri Jan 31 21:20:22 2020	(r357354)
+++ head/sys/kern/subr_smr.c	Fri Jan 31 22:21:15 2020	(r357355)
@@ -195,7 +195,7 @@ smr_advance(smr_t smr)
 	 * odd and an observed value of 0 in a particular CPU means
 	 * it is not currently in a read section.
 	 */
-	s = smr->c_shared;
+	s = zpcpu_get(smr)->c_shared;
 	goal = atomic_fetchadd_int(&s->s_wr_seq, SMR_SEQ_INCR) + SMR_SEQ_INCR;
 
 	/*
@@ -242,16 +242,21 @@ smr_poll(smr_t smr, smr_seq_t goal, bool wait)
 	 */
 	success = true;
 	critical_enter();
-	s = smr->c_shared;
+	s = zpcpu_get(smr)->c_shared;
 
 	/*
 	 * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not
 	 * observe an updated read sequence that is larger than write.
 	 */
 	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
-	s_wr_seq = smr_current(smr);
 
 	/*
+	 * wr_seq must be loaded prior to any c_seq value so that a stale
+	 * c_seq can only reference time after this wr_seq.
+	 */
+	s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);
+
+	/*
 	 * Detect whether the goal is valid and has already been observed.
 	 *
 	 * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for
@@ -335,6 +340,12 @@ smr_poll(smr_t smr, smr_seq_t goal, bool wait)
 
 out:
 	critical_exit();
+
+	/*
+	 * Serialize with smr_advance()/smr_exit().  The caller is now free
+	 * to modify memory as expected.
+	 */
+	atomic_thread_fence_acq();
 
 	return (success);
 }
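
The ordering established by the two hunks above can be modeled in plain C11
atomics.  This is a minimal sketch under stated assumptions, not the kernel
code: poll_model(), exit_model() and NCPU_MODEL are invented names, the
rd_seq advancement and wrap handling of the real smr_poll() are omitted, and
memory_order_acquire stands in for atomic_load_acq_int() and
atomic_thread_fence_acq().

#include <stdatomic.h>
#include <stdbool.h>

#define NCPU_MODEL	4

static atomic_uint s_rd_seq, s_wr_seq;
static atomic_uint c_seq[NCPU_MODEL];	/* 0 when that CPU is not reading */

/* A reader leaving its section: release store, as in smr_exit(). */
void
exit_model(int cpu)
{
	atomic_store_explicit(&c_seq[cpu], 0, memory_order_release);
}

bool
poll_model(unsigned goal)
{
	unsigned rd, wr, c;
	int i;

	/* Load rd_seq first, with acquire, so rd_seq > wr_seq is never seen. */
	rd = atomic_load_explicit(&s_rd_seq, memory_order_acquire);

	/*
	 * Load wr_seq second, also with acquire and before any c_seq, so a
	 * stale c_seq observed below can only name a time after this wr_seq.
	 */
	wr = atomic_load_explicit(&s_wr_seq, memory_order_acquire);

	/* The goal is only meaningful within [rd_seq, wr_seq]. */
	if ((int)(goal - rd) <= 0)
		goto out;		/* already observed globally */
	if ((int)(goal - wr) > 0)
		return (false);		/* not yet published */

	for (i = 0; i < NCPU_MODEL; i++) {
		c = atomic_load_explicit(&c_seq[i], memory_order_relaxed);
		if (c != 0 && (int)(c - goal) < 0)
			return (false);	/* CPU i still in an older section */
	}
out:
	/*
	 * Pairs with the release in exit_model() (smr_exit()/smr_advance()
	 * in the real code): the caller may now reuse memory retired
	 * before 'goal'.
	 */
	atomic_thread_fence(memory_order_acquire);
	return (true);
}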

Modified: head/sys/sys/smr.h
==============================================================================
--- head/sys/sys/smr.h	Fri Jan 31 21:20:22 2020	(r357354)
+++ head/sys/sys/smr.h	Fri Jan 31 22:21:15 2020	(r357355)
@@ -70,10 +70,17 @@ struct smr {
  * Return the current write sequence number.
  */
 static inline smr_seq_t
+smr_shared_current(smr_shared_t s)
+{
+
+	return (atomic_load_int(&s->s_wr_seq));
+}
+
+static inline smr_seq_t
 smr_current(smr_t smr)
 {
 
-	return (atomic_load_int(&smr->c_shared->s_wr_seq));
+	return (smr_shared_current(zpcpu_get(smr)->c_shared));
 }
 
 /*
@@ -106,7 +113,7 @@ smr_enter(smr_t smr)
 	 * is detected and handled there.
 	 */
 	/* This is an add because we do not have atomic_store_acq_int */
-	atomic_add_acq_int(&smr->c_seq, smr_current(smr));
+	atomic_add_acq_int(&smr->c_seq, smr_shared_current(smr->c_shared));
 }
 
 /*

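The smr.h side can be modeled the same way.  Again an illustrative sketch,
not the kernel code: enter_model() and the extern declarations are invented,
critical_enter() is omitted, and the acquiring fetch-add stands in for
atomic_add_acq_int(), which smr_enter() uses because there is no
atomic_store_acq_int().

#include <stdatomic.h>

extern atomic_uint s_wr_seq;	/* shared write sequence (smr_shared_current) */
extern atomic_uint c_seq[];	/* per-CPU, 0 outside a read section */

/* Model of the enter path: publish c_seq, then run the read section. */
void
enter_model(int cpu)
{
	unsigned wr;

	/* smr_shared_current(): plain load of the current write sequence. */
	wr = atomic_load_explicit(&s_wr_seq, memory_order_relaxed);

	/*
	 * c_seq is 0 outside a section, so adding wr stores it.  The
	 * acquire on the RMW keeps the read section's loads from being
	 * reordered above the c_seq publication.
	 */
	atomic_fetch_add_explicit(&c_seq[cpu], wr, memory_order_acquire);
}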
