git: b5449c92b489 - main - Use atomic_interrupt_fence() instead of bare __compiler_membar()

Konstantin Belousov kib at FreeBSD.org
Sat Feb 27 23:27:49 UTC 2021


The branch main has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=b5449c92b489445635c7962875ce73b2c9211bba

commit b5449c92b489445635c7962875ce73b2c9211bba
Author:     Konstantin Belousov <kib at FreeBSD.org>
AuthorDate: 2021-02-26 23:54:17 +0000
Commit:     Konstantin Belousov <kib at FreeBSD.org>
CommitDate: 2021-02-27 23:27:29 +0000

    Use atomic_interrupt_fence() instead of bare __compiler_membar()
    
    for the places which definitely use the membar to sync with interrupt handlers.
    
    The libc and rtld uses of __compiler_membar() seem to want proper compiler
    barriers.
    
    The barrier in sched_unpin_lite() after the td_pinned decrement seems to be
    unneeded, so it is removed instead of being converted.
    
    Reviewed by:    markj
    MFC after:      1 week
    Sponsored by:   The FreeBSD Foundation
    Differential Revision:  https://reviews.freebsd.org/D28956
---
 sys/kern/kern_rmlock.c | 32 +++++++++++++++-----------------
 sys/sys/kpilite.h      |  5 ++---
 sys/sys/mount.h        |  4 ++--
 sys/sys/sched.h        |  4 ++--
 sys/sys/systm.h        |  6 +++---
 5 files changed, 24 insertions(+), 27 deletions(-)
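
Background for the conversion below: atomic_interrupt_fence() only has to
order a thread's own accesses against an interrupt handler running on the
same CPU, so on typical architectures it is expected to compile to nothing
more than a compiler barrier, much like __compiler_membar(); the spelling
documents the intent.  What follows is a minimal user-space sketch of the
sched_pin()/sched_unpin() pattern touched by this commit, not kernel code:
thread_lite, td_pinned and interrupt_fence() are illustrative stand-ins,
and interrupt_fence() uses C11 atomic_signal_fence() as the closest
portable analogue of a same-CPU interrupt fence.

#include <stdatomic.h>

struct thread_lite {
	int td_pinned;		/* non-zero: stay on this CPU */
};

/* Stand-in for atomic_interrupt_fence(): a compiler-only fence. */
#define	interrupt_fence()	atomic_signal_fence(memory_order_seq_cst)

static struct thread_lite thread0;
static struct thread_lite *curthread = &thread0;

static void
sched_pin_sketch(void)
{
	curthread->td_pinned++;
	/* Keep pinned work from being reordered above the increment. */
	interrupt_fence();
}

static void
sched_unpin_sketch(void)
{
	/* Keep pinned work from being reordered below the decrement. */
	interrupt_fence();
	curthread->td_pinned--;
}

int
main(void)
{
	sched_pin_sketch();
	/* ... per-CPU work that must not migrate ... */
	sched_unpin_sketch();
	return (0);
}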

diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index 9135709d88cf..f661e209b633 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -366,7 +366,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	 * Check to see if the IPI granted us the lock after all.  The load of
 	 * rmp_flags must happen after the tracker is removed from the list.
 	 */
-	__compiler_membar();
+	atomic_interrupt_fence();
 	if (tracker->rmp_flags) {
 		/* Just add back tracker - we hold the lock. */
 		rm_tracker_add(pc, tracker);
@@ -448,7 +448,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 
 	td->td_critnest++;	/* critical_enter(); */
 
-	__compiler_membar();
+	atomic_interrupt_fence();
 
 	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
 
@@ -456,7 +456,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 
 	sched_pin();
 
-	__compiler_membar();
+	atomic_interrupt_fence();
 
 	td->td_critnest--;
 
@@ -873,17 +873,15 @@ db_show_rm(const struct lock_object *lock)
  * Concurrent writers take turns taking the lock while going off cpu. If this is
  * of concern for your usecase, this is not the right primitive.
  *
- * Neither rms_rlock nor rms_runlock use fences. Instead compiler barriers are
- * inserted to prevert reordering of generated code. Execution ordering is
- * provided with the use of an IPI handler.
+ * Neither rms_rlock nor rms_runlock use thread fences. Instead interrupt
+ * fences are inserted to ensure ordering with the code executed in the IPI
+ * handler.
  *
  * No attempt is made to track which CPUs read locked at least once,
  * consequently write locking sends IPIs to all of them. This will become a
  * problem at some point. The easiest way to lessen it is to provide a bitmap.
  */
 
-#define rms_int_membar()	__compiler_membar()
-
 #define	RMS_NOOWNER	((void *)0x1)
 #define	RMS_TRANSIENT	((void *)0x2)
 #define	RMS_FLAGMASK	0xf
@@ -1030,14 +1028,14 @@ rms_rlock(struct rmslock *rms)
 	critical_enter();
 	pcpu = rms_int_pcpu(rms);
 	rms_int_influx_enter(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(rms->writers > 0)) {
 		rms_rlock_fallback(rms);
 		return;
 	}
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_readers_inc(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 }
@@ -1052,15 +1050,15 @@ rms_try_rlock(struct rmslock *rms)
 	critical_enter();
 	pcpu = rms_int_pcpu(rms);
 	rms_int_influx_enter(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(rms->writers > 0)) {
 		rms_int_influx_exit(rms, pcpu);
 		critical_exit();
 		return (0);
 	}
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_readers_inc(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 	return (1);
@@ -1092,14 +1090,14 @@ rms_runlock(struct rmslock *rms)
 	critical_enter();
 	pcpu = rms_int_pcpu(rms);
 	rms_int_influx_enter(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(rms->writers > 0)) {
 		rms_runlock_fallback(rms);
 		return;
 	}
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_readers_dec(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 }
diff --git a/sys/sys/kpilite.h b/sys/sys/kpilite.h
index 8742ef5cfbe8..2a7e9743f799 100644
--- a/sys/sys/kpilite.h
+++ b/sys/sys/kpilite.h
@@ -38,7 +38,7 @@ sched_pin_lite(struct thread_lite *td)
 
 	KASSERT((struct thread *)td == curthread, ("sched_pin called on non curthread"));
 	td->td_pinned++;
-	__compiler_membar();
+	atomic_interrupt_fence();
 }
 
 static __inline void
@@ -47,9 +47,8 @@ sched_unpin_lite(struct thread_lite *td)
 
 	KASSERT((struct thread *)td == curthread, ("sched_unpin called on non curthread"));
 	KASSERT(td->td_pinned > 0, ("sched_unpin called on non pinned thread"));
-	__compiler_membar();
+	atomic_interrupt_fence();
 	td->td_pinned--;
-	__compiler_membar();
 }
 #endif
 #endif
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index ca3261a22e62..630cd521fbd0 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -1091,7 +1091,7 @@ void resume_all_fs(void);
 	_mpcpu = vfs_mount_pcpu(mp);				\
 	MPASS(mpcpu->mntp_thread_in_ops == 0);			\
 	_mpcpu->mntp_thread_in_ops = 1;				\
-	__compiler_membar();					\
+	atomic_interrupt_fence();					\
 	if (__predict_false(mp->mnt_vfs_ops > 0)) {		\
 		vfs_op_thread_exit_crit(mp, _mpcpu);		\
 		_retval_crit = false;				\
@@ -1111,7 +1111,7 @@ void resume_all_fs(void);
 #define vfs_op_thread_exit_crit(mp, _mpcpu) do {		\
 	MPASS(_mpcpu == vfs_mount_pcpu(mp));			\
 	MPASS(_mpcpu->mntp_thread_in_ops == 1);			\
-	__compiler_membar();					\
+	atomic_interrupt_fence();					\
 	_mpcpu->mntp_thread_in_ops = 0;				\
 } while (0)
 
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index d734ec61266e..64651ffa9c90 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -173,13 +173,13 @@ static __inline void
 sched_pin(void)
 {
 	curthread->td_pinned++;
-	__compiler_membar();
+	atomic_interrupt_fence();
 }
 
 static __inline void
 sched_unpin(void)
 {
-	__compiler_membar();
+	atomic_interrupt_fence();
 	curthread->td_pinned--;
 }
 
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 5de12e5bc1e5..72a10c401af9 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -284,7 +284,7 @@ critical_enter(void)
 
 	td = (struct thread_lite *)curthread;
 	td->td_critnest++;
-	__compiler_membar();
+	atomic_interrupt_fence();
 }
 
 static __inline void
@@ -295,9 +295,9 @@ critical_exit(void)
 	td = (struct thread_lite *)curthread;
 	KASSERT(td->td_critnest != 0,
 	    ("critical_exit: td_critnest == 0"));
-	__compiler_membar();
+	atomic_interrupt_fence();
 	td->td_critnest--;
-	__compiler_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(td->td_owepreempt))
 		critical_exit_preempt();
 

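The block comment updated in kern_rmlock.c above explains why the rms read
path gets away with interrupt fences: the write side runs its part of the
protocol in an IPI handler on each CPU, so only same-CPU ordering against
that handler is required rather than a hardware memory fence.  The sketch
below models just the reader fast-path ordering of rms_rlock() in plain
C11; struct rms_sketch, its fields and interrupt_fence() are illustrative
stand-ins, and the write/IPI side and the fallback path are omitted.

#include <stdatomic.h>
#include <stdbool.h>

/* Stand-in for atomic_interrupt_fence(). */
#define	interrupt_fence()	atomic_signal_fence(memory_order_seq_cst)

struct rms_sketch {
	int writers;	/* set by the write side before it sends IPIs */
	int influx;	/* per-CPU: reader is between enter and exit */
	int readers;	/* per-CPU: read-lock count */
};

static bool
rms_rlock_fast_sketch(struct rms_sketch *rms)
{
	rms->influx = 1;
	/* The influx store must be visible before writers is checked. */
	interrupt_fence();
	if (rms->writers > 0) {
		/* A writer is active; take the fallback path (omitted). */
		rms->influx = 0;
		return (false);
	}
	interrupt_fence();
	rms->readers++;
	/* The reader count must be visible before influx is cleared. */
	interrupt_fence();
	rms->influx = 0;
	return (true);
}

int
main(void)
{
	struct rms_sketch rms = { 0, 0, 0 };

	return (rms_rlock_fast_sketch(&rms) ? 0 : 1);
}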
