svn commit: r355230 - in head/sys: kern sys

Mateusz Guzik mjg at FreeBSD.org
Sat Nov 30 17:22:11 UTC 2019


Author: mjg
Date: Sat Nov 30 17:22:10 2019
New Revision: 355230
URL: https://svnweb.freebsd.org/changeset/base/355230

Log:
  Add a way to inject fences using IPIs
  
  A variant of this facility was already used by rmlocks, where IPIs
  enforce ordering.
  
  This allows fences to be elided where they are rarely needed and where
  the cost of an IPI (should one be necessary) is the cheaper option.
  
  Reviewed by:	kib, jeff (previous version)
  Sponsored by:	The FreeBSD Foundation
  Differential Revision:	https://reviews.freebsd.org/D21740
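
As a rough sketch of the intended usage pattern (not part of this commit;
fast_flag, slow_flag, fast_enter() and slow_begin() below are made-up
names), a frequently executed path can replace its explicit fence with a
compiler barrier, while the rare path calls cpus_fence_seq_cst() to have
every CPU execute the fence on its behalf:

/* Hypothetical example of the asymmetric-fence pattern described above. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <machine/atomic.h>

static volatile int	fast_flag;	/* set by the hot path */
static volatile int	slow_flag;	/* set by the rare path */

/* Hot path: runs all the time, gets away with a compiler barrier. */
static int
fast_enter(void)
{

	fast_flag = 1;
	__compiler_membar();		/* no hardware fence here */
	return (slow_flag == 0);
}

/* Rare path: pays for an IPI broadcast instead. */
static void
slow_begin(void)
{

	slow_flag = 1;
	/*
	 * The intent: once every CPU has executed its fence, the store to
	 * slow_flag is visible to new fast-path callers, and any fast_flag
	 * store that preceded a remote fence is visible to us.
	 */
	cpus_fence_seq_cst();
}

Whether this pays off depends on how rare the slow path really is, which
is the trade-off the comment added to subr_smp.c spells out.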

Modified:
  head/sys/kern/subr_smp.c
  head/sys/sys/smp.h

Modified: head/sys/kern/subr_smp.c
==============================================================================
--- head/sys/kern/subr_smp.c	Sat Nov 30 16:59:29 2019	(r355229)
+++ head/sys/kern/subr_smp.c	Sat Nov 30 17:22:10 2019	(r355230)
@@ -929,6 +929,66 @@ quiesce_all_cpus(const char *wmesg, int prio)
 	return quiesce_cpus(all_cpus, wmesg, prio);
 }
 
+/*
+ * Observe all CPUs not executing in a critical section.
+ * We are not in one ourselves, so the check for the current CPU is
+ * safe. If the observed thread changes to a different one, we know
+ * the section was exited as well.
+ */
+void
+quiesce_all_critical(void)
+{
+	struct thread *td, *newtd;
+	struct pcpu *pcpu;
+	int cpu;
+
+	MPASS(curthread->td_critnest == 0);
+
+	CPU_FOREACH(cpu) {
+		pcpu = cpuid_to_pcpu[cpu];
+		td = pcpu->pc_curthread;
+		for (;;) {
+			if (td->td_critnest == 0)
+				break;
+			cpu_spinwait();
+			newtd = (struct thread *)
+			    atomic_load_acq_ptr((u_long *)&pcpu->pc_curthread);
+			if (td != newtd)
+				break;
+		}
+	}
+}
+
+static void
+cpus_fence_seq_cst_issue(void *arg __unused)
+{
+
+	atomic_thread_fence_seq_cst();
+}
+
+/*
+ * Send an IPI forcing a sequentially consistent fence.
+ *
+ * Allows replacement of an explicit fence with a compiler barrier.
+ * Trades a speed-up during normal execution for a significant slowdown
+ * when the barrier is needed.
+ */
+void
+cpus_fence_seq_cst(void)
+{
+
+#ifdef SMP
+	smp_rendezvous(
+	    smp_no_rendezvous_barrier,
+	    cpus_fence_seq_cst_issue,
+	    smp_no_rendezvous_barrier,
+	    NULL
+	);
+#else
+	cpus_fence_seq_cst_issue(NULL);
+#endif
+}
+
 /* Extra care is taken with this sysctl because the data type is volatile */
 static int
 sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
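
For context, a hypothetical consumer of quiesce_all_critical() could look
like the following (not part of this commit; struct cfg, the cfg_*
functions and the use of M_TEMP are made up for illustration): readers
touch a shared object only while inside a critical section, and the
updater publishes a replacement, forces a fence round as above, then
waits until no CPU can still be inside a critical section that saw the
old pointer.

/* Hypothetical sketch of a quiesce_all_critical() consumer. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/smp.h>

struct cfg {
	int	val;
};

static struct cfg * volatile active_cfg;

/* Reader: never sleeps and never leaves the critical section with 'c'. */
static int
cfg_read(void)
{
	struct cfg *c;
	int val;

	critical_enter();
	c = active_cfg;
	val = c->val;
	critical_exit();
	return (val);
}

/* Updater: publish the new object, then wait out old readers. */
static void
cfg_replace(struct cfg *newc)
{
	struct cfg *oldc;

	oldc = active_cfg;
	active_cfg = newc;
	/* Force a fence on every CPU before waiting for readers. */
	cpus_fence_seq_cst();
	/*
	 * Wait until every CPU has been observed outside a critical
	 * section (or has switched threads); after that no reader should
	 * still be dereferencing oldc.  Reader-side ordering subtleties
	 * that real consumers (rmlocks, SMR) handle are glossed over in
	 * this sketch.
	 */
	quiesce_all_critical();
	free(oldc, M_TEMP);	/* M_TEMP stands in for a real malloc type */
}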

Modified: head/sys/sys/smp.h
==============================================================================
--- head/sys/sys/smp.h	Sat Nov 30 16:59:29 2019	(r355229)
+++ head/sys/sys/smp.h	Sat Nov 30 17:22:10 2019	(r355230)
@@ -264,6 +264,8 @@ extern	struct mtx smp_ipi_mtx;
 
 int	quiesce_all_cpus(const char *, int);
 int	quiesce_cpus(cpuset_t, const char *, int);
+void	quiesce_all_critical(void);
+void	cpus_fence_seq_cst(void);
 void	smp_no_rendezvous_barrier(void *);
 void	smp_rendezvous(void (*)(void *), 
 		       void (*)(void *),
