PERFORCE change 143145 for review
John Birrell
jb at FreeBSD.org
Mon Jun 9 02:48:06 UTC 2008
http://perforce.freebsd.org/chv.cgi?CH=143145
Change 143145 by jb at freebsd3 on 2008/06/09 02:47:36
Add support to send IPIs to just one or more CPUs.
Affected files ...
.. //depot/projects/dtrace6/src/sys/kern/subr_smp.c#2 edit
.. //depot/projects/dtrace6/src/sys/sys/smp.h#2 edit
Differences ...
==== //depot/projects/dtrace6/src/sys/kern/subr_smp.c#2 (text+ko) ====
@@ -75,6 +75,9 @@
SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");
+SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD, &mp_maxid, 0,
+ "Max CPU ID.");
+
SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD, &mp_maxcpus, 0,
"Max number of CPUs that the system was compiled for.");
@@ -105,11 +108,12 @@
"Forwarding of roundrobin to all other CPUs");
/* Variables needed for SMP rendezvous. */
-static void (*smp_rv_setup_func)(void *arg);
-static void (*smp_rv_action_func)(void *arg);
-static void (*smp_rv_teardown_func)(void *arg);
-static void *smp_rv_func_arg;
-static volatile int smp_rv_waiters[2];
+static volatile cpumask_t smp_rv_cpumask;
+static void (*volatile smp_rv_setup_func)(void *arg);
+static void (*volatile smp_rv_action_func)(void *arg);
+static void (*volatile smp_rv_teardown_func)(void *arg);
+static void * volatile smp_rv_func_arg;
+static volatile int smp_rv_waiters[3];
/*
* Shared mutex to restrict busywaits between smp_rendezvous() and
@@ -149,7 +153,7 @@
mp_ncpus);
cpu_mp_announce();
}
-SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_SECOND, mp_start, NULL)
+SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL)
void
forward_signal(struct thread *td)
@@ -330,32 +334,57 @@
void
smp_rendezvous_action(void)
{
+ cpumask_t map = smp_rv_cpumask;
+ int i, ncpus = 0;
+ void* local_func_arg = smp_rv_func_arg;
+ void (*local_setup_func)(void*) = smp_rv_setup_func;
+ void (*local_action_func)(void*) = smp_rv_action_func;
+ void (*local_teardown_func)(void*) = smp_rv_teardown_func;
+
+ for (i = 0; i < MAXCPU; i++)
+ if (((1 << i) & map) != 0 && pcpu_find(i) != NULL)
+ ncpus++;
+ /* Ensure we have up-to-date values. */
+ atomic_add_acq_int(&smp_rv_waiters[0], 1);
+ while (smp_rv_waiters[0] < ncpus)
+ cpu_spinwait();
+
/* setup function */
- if (smp_rv_setup_func != NULL)
- smp_rv_setup_func(smp_rv_func_arg);
- /* spin on entry rendezvous */
- atomic_add_int(&smp_rv_waiters[0], 1);
- while (atomic_load_acq_int(&smp_rv_waiters[0]) < mp_ncpus)
- ; /* nothing */
+ if (local_setup_func != smp_no_rendevous_barrier) {
+ if (smp_rv_setup_func != NULL)
+ smp_rv_setup_func(smp_rv_func_arg);
+
+ /* spin on entry rendezvous */
+ atomic_add_int(&smp_rv_waiters[1], 1);
+ while (smp_rv_waiters[1] < ncpus)
+ cpu_spinwait();
+ }
+
/* action function */
- if (smp_rv_action_func != NULL)
- smp_rv_action_func(smp_rv_func_arg);
+ if (local_action_func != NULL)
+ local_action_func(local_func_arg);
+
/* spin on exit rendezvous */
- atomic_add_int(&smp_rv_waiters[1], 1);
- while (atomic_load_acq_int(&smp_rv_waiters[1]) < mp_ncpus)
- ; /* nothing */
+ atomic_add_int(&smp_rv_waiters[2], 1);
+ if (local_teardown_func == smp_no_rendevous_barrier)
+ return;
+ while (smp_rv_waiters[2] < ncpus)
+ cpu_spinwait();
+
/* teardown function */
- if (smp_rv_teardown_func != NULL)
- smp_rv_teardown_func(smp_rv_func_arg);
+ if (local_teardown_func != NULL)
+ local_teardown_func(local_func_arg);
}
void
-smp_rendezvous(void (* setup_func)(void *),
- void (* action_func)(void *),
- void (* teardown_func)(void *),
- void *arg)
+smp_rendezvous_cpus(cpumask_t map,
+ void (* setup_func)(void *),
+ void (* action_func)(void *),
+ void (* teardown_func)(void *),
+ void *arg)
{
+ int i, ncpus = 0;
if (!smp_started) {
if (setup_func != NULL)
@@ -366,27 +395,48 @@
teardown_func(arg);
return;
}
+
+ for (i = 0; i < MAXCPU; i++)
+ if (((1 << i) & map) != 0 && pcpu_find(i) != NULL)
+ ncpus++;
/* obtain rendezvous lock */
mtx_lock_spin(&smp_ipi_mtx);
/* set static function pointers */
+ smp_rv_cpumask = map & ~(1 << curcpu);
smp_rv_setup_func = setup_func;
smp_rv_action_func = action_func;
smp_rv_teardown_func = teardown_func;
smp_rv_func_arg = arg;
- smp_rv_waiters[0] = 0;
smp_rv_waiters[1] = 0;
+ smp_rv_waiters[2] = 0;
+ atomic_store_rel_int(&smp_rv_waiters[0], 0);
/* signal other processors, which will enter the IPI with interrupts off */
- ipi_all_but_self(IPI_RENDEZVOUS);
+ ipi_selected(map, IPI_RENDEZVOUS);
+
+ /* Check if the current CPU is in the map */
+ if ((map & (1 << curcpu)) != 0)
+ /* call executor function for the current CPU */
+ smp_rendezvous_action();
- /* call executor function */
- smp_rendezvous_action();
+ if (teardown_func == smp_no_rendevous_barrier)
+ while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
+ cpu_spinwait();
/* release lock */
mtx_unlock_spin(&smp_ipi_mtx);
}
+
+void
+smp_rendezvous(void (* setup_func)(void *),
+ void (* action_func)(void *),
+ void (* teardown_func)(void *),
+ void *arg)
+{
+ smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
+}
#else /* !SMP */
/*
@@ -405,9 +455,24 @@
mp_setvariables_for_up, NULL)
void
-smp_rendezvous(void (* setup_func)(void *),
- void (* action_func)(void *),
- void (* teardown_func)(void *),
+smp_rendezvous_cpus(cpumask_t map,
+ void (*setup_func)(void *),
+ void (*action_func)(void *),
+ void (*teardown_func)(void *),
+ void *arg)
+{
+ if (setup_func != NULL)
+ setup_func(arg);
+ if (action_func != NULL)
+ action_func(arg);
+ if (teardown_func != NULL)
+ teardown_func(arg);
+}
+
+void
+smp_rendezvous(void (*setup_func)(void *),
+ void (*action_func)(void *),
+ void (*teardown_func)(void *),
void *arg)
{
@@ -419,3 +484,11 @@
teardown_func(arg);
}
#endif /* SMP */
+
+void
+smp_no_rendevous_barrier(void *dummy)
+{
+#ifdef SMP
+ KASSERT((!smp_started),("smp_no_rendevous called and smp is started"));
+#endif
+}
==== //depot/projects/dtrace6/src/sys/sys/smp.h#2 (text+ko) ====
@@ -106,10 +106,16 @@
int stop_cpus_nmi(cpumask_t);
#endif
#endif /* SMP */
+void smp_no_rendevous_barrier(void *);
void smp_rendezvous(void (*)(void *),
void (*)(void *),
void (*)(void *),
void *arg);
+void smp_rendezvous_cpus(cpumask_t,
+ void (*)(void *),
+ void (*)(void *),
+ void (*)(void *),
+ void *arg);
#endif /* !LOCORE */
#endif /* _KERNEL */
#endif /* _SYS_SMP_H_ */
More information about the p4-projects
mailing list