svn commit: r210939 - in head/sys: amd64/amd64 amd64/include i386/i386 i386/include i386/xen ia64/ia64 ia64/include kern mips/include mips/mips powerpc/include powerpc/powerpc sparc64/include sun4v...

John Baldwin jhb at FreeBSD.org
Fri Aug 6 15:37:00 UTC 2010


Author: jhb
Date: Fri Aug  6 15:36:59 2010
New Revision: 210939
URL: http://svn.freebsd.org/changeset/base/210939

Log:
  Add a new ipi_cpu() function to the MI IPI API that can be used to send an
  IPI to a specific CPU by its cpuid.  Replace calls to ipi_selected() that
  constructed a mask for a single CPU with calls to ipi_cpu() instead.  This
  will matter more in the future when we transition from cpumask_t to
  cpuset_t for CPU masks, at which point building a CPU mask will be more
  expensive.
  
  Submitted by:	peter, sbruno
  Reviewed by:	rookie
  Obtained from:	Yahoo! (x86)
  MFC after:	1 month
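
The cost argument in the log is visible in miniature below.  With a flat
integer mask, naming one CPU is a single shift; with a set type, the caller
has to zero and populate a structure first, and ipi_cpu() sidesteps both by
taking the cpuid directly.  The sketch is a hedged, user-space illustration
only: struct cpuset_sketch, CPU_SETSIZE_SKETCH, single_cpu_mask() and
single_cpu_set() are hypothetical stand-ins, not the kernel's cpumask_t or
cpuset_t API.

#include <string.h>

#define CPU_SETSIZE_SKETCH	256	/* hypothetical, not the kernel constant */

struct cpuset_sketch {			/* stand-in for a cpuset_t-style type */
	unsigned long bits[CPU_SETSIZE_SKETCH / (8 * sizeof(unsigned long))];
};

/* With cpumask_t, a single-CPU mask is just a shifted constant. */
static unsigned int
single_cpu_mask(int cpu)
{

	return (1u << cpu);
}

/* With a set type, the caller must clear and populate a structure. */
static struct cpuset_sketch
single_cpu_set(int cpu)
{
	struct cpuset_sketch set;

	memset(&set, 0, sizeof(set));
	set.bits[cpu / (8 * sizeof(unsigned long))] |=
	    1UL << (cpu % (8 * sizeof(unsigned long)));
	return (set);
}

/*
 * ipi_cpu() lets callers skip the construction entirely, which is the
 * mechanical change made in the scheduler hunks below:
 *
 *	ipi_selected(1 << cpu, IPI_AST);   ->   ipi_cpu(cpu, IPI_AST);
 */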

Modified:
  head/sys/amd64/amd64/mp_machdep.c
  head/sys/amd64/include/smp.h
  head/sys/i386/i386/mp_machdep.c
  head/sys/i386/include/smp.h
  head/sys/i386/xen/mp_machdep.c
  head/sys/ia64/ia64/mp_machdep.c
  head/sys/ia64/include/smp.h
  head/sys/kern/sched_4bsd.c
  head/sys/kern/sched_ule.c
  head/sys/kern/subr_smp.c
  head/sys/mips/include/smp.h
  head/sys/mips/mips/mp_machdep.c
  head/sys/powerpc/include/smp.h
  head/sys/powerpc/powerpc/mp_machdep.c
  head/sys/sparc64/include/smp.h
  head/sys/sun4v/include/smp.h
  head/sys/sun4v/sun4v/mp_machdep.c

Modified: head/sys/amd64/amd64/mp_machdep.c
==============================================================================
--- head/sys/amd64/amd64/mp_machdep.c	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/amd64/amd64/mp_machdep.c	Fri Aug  6 15:36:59 2010	(r210939)
@@ -1239,15 +1239,51 @@ ipi_selected(cpumask_t cpus, u_int ipi)
 			do {
 				old_pending = cpu_ipi_pending[cpu];
 				new_pending = old_pending | bitmap;
-			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],old_pending, new_pending));	
-
+			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+			    old_pending, new_pending));	
 			if (old_pending)
 				continue;
 		}
-
 		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
 	}
+}
+
+/*
+ * send an IPI to a specific CPU.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+	u_int bitmap = 0;
+	u_int old_pending;
+	u_int new_pending;
+
+	if (IPI_IS_BITMAPED(ipi)) { 
+		bitmap = 1 << ipi;
+		ipi = IPI_BITMAP_VECTOR;
+	}
 
+	/*
+	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
+	 * of help in order to understand what is the source.
+	 * Set the mask of receiving CPUs for this purpose.
+	 */
+	if (ipi == IPI_STOP_HARD)
+		atomic_set_int(&ipi_nmi_pending, 1 << cpu);
+
+	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
+	KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
+
+	if (bitmap) {
+		do {
+			old_pending = cpu_ipi_pending[cpu];
+			new_pending = old_pending | bitmap;
+		} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+		    old_pending, new_pending));	
+		if (old_pending)
+			return;
+	}
+	lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
 }
 
 /*
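
For readers unfamiliar with the bitmapped path in the hunk above: IPIs such
as IPI_AST and IPI_PREEMPT are ORed into a per-CPU pending word with a
compare-and-swap loop, and only the sender that sets the first bit raises
IPI_BITMAP_VECTOR; a later sender sees old_pending != 0 and returns, so the
request coalesces into the interrupt already in flight, which the target's
ipi_bitmap_handler() (declared in the smp.h hunks below) then drains.  The
following user-space sketch of that coalescing pattern is an illustration
under stated assumptions: pending[], deliver_vector(), MAXCPU_SKETCH and
BITMAP_VECTOR_SKETCH are hypothetical names, while the real code uses
cpu_ipi_pending[], atomic_cmpset_int() and lapic_ipi_vectored() as shown
above.

#include <stdatomic.h>
#include <stdio.h>

#define MAXCPU_SKETCH		64	/* hypothetical CPU count */
#define BITMAP_VECTOR_SKETCH	0xf8	/* hypothetical vector number */

static _Atomic unsigned int pending[MAXCPU_SKETCH];	/* one word per CPU */

/* Stand-in for the hardware send (lapic_ipi_vectored() in the hunk above). */
static void
deliver_vector(int cpu, unsigned int vector)
{

	printf("send vector %#x to cpu %d\n", vector, cpu);
}

static void
ipi_bitmap_sketch(int cpu, unsigned int ipi_bit)
{
	unsigned int old, new;

	/* Publish the request bit, mirroring the do/while CAS loop above. */
	do {
		old = atomic_load(&pending[cpu]);
		new = old | (1u << ipi_bit);
	} while (!atomic_compare_exchange_weak(&pending[cpu], &old, new));

	/* Only the sender that set the first pending bit raises the IPI. */
	if (old == 0)
		deliver_vector(cpu, BITMAP_VECTOR_SKETCH);
}

int
main(void)
{

	ipi_bitmap_sketch(1, 3);	/* first request: vector is delivered */
	ipi_bitmap_sketch(1, 5);	/* coalesces: no second delivery */
	return (0);
}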

Modified: head/sys/amd64/include/smp.h
==============================================================================
--- head/sys/amd64/include/smp.h	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/amd64/include/smp.h	Fri Aug  6 15:36:59 2010	(r210939)
@@ -60,10 +60,11 @@ void	cpu_add(u_int apic_id, char boot_cp
 void	cpustop_handler(void);
 void	cpususpend_handler(void);
 void	init_secondary(void);
-int	ipi_nmi_handler(void);
-void	ipi_selected(cpumask_t cpus, u_int ipi);
 void	ipi_all_but_self(u_int ipi);
 void 	ipi_bitmap_handler(struct trapframe frame);
+void	ipi_cpu(int cpu, u_int ipi);
+int	ipi_nmi_handler(void);
+void	ipi_selected(cpumask_t cpus, u_int ipi);
 u_int	mp_bootaddress(u_int);
 int	mp_grab_cpu_hlt(void);
 void	smp_cache_flush(void);

Modified: head/sys/i386/i386/mp_machdep.c
==============================================================================
--- head/sys/i386/i386/mp_machdep.c	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/i386/i386/mp_machdep.c	Fri Aug  6 15:36:59 2010	(r210939)
@@ -1327,15 +1327,51 @@ ipi_selected(cpumask_t cpus, u_int ipi)
 			do {
 				old_pending = cpu_ipi_pending[cpu];
 				new_pending = old_pending | bitmap;
-			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],old_pending, new_pending));	
-
+			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+			    old_pending, new_pending));	
 			if (old_pending)
 				continue;
 		}
-
 		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
 	}
+}
+
+/*
+ * send an IPI to a specific CPU.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+	u_int bitmap = 0;
+	u_int old_pending;
+	u_int new_pending;
+
+	if (IPI_IS_BITMAPED(ipi)) { 
+		bitmap = 1 << ipi;
+		ipi = IPI_BITMAP_VECTOR;
+	}
 
+	/*
+	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
+	 * of help in order to understand what is the source.
+	 * Set the mask of receiving CPUs for this purpose.
+	 */
+	if (ipi == IPI_STOP_HARD)
+		atomic_set_int(&ipi_nmi_pending, 1 << cpu);
+
+	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
+	KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
+
+	if (bitmap) {
+		do {
+			old_pending = cpu_ipi_pending[cpu];
+			new_pending = old_pending | bitmap;
+		} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+		    old_pending, new_pending));	
+		if (old_pending)
+			return;
+	}
+	lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
 }
 
 /*

Modified: head/sys/i386/include/smp.h
==============================================================================
--- head/sys/i386/include/smp.h	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/i386/include/smp.h	Fri Aug  6 15:36:59 2010	(r210939)
@@ -60,12 +60,13 @@ inthand_t
 void	cpu_add(u_int apic_id, char boot_cpu);
 void	cpustop_handler(void);
 void	init_secondary(void);
-int	ipi_nmi_handler(void);
-void	ipi_selected(cpumask_t cpus, u_int ipi);
 void	ipi_all_but_self(u_int ipi);
 #ifndef XEN
 void 	ipi_bitmap_handler(struct trapframe frame);
 #endif
+void	ipi_cpu(int cpu, u_int ipi);
+int	ipi_nmi_handler(void);
+void	ipi_selected(cpumask_t cpus, u_int ipi);
 u_int	mp_bootaddress(u_int);
 int	mp_grab_cpu_hlt(void);
 void	smp_cache_flush(void);

Modified: head/sys/i386/xen/mp_machdep.c
==============================================================================
--- head/sys/i386/xen/mp_machdep.c	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/i386/xen/mp_machdep.c	Fri Aug  6 15:36:59 2010	(r210939)
@@ -1121,19 +1121,14 @@ ipi_selected(cpumask_t cpus, u_int ipi)
 		cpu--;
 		cpus &= ~(1 << cpu);
 
-		KASSERT(cpu_apic_ids[cpu] != -1,
-		    ("IPI to non-existent CPU %d", cpu));
-
 		if (bitmap) {
 			do {
 				old_pending = cpu_ipi_pending[cpu];
 				new_pending = old_pending | bitmap;
-			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],old_pending, new_pending));	
-
+			} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+			    old_pending, new_pending));	
 			if (!old_pending)
 				ipi_pcpu(cpu, RESCHEDULE_VECTOR);
-			continue;
-			
 		} else {
 			KASSERT(call_data != NULL, ("call_data not set"));
 			ipi_pcpu(cpu, CALL_FUNCTION_VECTOR);
@@ -1142,6 +1137,45 @@ ipi_selected(cpumask_t cpus, u_int ipi)
 }
 
 /*
+ * send an IPI to a specific CPU.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+	u_int bitmap = 0;
+	u_int old_pending;
+	u_int new_pending;
+	
+	if (IPI_IS_BITMAPED(ipi)) { 
+		bitmap = 1 << ipi;
+		ipi = IPI_BITMAP_VECTOR;
+	} 
+
+	/*
+	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
+	 * of help in order to understand what is the source.
+	 * Set the mask of receiving CPUs for this purpose.
+	 */
+	if (ipi == IPI_STOP_HARD)
+		atomic_set_int(&ipi_nmi_pending, 1 << cpu);
+
+	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
+
+	if (bitmap) {
+		do {
+			old_pending = cpu_ipi_pending[cpu];
+			new_pending = old_pending | bitmap;
+		} while  (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
+		    old_pending, new_pending));	
+		if (!old_pending)
+			ipi_pcpu(cpu, RESCHEDULE_VECTOR);
+	} else {
+		KASSERT(call_data != NULL, ("call_data not set"));
+		ipi_pcpu(cpu, CALL_FUNCTION_VECTOR);
+	}
+}
+
+/*
  * send an IPI to all CPUs EXCEPT myself
  */
 void

Modified: head/sys/ia64/ia64/mp_machdep.c
==============================================================================
--- head/sys/ia64/ia64/mp_machdep.c	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/ia64/ia64/mp_machdep.c	Fri Aug  6 15:36:59 2010	(r210939)
@@ -408,6 +408,16 @@ ipi_selected(cpumask_t cpus, int ipi)
 }
 
 /*
+ * send an IPI to a specific CPU.
+ */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+
+	ipi_send(cpuid_to_pcpu[cpu], ipi);
+}
+
+/*
  * send an IPI to all CPUs EXCEPT myself.
  */
 void

Modified: head/sys/ia64/include/smp.h
==============================================================================
--- head/sys/ia64/include/smp.h	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/ia64/include/smp.h	Fri Aug  6 15:36:59 2010	(r210939)
@@ -25,6 +25,7 @@ extern int ia64_ipi_stop;
 extern int ia64_ipi_wakeup;
 
 void	ipi_all_but_self(int ipi);
+void	ipi_cpu(int cpu, u_int ipi);
 void	ipi_selected(cpumask_t cpus, int ipi);
 void	ipi_send(struct pcpu *, int ipi);
 

Modified: head/sys/kern/sched_4bsd.c
==============================================================================
--- head/sys/kern/sched_4bsd.c	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/kern/sched_4bsd.c	Fri Aug  6 15:36:59 2010	(r210939)
@@ -1154,7 +1154,7 @@ kick_other_cpu(int pri, int cpuid)
 	pcpu = pcpu_find(cpuid);
 	if (idle_cpus_mask & pcpu->pc_cpumask) {
 		forward_wakeups_delivered++;
-		ipi_selected(pcpu->pc_cpumask, IPI_AST);
+		ipi_cpu(cpuid, IPI_AST);
 		return;
 	}
 
@@ -1167,13 +1167,13 @@ kick_other_cpu(int pri, int cpuid)
 	if (pri <= PRI_MAX_ITHD)
 #endif /* ! FULL_PREEMPTION */
 	{
-		ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
+		ipi_cpu(cpuid, IPI_PREEMPT);
 		return;
 	}
 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
 
 	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
-	ipi_selected(pcpu->pc_cpumask, IPI_AST);
+	ipi_cpu(cpuid, IPI_AST);
 	return;
 }
 #endif /* SMP */
@@ -1666,7 +1666,7 @@ sched_affinity(struct thread *td)
 
 		td->td_flags |= TDF_NEEDRESCHED;
 		if (td != curthread)
-			ipi_selected(1 << cpu, IPI_AST);
+			ipi_cpu(cpu, IPI_AST);
 		break;
 	default:
 		break;

Modified: head/sys/kern/sched_ule.c
==============================================================================
--- head/sys/kern/sched_ule.c	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/kern/sched_ule.c	Fri Aug  6 15:36:59 2010	(r210939)
@@ -851,7 +851,7 @@ sched_balance_pair(struct tdq *high, str
 		 * IPI the target cpu to force it to reschedule with the new
 		 * workload.
 		 */
-		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
+		ipi_cpu(TDQ_ID(low), IPI_PREEMPT);
 	}
 	tdq_unlock_pair(high, low);
 	return (moved);
@@ -974,7 +974,7 @@ tdq_notify(struct tdq *tdq, struct threa
 			return;
 	}
 	tdq->tdq_ipipending = 1;
-	ipi_selected(1 << cpu, IPI_PREEMPT);
+	ipi_cpu(cpu, IPI_PREEMPT);
 }
 
 /*
@@ -2411,7 +2411,7 @@ sched_affinity(struct thread *td)
 	cpu = ts->ts_cpu;
 	ts->ts_cpu = sched_pickcpu(td, 0);
 	if (cpu != PCPU_GET(cpuid))
-		ipi_selected(1 << cpu, IPI_PREEMPT);
+		ipi_cpu(cpu, IPI_PREEMPT);
 #endif
 }
 

Modified: head/sys/kern/subr_smp.c
==============================================================================
--- head/sys/kern/subr_smp.c	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/kern/subr_smp.c	Fri Aug  6 15:36:59 2010	(r210939)
@@ -181,7 +181,7 @@ forward_signal(struct thread *td)
 	id = td->td_oncpu;
 	if (id == NOCPU)
 		return;
-	ipi_selected(1 << id, IPI_AST);
+	ipi_cpu(id, IPI_AST);
 }
 
 /*

Modified: head/sys/mips/include/smp.h
==============================================================================
--- head/sys/mips/include/smp.h	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/mips/include/smp.h	Fri Aug  6 15:36:59 2010	(r210939)
@@ -33,6 +33,7 @@
 #ifndef LOCORE
 
 void	ipi_all_but_self(int ipi);
+void	ipi_cpu(int cpu, u_int ipi);
 void	ipi_selected(cpumask_t cpus, int ipi);
 void	smp_init_secondary(u_int32_t cpuid);
 void	mpentry(void);

Modified: head/sys/mips/mips/mp_machdep.c
==============================================================================
--- head/sys/mips/mips/mp_machdep.c	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/mips/mips/mp_machdep.c	Fri Aug  6 15:36:59 2010	(r210939)
@@ -92,6 +92,15 @@ ipi_selected(cpumask_t cpus, int ipi)
 	}
 }
 
+/* Send an IPI to a specific CPU. */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+
+	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi);
+	ipi_send(cpuid_to_pcpu[cpu], ipi);
+}
+
 /*
  * Handle an IPI sent to this processor.
  */

Modified: head/sys/powerpc/include/smp.h
==============================================================================
--- head/sys/powerpc/include/smp.h	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/powerpc/include/smp.h	Fri Aug  6 15:36:59 2010	(r210939)
@@ -40,6 +40,7 @@
 #ifndef LOCORE
 
 void	ipi_all_but_self(int ipi);
+void	ipi_cpu(int cpu, u_int ipi);
 void	ipi_selected(cpumask_t cpus, int ipi);
 
 struct cpuref {

Modified: head/sys/powerpc/powerpc/mp_machdep.c
==============================================================================
--- head/sys/powerpc/powerpc/mp_machdep.c	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/powerpc/powerpc/mp_machdep.c	Fri Aug  6 15:36:59 2010	(r210939)
@@ -336,6 +336,14 @@ ipi_selected(cpumask_t cpus, int ipi)
 	}
 }
 
+/* Send an IPI to a specific CPU. */
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+
+	ipi_send(cpuid_to_pcpu[cpu], ipi);
+}
+
 /* Send an IPI to all CPUs EXCEPT myself. */
 void
 ipi_all_but_self(int ipi)

Modified: head/sys/sparc64/include/smp.h
==============================================================================
--- head/sys/sparc64/include/smp.h	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/sparc64/include/smp.h	Fri Aug  6 15:36:59 2010	(r210939)
@@ -135,6 +135,17 @@ ipi_selected(u_int cpus, u_int ipi)
 	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
 }
 
+static __inline void
+ipi_cpu(int cpu, u_int ipi)
+{
+
+	/*
+	 * XXX: Not ideal, but would require more work to add a cpu_ipi_cpu
+	 * function pointer.
+	 */
+	cpu_ipi_selected(1 << cpu, 0, (u_long)tl_ipi_level, ipi);
+}
+
 #if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
 
 static __inline void *

Modified: head/sys/sun4v/include/smp.h
==============================================================================
--- head/sys/sun4v/include/smp.h	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/sun4v/include/smp.h	Fri Aug  6 15:36:59 2010	(r210939)
@@ -85,8 +85,9 @@ void cpu_ipi_preempt(struct trapframe *t
 void cpu_ipi_hardclock(struct trapframe *tf);
 void cpu_ipi_statclock(struct trapframe *tf);
 
-void	ipi_selected(u_int cpus, u_int ipi);
 void	ipi_all_but_self(u_int ipi);
+void	ipi_cpu(int cpu, u_int ipi);
+void	ipi_selected(u_int cpus, u_int ipi);
 
 vm_offset_t mp_tramp_alloc(void);
 void        mp_set_tsb_desc_ra(vm_paddr_t);

Modified: head/sys/sun4v/sun4v/mp_machdep.c
==============================================================================
--- head/sys/sun4v/sun4v/mp_machdep.c	Fri Aug  6 15:24:37 2010	(r210938)
+++ head/sys/sun4v/sun4v/mp_machdep.c	Fri Aug  6 15:36:59 2010	(r210939)
@@ -535,7 +535,6 @@ retry:
 	}
 }
 
-
 void
 ipi_selected(u_int icpus, u_int ipi)
 {
@@ -550,7 +549,6 @@ ipi_selected(u_int icpus, u_int ipi)
 	 * 4) handling 4-way threading vs 2-way threading should happen here
 	 *    and not in forward wakeup
 	 */
-	
 	cpulist = PCPU_GET(cpulist);
 	cpus = (icpus & ~PCPU_GET(cpumask));
 	
@@ -562,8 +560,32 @@ ipi_selected(u_int icpus, u_int ipi)
 		cpu_count++;
 	}
 
-	cpu_ipi_selected(cpu_count, cpulist, (u_long)tl_ipi_level, ipi, 0, &ackmask);
-	
+	cpu_ipi_selected(cpu_count, cpulist, (u_long)tl_ipi_level, ipi, 0,
+	    &ackmask);
+}
+
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+	int cpu_count;
+	uint16_t *cpulist;
+	uint64_t ackmask;
+
+	/* 
+	 * 
+	 * 3) forward_wakeup appears to abuse ASTs
+	 * 4) handling 4-way threading vs 2-way threading should happen here
+	 *    and not in forward wakeup
+	 */
+	cpulist = PCPU_GET(cpulist);
+	if (PCPU_GET(cpumask) & (1 << cpu))
+		cpu_count = 0;
+	else {
+		cpulist[0] = (uint16_t)cpu;
+		cpu_count = 1;
+	}
+	cpu_ipi_selected(cpu_count, cpulist, (u_long)tl_ipi_level, ipi, 0,
+	    &ackmask);
 }
 
 void

