PERFORCE change 146044 for review
John Baldwin
jhb at FreeBSD.org
Sun Jul 27 13:13:23 UTC 2008
http://perforce.freebsd.org/chv.cgi?CH=146044
Change 146044 by jhb at jhb_zion on 2008/07/27 13:12:26
Better affinity handling:
- Set a flag on the thread when a cpuset is applied if the set
doesn't include all CPUs. This lets us avoid invoking
sched_pickcpu() in the common case in sched_add().
- Handle threads executing on other CPUs and threads that are
on a runqueue in sched_affinity().
Affected files ...
.. //depot/projects/smpng/sys/kern/sched_4bsd.c#76 edit
Differences ...
==== //depot/projects/smpng/sys/kern/sched_4bsd.c#76 (text+ko) ====
@@ -97,6 +97,7 @@
/* flags kept in td_flags */
#define TDF_DIDRUN TDF_SCHED0 /* thread actually ran. */
#define TDF_BOUND TDF_SCHED1 /* Bound to one CPU. */
+#define TDF_AFFINITY TDF_SCHED2 /* Has a non-"full" CPU set. */
#define SKE_RUNQ_PCPU(ts) \
((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
@@ -1165,34 +1166,25 @@
static int
sched_pickcpu(struct thread *td)
{
- int best, cpu, fullset;
+ int best, cpu;
mtx_assert(&sched_lock, MA_OWNED);
- fullset = 1;
best = NOCPU;
for (cpu = 0; cpu <= mp_maxid; cpu++) {
if (CPU_ABSENT(cpu))
continue;
- if (!THREAD_CAN_SCHED(td, cpu)) {
- /*
- * At least one available CPU isn't in our
- * set, so it isn't a "full" set.
- */
- fullset = 0;
+ if (!THREAD_CAN_SCHED(td, cpu))
continue;
- }
if (best == NOCPU)
best = cpu;
else if (runq_length[cpu] < runq_length[best])
best = cpu;
}
+ KASSERT(best != NOCPU, ("no valid CPUs"));
- if (fullset)
- return (NOCPU);
- else
- return (best);
+ return (best);
}
#endif
@@ -1232,25 +1224,25 @@
single_cpu = 1;
CTR3(KTR_RUNQ,
"sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
- } else if ((td)->td_flags & TDF_BOUND) {
+ } else if (td->td_flags & TDF_BOUND) {
/* Find CPU from bound runq */
KASSERT(SKE_RUNQ_PCPU(ts),("sched_add: bound td_sched not on cpu runq"));
cpu = ts->ts_runq - &runq_pcpu[0];
single_cpu = 1;
CTR3(KTR_RUNQ,
"sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
- } else {
+ } else if (td->td_flags & TDF_AFFINITY) {
/* Find a valid CPU for our cpuset */
cpu = sched_pickcpu(td);
- if (cpu == NOCPU) {
- CTR2(KTR_RUNQ,
+ ts->ts_runq = &runq_pcpu[cpu];
+ single_cpu = 1;
+ CTR3(KTR_RUNQ,
+ "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
+ } else {
+ CTR2(KTR_RUNQ,
"sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, td);
- ts->ts_runq = &runq;
- } else {
- single_cpu = 1;
- CTR3(KTR_RUNQ,
- "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
- }
+ cpu = NOCPU;
+ ts->ts_runq = &runq;
}
if (single_cpu && (cpu != PCPU_GET(cpuid))) {
@@ -1577,16 +1569,56 @@
sched_affinity(struct thread *td)
{
#ifdef SMP
+ struct td_sched *ts;
+ int cpu;
+
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
- THREAD_LOCK_ASSERT(td, MA_OWNED);
+ /*
+ * Set the TDF_AFFINITY flag if there is at least one CPU this
+ * thread can't run on.
+ */
+ td->td_flags &= ~TDF_AFFINITY;
+ for (cpu = 0; cpu <= mp_maxid; cpu++) {
+ if (CPU_ABSENT(cpu))
+ continue;
+ if (!THREAD_CAN_SCHED(td, cpu)) {
+ td->td_flags |= TDF_AFFINITY;
+ break;
+ }
+ }
/*
- * See if our current CPU is in the set. If not, force a
- * context switch.
+ * If this thread can run on all CPUs, nothing else to do.
*/
- if (THREAD_CAN_SCHED(td, PCPU_GET(cpuid)))
+ if (!(td->td_flags & TDF_AFFINITY))
return;
- mi_switch(SW_VOL, NULL);
+ switch (td->td_state) {
+ case TDS_RUNQ:
+ /*
+ * If we are on a per-CPU runqueue that is in the set,
+ * then nothing needs to be done.
+ */
+ ts = td->td_sched;
+ if (ts->ts_runq != &runq &&
+ THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
+ return;
+
+ /* Put this thread on a valid per-CPU runqueue. */
+ sched_rem(td);
+ sched_add(td, SRQ_BORING);
+ break;
+ case TDS_RUNNING:
+ /*
+ * See if our current CPU is in the set. If not, force a
+ * context switch.
+ */
+ if (THREAD_CAN_SCHED(td, td->td_oncpu))
+ return;
+
+ mi_switch(SW_VOL, NULL);
+ break;
+ }
#endif
}
More information about the p4-projects
mailing list