PERFORCE change 138636 for review
Peter Wemm
peter at FreeBSD.org
Wed Mar 26 18:02:29 UTC 2008
http://perforce.freebsd.org/chv.cgi?CH=138636
Change 138636 by peter at peter_overcee on 2008/03/26 18:01:34
IFC @138629
Affected files ...
.. //depot/projects/bike_sched/sys/kern/sched_4bsd.c#23 integrate
Differences ...
==== //depot/projects/bike_sched/sys/kern/sched_4bsd.c#23 (text+ko) ====
@@ -33,9 +33,10 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/sched_4bsd.c,v 1.118 2008/03/16 10:58:05 rwatson Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/sched_4bsd.c,v 1.123 2008/03/20 05:51:16 jeff Exp $");
#include "opt_hwpmc_hooks.h"
+#include "opt_sched.h"
#include <sys/param.h>
#include <sys/systm.h>
@@ -81,15 +82,14 @@
*/
struct td_sched {
fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */
- u_char ts_flags; /* (t) Flags */
int ts_cpticks; /* (j) Ticks of cpu time. */
int ts_slptime; /* (j) Seconds !RUNNING. */
struct runq *ts_runq; /* runq the thread is currently on */
};
-#define TSF_DIDRUN 0x01 /* thread actually ran. */
-#define TSF_EXIT 0x02 /* thread is being killed. */
-#define TSF_BOUND 0x04 /* stuck to one CPU */
+/* flags kept in td_flags */
+#define TDF_DIDRUN TDF_SCHED0 /* thread actually ran. */
+#define TDF_BOUND TDF_SCHED1 /* Bound to one CPU. */
#define SKE_RUNQ_PCPU(ts) \
((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
@@ -184,6 +184,9 @@
/* Enable forwarding of wakeups to all other cpus */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
+static int runq_fuzz = 1;
+SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
+
static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
&forward_wakeup_enabled, 0,
@@ -254,6 +257,89 @@
}
/*
+ * This function is called when a thread is about to be put on a run queue
+ * because it has been made runnable or its priority has been adjusted. It
+ * determines whether to preempt immediately to the new thread. If so, it
+ * switches to that thread and eventually returns true. If not, it returns
+ * false so that the caller may place the thread on an appropriate run queue.
+ */
+int
+maybe_preempt(struct thread *td)
+{
+#ifdef PREEMPTION
+ struct thread *ctd;
+ int cpri, pri;
+#endif
+
+#ifdef PREEMPTION
+ /*
+ * The new thread should not preempt the current thread if any of the
+ * following conditions are true:
+ *
+ * - The kernel is in the throes of crashing (panicstr).
+ * - The current thread has a higher (numerically lower) or
+ * equivalent priority. Note that this prevents curthread from
+ * trying to preempt to itself.
+ * - It is too early in the boot for context switches (cold is set).
+ * - The current thread has an inhibitor set or is in the process of
+ * exiting. In this case, the current thread is about to switch
+ * out anyway, so there's no point in preempting. If we did,
+ * the current thread would not be properly resumed either, so
+ * just avoid that whole landmine.
+ * - If the new thread's priority is not a realtime priority and
+ * the current thread's priority is not an idle priority and
+ * FULL_PREEMPTION is disabled.
+ *
+ * If all of these conditions are false, but the current thread is in
+ * a nested critical section, then we have to defer the preemption
+ * until we exit the critical section. Otherwise, switch immediately
+ * to the new thread.
+ */
+ ctd = curthread;
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
+ KASSERT((td->td_inhibitors == 0),
+ ("maybe_preempt: trying to run inhibited thread"));
+ pri = td->td_priority;
+ cpri = ctd->td_priority;
+ if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
+ TD_IS_INHIBITED(ctd))
+ return (0);
+#ifndef FULL_PREEMPTION
+ if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
+ return (0);
+#endif
+
+ if (ctd->td_critnest > 1) {
+ CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
+ ctd->td_critnest);
+ ctd->td_owepreempt = 1;
+ return (0);
+ }
+ /*
+ * Thread is runnable but not yet put on system run queue.
+ */
+ MPASS(ctd->td_lock == td->td_lock);
+ MPASS(TD_ON_RUNQ(td));
+ TD_SET_RUNNING(td);
+ CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
+ td->td_proc->p_pid, td->td_name);
+ SCHED_STAT_INC(switch_preempt);
+ mi_switch(SW_INVOL|SW_PREEMPT, td);
+ /*
+ * td's lock pointer may have changed. We have to return with it
+ * locked.
+ */
+ spinlock_enter();
+ thread_unlock(ctd);
+ thread_lock(td);
+ spinlock_exit();
+ return (1);
+#else
+ return (0);
+#endif
+}
+
+/*
* Constants for digital decay and forget:
* 90% of (td_estcpu) usage in 5 * loadav time
* 95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
@@ -356,7 +442,7 @@
realstathz = stathz ? stathz : hz;
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
- PROC_SLOCK(p);
+ PROC_LOCK(p);
FOREACH_THREAD_IN_PROC(p, td) {
awake = 0;
thread_lock(td);
@@ -372,13 +458,13 @@
*/
if (TD_ON_RUNQ(td)) {
awake = 1;
- ts->ts_flags &= ~TSF_DIDRUN;
+ td->td_flags &= ~TDF_DIDRUN;
} else if (TD_IS_RUNNING(td)) {
awake = 1;
- /* Do not clear TSF_DIDRUN */
- } else if (ts->ts_flags & TSF_DIDRUN) {
+ /* Do not clear TDF_DIDRUN */
+ } else if (td->td_flags & TDF_DIDRUN) {
awake = 1;
- ts->ts_flags &= ~TSF_DIDRUN;
+ td->td_flags &= ~TDF_DIDRUN;
}
/*
@@ -435,7 +521,7 @@
resetpriority_thread(td);
thread_unlock(td);
} /* end of thread loop */
- PROC_SUNLOCK(p);
+ PROC_UNLOCK(p);
} /* end of process loop */
sx_sunlock(&allproc_lock);
}
@@ -627,10 +713,13 @@
void
sched_fork(struct thread *td, struct thread *childtd)
{
+ struct td_sched *ts;
+
childtd->td_estcpu = td->td_estcpu;
childtd->td_lock = &sched_lock;
childtd->td_cpuset = cpuset_ref(td->td_cpuset);
- sched_newthread(childtd);
+ ts = childtd->td_sched;
+ bzero(ts, sizeof(*ts));
}
void
@@ -639,7 +728,6 @@
struct thread *td;
PROC_LOCK_ASSERT(p, MA_OWNED);
- PROC_SLOCK_ASSERT(p, MA_OWNED);
p->p_nice = nice;
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
@@ -670,8 +758,7 @@
if (td->td_priority == prio)
return;
td->td_priority = prio;
- if (TD_ON_RUNQ(td) &&
- td->td_rqindex != (prio / RQ_PPQ)) {
+ if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
sched_rem(td);
sched_add(td, SRQ_BORING);
}
@@ -852,7 +939,7 @@
*/
KASSERT((newtd->td_inhibitors == 0),
("trying to run inhibited thread"));
- TD_TO_TS(newtd)->ts_flags |= TSF_DIDRUN;
+ newtd->td_flags |= TDF_DIDRUN;
TD_SET_RUNNING(newtd);
if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_add();
@@ -1077,7 +1164,7 @@
single_cpu = 1;
CTR3(KTR_RUNQ,
"sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
- } else if ((ts)->ts_flags & TSF_BOUND) {
+ } else if ((td)->td_flags & TDF_BOUND) {
/* Find CPU from bound runq */
KASSERT(SKE_RUNQ_PCPU(ts),("sched_add: bound td_sched not on cpu runq"));
cpu = ts->ts_runq - &runq_pcpu[0];
@@ -1114,7 +1201,7 @@
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_add();
- runq_add(ts->ts_runq, ts, flags);
+ runq_add(ts->ts_runq, td, flags);
}
#else /* SMP */
{
@@ -1159,7 +1246,7 @@
}
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_add();
- runq_add(ts->ts_runq, ts, flags);
+ runq_add(ts->ts_runq, td, flags);
maybe_resched(td);
}
#endif /* SMP */
@@ -1187,7 +1274,7 @@
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_rem();
- runq_remove(ts->ts_runq, ts);
+ runq_remove(ts->ts_runq, td);
TD_SET_CAN_RUN(td);
}
@@ -1198,26 +1285,26 @@
struct thread *
sched_choose(void)
{
- struct td_sched *td;
+ struct thread *td;
struct runq *rq;
mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
- struct thread *kecpu;
+ struct thread *tdcpu;
rq = &runq;
- td = runq_choose(&runq);
- kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
+ td = runq_choose_fuzz(&runq, runq_fuzz);
+ tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
if (td == NULL ||
- (kecpu != NULL &&
- kecpu->td_priority < td->td_priority)) {
- CTR2(KTR_RUNQ, "choosing thread %p from pcpu runq %d", kecpu,
+ (tdcpu != NULL &&
+ tdcpu->td_priority < td->td_priority)) {
+ CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
PCPU_GET(cpuid));
- td = kecpu;
+ td = tdcpu;
rq = &runq_pcpu[PCPU_GET(cpuid)];
} else {
- CTR1(KTR_RUNQ, "choosing thread %p from main runq", td);
+ CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
}
#else
@@ -1227,7 +1314,7 @@
if (td) {
runq_remove(rq, td);
- TD_TO_TS(ts)->ts_flags |= TSF_DIDRUN;
+ td->td_flags |= TDF_DIDRUN;
KASSERT(td->td_flags & TDF_INMEM,
("sched_choose: thread swapped out"));
@@ -1280,7 +1367,7 @@
ts = TD_TO_TS(td);
- ts->ts_flags |= TSF_BOUND;
+ td->td_flags |= TDF_BOUND;
#ifdef SMP
ts->ts_runq = &runq_pcpu[cpu];
if (PCPU_GET(cpuid) == cpu)
@@ -1294,14 +1381,14 @@
sched_unbind(struct thread* td)
{
THREAD_LOCK_ASSERT(td, MA_OWNED);
- TD_TO_TS(td)->ts_flags &= ~TSF_BOUND;
+ td->td_flags &= ~TDF_BOUND;
}
int
sched_is_bound(struct thread *td)
{
THREAD_LOCK_ASSERT(td, MA_OWNED);
- return (TD_TO_TS(td)->ts_flags & TSF_BOUND);
+ return (td->td_flags & TDF_BOUND);
}
void
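
For readers who want to poke at the preemption decision added in the
maybe_preempt() hunk above without building a kernel, here is a standalone
userland model of that predicate. This is a hedged sketch, not the kernel
code: struct thread here is a stand-in, the priority cutoffs are the
conventional values from priority.h (assumed, not taken from this diff), and
all of the thread-lock/spinlock handling is omitted. Lower numeric priority
is better, and td_critnest is 1 while the (simulated) thread lock is held,
which is why the kernel tests "> 1".

#include <stdbool.h>
#include <stdio.h>

#define PRI_MAX_ITHD	47	/* assumed: end of interrupt-thread range */
#define PRI_MIN_IDLE	224	/* assumed: start of the idle class */

struct thread {			/* stand-in, not the kernel's struct */
	int	td_priority;
	int	td_critnest;	/* critical section nesting depth */
	bool	td_inhibited;	/* sleeping, suspended, blocked, ... */
	bool	td_owepreempt;
};

static bool panicking;		/* models panicstr != NULL */
static bool cold;		/* models "too early in boot" */

/* Return true if curtd should switch to newtd right now. */
static bool
should_preempt(struct thread *newtd, struct thread *curtd)
{
	int pri = newtd->td_priority;
	int cpri = curtd->td_priority;

	if (panicking || pri >= cpri || cold || curtd->td_inhibited)
		return (false);
#ifndef FULL_PREEMPTION
	/* Without FULL_PREEMPTION, only ithread priorities preempt. */
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (false);
#endif
	if (curtd->td_critnest > 1) {
		/* Nested critical section: record the debt, defer. */
		curtd->td_owepreempt = true;
		return (false);
	}
	return (true);
}

int
main(void)
{
	struct thread ithd = { .td_priority = 20, .td_critnest = 1 };
	struct thread ts = { .td_priority = 120, .td_critnest = 1 };

	printf("%d\n", should_preempt(&ithd, &ts));	/* 1: preempts */
	ts.td_critnest = 2;
	printf("%d\n", should_preempt(&ithd, &ts));	/* 0: deferred */
	printf("%d\n", ts.td_owepreempt);		/* 1: debt noted */
	return (0);
}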
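
The same hunk also splices in the long-standing decay comment ("90% of
(td_estcpu) usage in 5 * loadav time"). As a quick sanity check of that
claim: 4BSD decays estcpu by roughly (2 * loadav) / (2 * loadav + 1) once
per second, the factor derived in the comment block this file carries; the
kernel does this in fixed point via FSCALE, which this sketch skips.

#include <math.h>
#include <stdio.h>

int
main(void)
{
	for (double load = 1; load <= 8; load *= 2) {
		double factor = 2 * load / (2 * load + 1);
		/* Fraction left after decaying for 5 * loadav seconds. */
		double left = pow(factor, 5 * load);
		printf("loadav %.0f: %4.1f%% of estcpu remains\n",
		    load, 100 * left);
	}
	/*
	 * Prints ~13% at load 1, falling toward exp(-2.5) ~= 8% as the
	 * load grows: roughly "90% forgotten" regardless of load, as
	 * the comment advertises.
	 */
	return (0);
}

(Compile with -lm for pow().)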