socsvn commit: r225038 - soc2011/rudot/kern
rudot at FreeBSD.org
Fri Aug 12 11:51:27 UTC 2011
Author: rudot
Date: Fri Aug 12 11:51:25 2011
New Revision: 225038
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=225038
Log:
check if preemption is desirable on every invocation of sched_add()
Modified:
soc2011/rudot/kern/sched_fbfs.c
Modified: soc2011/rudot/kern/sched_fbfs.c
==============================================================================
--- soc2011/rudot/kern/sched_fbfs.c Fri Aug 12 10:52:46 2011 (r225037)
+++ soc2011/rudot/kern/sched_fbfs.c Fri Aug 12 11:51:25 2011 (r225038)
@@ -692,10 +692,6 @@
sched_wakeup(struct thread *td)
{
struct td_sched *ts;
- struct thread *thr_worst;
- struct cpu_group *cg;
- cpumask_t dontuse, map, me;
- u_char c;
THREAD_LOCK_ASSERT(td, MA_OWNED);
ts = td->td_sched;
@@ -703,6 +699,47 @@
td->td_slptick = 0;
sched_add(td, SRQ_BORING);
+}
+
+void
+sched_add(struct thread *td, int flags)
+{
+ struct td_sched *ts;
+ struct thread *thr_worst;
+ cpumask_t dontuse, map, me;
+ struct cpu_group *cg;
+ u_char c;
+
+ ts = td->td_sched;
+ THREAD_LOCK_ASSERT(td, MA_OWNED);
+ KASSERT((td->td_inhibitors == 0),
+ ("sched_add: trying to run inhibited thread"));
+ KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
+ ("sched_add: bad thread state"));
+ KASSERT(td->td_flags & TDF_INMEM,
+ ("sched_add: thread swapped out"));
+ KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
+ "prio:%d", td->td_priority, KTR_ATTR_LINKED,
+ sched_tdname(curthread));
+ KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
+ KTR_ATTR_LINKED, sched_tdname(td));
+
+ /*
+ * Now that the thread is moving to the run-queue, set the lock
+ * to the scheduler's lock.
+ */
+ if (td->td_lock != &sched_lock) {
+ mtx_lock_spin(&sched_lock);
+ thread_lock_set(td, &sched_lock);
+ }
+ TD_SET_RUNQ(td);
+ CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
+ ts->ts_runq = &runq;
+
+ if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+ sched_load_add();
+ runq_add(ts->ts_runq, td, flags);
+
me = PCPU_GET(cpumask);
dontuse = me | stopped_cpus | hlt_cpus_mask;
map = idle_cpus_mask & ~dontuse;
@@ -755,42 +792,6 @@
}
void
-sched_add(struct thread *td, int flags)
-{
- struct td_sched *ts;
-
- ts = td->td_sched;
- THREAD_LOCK_ASSERT(td, MA_OWNED);
- KASSERT((td->td_inhibitors == 0),
- ("sched_add: trying to run inhibited thread"));
- KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
- ("sched_add: bad thread state"));
- KASSERT(td->td_flags & TDF_INMEM,
- ("sched_add: thread swapped out"));
- KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
- "prio:%d", td->td_priority, KTR_ATTR_LINKED,
- sched_tdname(curthread));
- KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
- KTR_ATTR_LINKED, sched_tdname(td));
-
- /*
- * Now that the thread is moving to the run-queue, set the lock
- * to the scheduler's lock.
- */
- if (td->td_lock != &sched_lock) {
- mtx_lock_spin(&sched_lock);
- thread_lock_set(td, &sched_lock);
- }
- TD_SET_RUNQ(td);
- CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
- ts->ts_runq = &runq;
-
- if ((td->td_proc->p_flag & P_NOLOAD) == 0)
- sched_load_add();
- runq_add(ts->ts_runq, td, flags);
-}
-
-void
sched_rem(struct thread *td)
{
struct td_sched *ts;
More information about the svn-soc-all
mailing list