PERFORCE change 100367 for review

Peter Wemm peter at FreeBSD.org
Sat Jul 1 00:04:07 UTC 2006


http://perforce.freebsd.org/chv.cgi?CH=100367

Change 100367 by peter at peter_work on 2006/07/01 00:03:11

	Add a special sched_run_ithread() function as an optimized case
	of setrunqueue() and the myriad of functions and tests that it
	invokes.  Now that alpha is gone, ithreads on 4bsd can be
	scheduled very simply: all of the priority balancing, tests for
	IPI'ing, etc. can be eliminated.  For 4bsd, we either preempt on
	the current cpu or put the thread on the run queue.  This is
	identical to what the current code does, but written in a far
	less claw-your-eyes-out way.
	
	This certainly breaks compiling with SCHED_ULE because I haven't done
	the corresponding work there.  In the meantime, a stub that simply
	implements sched_run_ithread() as a call to setrunqueue() could be
	put in.
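	
	A minimal sketch of what such a sched_ule.c stub might look like
	(my illustration only; it is not part of this change):
	
		void
		sched_run_ithread(struct thread *td)
		{
	
			/* Fall back to the generic path until ULE gets
			 * a native implementation. */
			setrunqueue(td, SRQ_INTR);
		}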

Affected files ...

.. //depot/projects/bike_sched/sys/kern/kern_intr.c#3 edit
.. //depot/projects/bike_sched/sys/kern/sched_4bsd.c#6 edit
.. //depot/projects/bike_sched/sys/sys/sched.h#5 edit

Differences ...

==== //depot/projects/bike_sched/sys/kern/kern_intr.c#3 (text+ko) ====

@@ -531,7 +531,7 @@
 		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
 		    p->p_comm);
 		TD_CLR_IWAIT(td);
-		setrunqueue(td, SRQ_INTR);
+		sched_run_ithread(td);
 	} else {
 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
 		    __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);

==== //depot/projects/bike_sched/sys/kern/sched_4bsd.c#6 (text+ko) ====

@@ -1084,6 +1084,42 @@
 #endif /* SMP */
 
 void
+sched_run_ithread(struct thread *td)
+{
+	struct kse *ke = td->td_kse;
+
+	/* Inline of setrunqueue */
+	CTR2(KTR_RUNQ, "sched_run_ithread: td:%p pid:%d",
+	    td, td->td_proc->p_pid);
+	mtx_assert(&sched_lock, MA_OWNED);
+	KASSERT((td->td_inhibitors == 0),
+	    ("sched_run_ithread: trying to run inhibited thread"));
+	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
+	    ("sched_run_ithread: bad thread state"));
+	KASSERT(ke->ke_state != KES_ONRUNQ,
+	    ("sched_run_ithread: kse %p (%s) already in run queue", ke,
+	    td->td_proc->p_comm));
+	KASSERT(td->td_proc->p_sflag & PS_INMEM,
+	    ("sched_run_ithread: process swapped out"));
+	CTR5(KTR_SCHED, "sched_run_ithread: %p(%s) prio %d by %p(%s)",
+	    td, td->td_proc->p_comm, td->td_priority, curthread,
+	    curthread->td_proc->p_comm);
+	CTR2(KTR_RUNQ, "sched_run_ithread: adding kse:%p (td:%p) to runq", ke, td);
+
+	TD_SET_RUNQ(td);
+	ke->ke_runq = &runq;
+	/* Preempt if we can.  If we did, we're finished */
+	if (maybe_preempt(td))
+		return;
+	/* We didn't preempt. Place on runq */
+	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+		sched_load_add();
+	runq_add(ke->ke_runq, ke, SRQ_INTR);
+	ke->ke_state = KES_ONRUNQ;
+	maybe_resched(td);
+}
+
+void
 sched_rem(struct thread *td)
 {
 	struct kse *ke;

==== //depot/projects/bike_sched/sys/sys/sched.h#5 (text+ko) ====

@@ -75,6 +75,7 @@
 void	sched_rem(struct thread *td);
 void	sched_tick(void);
 void	sched_relinquish(struct thread *td);
+void	sched_run_ithread(struct thread *td);
 
 /*
  * Binding makes cpu affinity permanent while pinning is used to temporarily

