socsvn commit: r222377 - soc2011/rudot/kern

rudot at FreeBSD.org
Wed May 25 19:54:03 UTC 2011


Author: rudot
Date: Wed May 25 19:54:00 2011
New Revision: 222377
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=222377

Log:
  Removed the periodic recalculation of thread priorities
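
For context, the largest piece removed below is schedcpu(), the once-per-second
pass that decayed each thread's td_estcpu.  The stand-alone sketch below
reproduces that decay using the loadfactor() and decay_cpu() macros from
sched_4bsd.c; FSHIFT/FSCALE are assumed stand-ins for the kernel's fixed-point
constants, so treat this as an illustration of the arithmetic, not kernel code.

/*
 * Sketch of the estcpu decay that the removed schedcpu() performed
 * once per second for every thread.  loadfactor() and decay_cpu()
 * mirror the macros in sched_4bsd.c; FSHIFT/FSCALE are assumed values.
 */
#include <stdio.h>

#define	FSHIFT	11			/* assumed fixed-point shift */
#define	FSCALE	(1 << FSHIFT)
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
	unsigned int estcpu = 255;		/* ESTCPULIM maximum */
	unsigned int loadfac = loadfactor(1 * FSCALE);	/* load avg 1.0 */
	int sec;

	/*
	 * One decay step per second of sleep; at load 1.0 each step
	 * multiplies estcpu by roughly 2/3, so it drains from 255 to 0
	 * in about a dozen steps, consistent with the "six times the
	 * loadfactor" estimate in the removed updatepri() comment.
	 */
	for (sec = 1; estcpu != 0; sec++) {
		estcpu = decay_cpu(loadfac, estcpu);
		printf("after %2d s asleep: estcpu = %u\n", sec, estcpu);
	}
	return (0);
}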

Modified:
  soc2011/rudot/kern/sched_4bsd.c

Modified: soc2011/rudot/kern/sched_4bsd.c
==============================================================================
--- soc2011/rudot/kern/sched_4bsd.c	Wed May 25 18:04:11 2011	(r222376)
+++ soc2011/rudot/kern/sched_4bsd.c	Wed May 25 19:54:00 2011	(r222377)
@@ -121,22 +121,9 @@
 #define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */
 
 static void	setup_runqs(void);
-static void	schedcpu(void);
-static void	schedcpu_thread(void);
 static void	sched_priority(struct thread *td, u_char prio);
 static void	sched_setup(void *dummy);
-static void	maybe_resched(struct thread *td);
-static void	updatepri(struct thread *td);
-static void	resetpriority(struct thread *td);
-static void	resetpriority_thread(struct thread *td);
-
-static struct kproc_desc sched_kp = {
-        "schedcpu",
-        schedcpu_thread,
-        NULL
-};
-SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
-    &sched_kp);
+
 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
 
 /*
@@ -240,18 +227,6 @@
 	sched_tdcnt--;
 	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
 }
-/*
- * Arrange to reschedule if necessary, taking the priorities and
- * schedulers into account.
- */
-static void
-maybe_resched(struct thread *td)
-{
-
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	if (td->td_priority < curthread->td_priority)
-		curthread->td_flags |= TDF_NEEDRESCHED;
-}
 
 /*
  * This function is called when a thread is about to be put on run queue
@@ -419,181 +394,6 @@
  */
 #define	CCPU_SHIFT	11
 
-/*
- * Recompute process priorities, every hz ticks.
- * MP-safe, called without the Giant mutex.
- */
-/* ARGSUSED */
-static void
-schedcpu(void)
-{
-	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
-	struct thread *td;
-	struct proc *p;
-	struct td_sched *ts;
-	int awake, realstathz;
-
-	realstathz = stathz ? stathz : hz;
-	sx_slock(&allproc_lock);
-	FOREACH_PROC_IN_SYSTEM(p) {
-		PROC_LOCK(p);
-		FOREACH_THREAD_IN_PROC(p, td) {
-			awake = 0;
-			thread_lock(td);
-			ts = td->td_sched;
-			/*
-			 * Increment sleep time (if sleeping).  We
-			 * ignore overflow, as above.
-			 */
-			/*
-			 * The td_sched slptimes are not touched in wakeup
-			 * because the thread may not HAVE everything in
-			 * memory? XXX I think this is out of date.
-			 */
-			if (TD_ON_RUNQ(td)) {
-				awake = 1;
-				td->td_flags &= ~TDF_DIDRUN;
-			} else if (TD_IS_RUNNING(td)) {
-				awake = 1;
-				/* Do not clear TDF_DIDRUN */
-			} else if (td->td_flags & TDF_DIDRUN) {
-				awake = 1;
-				td->td_flags &= ~TDF_DIDRUN;
-			}
-
-			/*
-			 * ts_pctcpu is only for ps and ttyinfo().
-			 */
-			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
-			/*
-			 * If the td_sched has been idle the entire second,
-			 * stop recalculating its priority until
-			 * it wakes up.
-			 */
-			if (ts->ts_cpticks != 0) {
-#if	(FSHIFT >= CCPU_SHIFT)
-				ts->ts_pctcpu += (realstathz == 100)
-				    ? ((fixpt_t) ts->ts_cpticks) <<
-				    (FSHIFT - CCPU_SHIFT) :
-				    100 * (((fixpt_t) ts->ts_cpticks)
-				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
-#else
-				ts->ts_pctcpu += ((FSCALE - ccpu) *
-				    (ts->ts_cpticks *
-				    FSCALE / realstathz)) >> FSHIFT;
-#endif
-				ts->ts_cpticks = 0;
-			}
-			/*
-			 * If there are ANY running threads in this process,
-			 * then don't count it as sleeping.
-			 * XXX: this is broken.
-			 */
-			if (awake) {
-				if (ts->ts_slptime > 1) {
-					/*
-					 * In an ideal world, this should not
-					 * happen, because whoever woke us
-					 * up from the long sleep should have
-					 * unwound the slptime and reset our
-					 * priority before we run at the stale
-					 * priority.  Should KASSERT at some
-					 * point when all the cases are fixed.
-					 */
-					updatepri(td);
-				}
-				ts->ts_slptime = 0;
-			} else
-				ts->ts_slptime++;
-			if (ts->ts_slptime > 1) {
-				thread_unlock(td);
-				continue;
-			}
-			td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
-		      	resetpriority(td);
-			resetpriority_thread(td);
-			thread_unlock(td);
-		}
-		PROC_UNLOCK(p);
-	}
-	sx_sunlock(&allproc_lock);
-}
-
-/*
- * Main loop for a kthread that executes schedcpu once a second.
- */
-static void
-schedcpu_thread(void)
-{
-
-	for (;;) {
-		schedcpu();
-		pause("-", hz);
-	}
-}
-
-/*
- * Recalculate the priority of a process after it has slept for a while.
- * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
- * least six times the loadfactor will decay td_estcpu to zero.
- */
-static void
-updatepri(struct thread *td)
-{
-	struct td_sched *ts;
-	fixpt_t loadfac;
-	unsigned int newcpu;
-
-	ts = td->td_sched;
-	loadfac = loadfactor(averunnable.ldavg[0]);
-	if (ts->ts_slptime > 5 * loadfac)
-		td->td_estcpu = 0;
-	else {
-		newcpu = td->td_estcpu;
-		ts->ts_slptime--;	/* was incremented in schedcpu() */
-		while (newcpu && --ts->ts_slptime)
-			newcpu = decay_cpu(loadfac, newcpu);
-		td->td_estcpu = newcpu;
-	}
-}
-
-/*
- * Compute the priority of a process when running in user mode.
- * Arrange to reschedule if the resulting priority is better
- * than that of the current process.
- */
-static void
-resetpriority(struct thread *td)
-{
-	register unsigned int newpriority;
-
-	if (td->td_pri_class == PRI_TIMESHARE) {
-		newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
-		    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
-		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
-		    PRI_MAX_TIMESHARE);
-		sched_user_prio(td, newpriority);
-	}
-}
-
-/*
- * Update the thread's priority when the associated process's user
- * priority changes.
- */
-static void
-resetpriority_thread(struct thread *td)
-{
-
-	/* Only change threads with a time sharing user priority. */
-	if (td->td_priority < PRI_MIN_TIMESHARE ||
-	    td->td_priority > PRI_MAX_TIMESHARE)
-		return;
-
-	/* XXX the whole needresched thing is broken, but not silly. */
-	maybe_resched(td);
-
-	sched_prio(td, td->td_user_pri);
-}
 
 /* ARGSUSED */
 static void
@@ -667,10 +467,6 @@
 
 	ts->ts_cpticks++;
 	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
-	if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
-		resetpriority(td);
-		resetpriority_thread(td);
-	}
 
 	/*
 	 * Force a context switch if the current thread has used up a full
@@ -736,12 +532,6 @@
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	p->p_nice = nice;
-	FOREACH_THREAD_IN_PROC(p, td) {
-		thread_lock(td);
-		resetpriority(td);
-		resetpriority_thread(td);
-		thread_unlock(td);
-	}
 }
 
 void
@@ -844,13 +634,10 @@
 void
 sched_user_prio(struct thread *td, u_char prio)
 {
-	u_char oldprio;
-
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	td->td_base_user_pri = prio;
 	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
 		return;
-	oldprio = td->td_user_pri;
 	td->td_user_pri = prio;
 }
 
@@ -1027,10 +814,6 @@
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	ts = td->td_sched;
 	td->td_flags &= ~TDF_CANSWAP;
-	if (ts->ts_slptime > 1) {
-		updatepri(td);
-		resetpriority(td);
-	}
 	td->td_slptick = 0;
 	ts->ts_slptime = 0;
 	sched_add(td, SRQ_BORING);
@@ -1067,23 +850,9 @@
 	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
 	ts->ts_runq = &runq;
 
-	/*
-	 * If we are yielding (on the way out anyhow) or the thread
-	 * being saved is US, then don't try to be smart about preemption
-	 * or kicking off another CPU, as it won't help and may hinder.
-	 * In the YIELDING case, we are about to run whoever is being
-	 * put in the queue anyhow, and in the OURSELF case, we are
-	 * putting ourselves on the run queue, which also only happens
-	 * when we are about to yield.
-	 */
-	if ((flags & SRQ_YIELDING) == 0) {
-		if (maybe_preempt(td))
-			return;
-	}
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_add();
 	runq_add(ts->ts_runq, td, flags);
-	maybe_resched(td);
 }
 
 void
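
As an aside for readers of the diff: the removed resetpriority() boiled down
to one line of arithmetic mapping td_estcpu and the nice value to a user
priority.  The sketch below reproduces that formula; the constants are assumed
stand-ins for the values in <sys/priority.h> and sched_4bsd.c of that era, so
read it as an illustration rather than the kernel's code.

/*
 * Sketch of the user-priority formula applied by the removed
 * resetpriority().  All constants are assumed stand-ins.
 */
#include <stdio.h>

#define	PRI_MIN_TIMESHARE	160	/* assumed timeshare band */
#define	PRI_MAX_TIMESHARE	223
#define	PUSER			PRI_MIN_TIMESHARE
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu) */
#define	NICE_WEIGHT		1	/* priorities per nice level */
#define	PRIO_MIN		(-20)

static unsigned int
user_priority(unsigned int estcpu, int nice)
{
	unsigned int newpriority;

	newpriority = PUSER + estcpu / INVERSE_ESTCPU_WEIGHT +
	    NICE_WEIGHT * (nice - PRIO_MIN);
	/* Clamp into the timeshare band, as resetpriority() did. */
	if (newpriority < PRI_MIN_TIMESHARE)
		newpriority = PRI_MIN_TIMESHARE;
	if (newpriority > PRI_MAX_TIMESHARE)
		newpriority = PRI_MAX_TIMESHARE;
	return (newpriority);
}

int
main(void)
{
	/* A CPU hog at nice 0 vs. a freshly woken thread at nice -20. */
	printf("hog:  %u\n", user_priority(255, 0));
	printf("idle: %u\n", user_priority(0, -20));
	return (0);
}

With these stand-in values the hog lands at priority 211 and the nice -20
thread at 160; lower numeric priorities run first, which is why sched_clock()
used to call resetpriority() as estcpu climbed.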

