socsvn commit: r224784 - soc2011/rudot/kern

rudot at FreeBSD.org
Sun Jul 31 21:43:15 UTC 2011


Author: rudot
Date: Sun Jul 31 21:43:13 2011
New Revision: 224784
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=224784

Log:
  Add per-process CPU accounting.
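  
  Each thread's td_sched now records in ts_used the number of CPU ticks
  consumed within the last PCT_WINDOW (5) seconds.  sched_tick()
  increments the counter while the thread runs; when the thread is
  switched back in, the counter is decayed by the number of ticks it
  spent off-CPU.  sched_pctcpu() converts the per-second average into
  the usual fixed-point fraction:
  
      nticks = ts_used / PCT_WINDOW
      pct = (FSCALE * ((FSCALE * nticks) / hz)) >> FSHIFT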

Modified:
  soc2011/rudot/kern/sched_fbfs.c

Modified: soc2011/rudot/kern/sched_fbfs.c
==============================================================================
--- soc2011/rudot/kern/sched_fbfs.c	Sun Jul 31 20:06:11 2011	(r224783)
+++ soc2011/rudot/kern/sched_fbfs.c	Sun Jul 31 21:43:13 2011	(r224784)
@@ -74,17 +74,22 @@
 static int sched_slice = 1;
 
 /*
+ * The size, in seconds, of the time window over which we compute the
+ * CPU utilization percentage.
+ */
+#define	PCT_WINDOW	5
+
+/*
  * The schedulable entity that runs a context.
  * This is  an extension to the thread structure and is tailored to
  * the requirements of this scheduler
  */
 struct td_sched {
-	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
-	int		ts_cpticks;	/* (j) Ticks of cpu time. */
-	int		ts_slptime;	/* (j) Seconds !RUNNING. */
 	int		ts_flags;
 	int		ts_vdeadline;	/* virtual deadline. */
 	int		ts_slice;	/* Remaining slice in number of ticks */
+	int		ts_cswtick;	/* Tick when we were switched out. */
+	int		ts_incrtick;	/* Last tick ts_used was incremented. */
+	int		ts_used;	/* CPU ticks used within PCT_WINDOW. */
 	struct runq	*ts_runq;	/* runq the thread is currently on */
 #ifdef KTR
 	char		ts_name[TS_NAME_LEN];
@@ -165,13 +170,6 @@
 	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
 }
 
-/*
- * This function is called when a thread is about to be put on run queue
- * because it has been made runnable or its priority has been adjusted.  It
- * determines if the new thread should be immediately preempted to.  If so,
- * it switches to it and eventually returns true.  If not, it returns false
- * so that the caller may place the thread on an appropriate run queue.
- */
 int
 maybe_preempt(struct thread *td)
 {
@@ -236,6 +234,7 @@
 	thread0.td_sched = &td_sched0;
 	thread0.td_lock = &sched_lock;
 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
+	td_sched0.ts_used = 0;
 	td_sched0.ts_slice = sched_slice;
 }
 
@@ -259,8 +258,6 @@
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	ts = td->td_sched;
 
-	ts->ts_cpticks++;
-
 	if (--ts->ts_slice > 0)
 		return;
 
@@ -299,8 +296,6 @@
 
 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
 	    "prio:%d", child->td_priority);
-	thread_lock(td);
-	thread_unlock(td);
 	mtx_lock_spin(&sched_lock);
 	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_rem();
@@ -326,6 +321,7 @@
 	ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
 	ts->ts_vdeadline = td->td_sched->ts_vdeadline;
 	ts->ts_slice = td->td_sched->ts_slice;
+	ts->ts_used = td->td_sched->ts_used;
 }
 
 void
@@ -472,7 +468,6 @@
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	td->td_slptick = ticks;
-	td->td_sched->ts_slptime = 0;
 	if (pri)
 		sched_prio(td, pri);
 	if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
@@ -485,6 +480,7 @@
 	struct mtx *tmtx;
 	struct td_sched *ts;
 	struct proc *p;
+	int time_passed;
 
 	tmtx = NULL;
 	ts = td->td_sched;
@@ -571,6 +567,7 @@
 			(*dtrace_vtime_switch_func)(newtd);
 #endif
 
+		ts->ts_cswtick = ticks;
 		cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
 		lock_profile_obtain_lock_success(&sched_lock.lock_object,
 		    0, 0, __FILE__, __LINE__);
@@ -590,6 +587,9 @@
 		 * needed to, or the thread_wait() or wait() will
 		 * need to reap it.
 		 */
+		/* Decay ts_used by the time this thread spent off-CPU. */
+		time_passed = ticks - ts->ts_cswtick;
+		ts->ts_used = imax(ts->ts_used - time_passed, 0);
 #ifdef	HWPMC_HOOKS
 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
@@ -666,6 +666,7 @@
 	u_char max_prio;
 
 	max_thread = curthread;
+	MPASS(max_thread != NULL);
 	max_prio = max_thread->td_priority;
 	ts = max_thread->td_sched;
 	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
@@ -685,7 +686,7 @@
 			}
 		}
 	}
-
+	MPASS(max_thread != NULL);
 	return (max_thread);
 }
 
@@ -702,7 +703,6 @@
 	ts = td->td_sched;
 	td->td_flags &= ~TDF_CANSWAP;
 	td->td_slptick = 0;
-	ts->ts_slptime = 0;
 	sched_add(td, SRQ_BORING);
 
 	me = PCPU_GET(cpumask);
@@ -736,6 +736,7 @@
 	 * We did not wake lastcpu and there is no suitable idle cpu
 	 */
 	thr_worst = worst_running_thread();
+	MPASS(thr_worst != NULL);
 	c = thr_worst->td_oncpu;
 	if (thr_worst->td_priority < td->td_priority)
 		return;
@@ -821,6 +822,8 @@
 	int deadline_min;
 	
 	td = td_min = TAILQ_FIRST(rqh);
+	MPASS(td_min != NULL);
 	deadline_min = td_min->td_sched->ts_vdeadline;
 
 	td = TAILQ_NEXT(td, td_runq);
@@ -833,6 +836,7 @@
 		td = TAILQ_NEXT(td, td_runq);
 	}
 
+	MPASS(td_min != NULL);
 	return (td_min);
 }
 
@@ -852,6 +856,7 @@
 		pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
 		if ((pri == RQ_TIMESHARE) || (pri == RQ_IDLE)) {
 			td = edf_choose(&rq->rq_queues[pri]);
+			KASSERT(td != NULL,
+			    ("runq_choose_bfs: no thread on busy queue"));
 			return (td);
 		}
 		rqh = &rq->rq_queues[pri];
@@ -881,7 +886,7 @@
 	rq = &runq;
 	td = runq_choose_bfs(&runq);
 
-	if (td) {
+	if (td != NULL) {
 		runq_remove(rq, td);
 		td->td_flags |= TDF_DIDRUN;
 
@@ -989,15 +994,44 @@
 sched_pctcpu(struct thread *td)
 {
 	struct td_sched *ts;
+	int time_passed;
+	int nticks;
+	fixpt_t pct;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	ts = td->td_sched;
-	return (ts->ts_pctcpu);
+
+	switch (td->td_state) {
+	case TDS_RUNNING:
+		/* On CPU; ts_used is up to date. */
+		nticks = ts->ts_used;
+		break;
+	default:
+		/* Off CPU; decay ts_used by the time since switch-out. */
+		time_passed = ticks - ts->ts_cswtick;
+		nticks = imax(ts->ts_used - time_passed, 0);
+	}
+	nticks /= PCT_WINDOW;
+
+	if (nticks < 0 || nticks > hz)
+		panic("sched_pctcpu: nticks %d out of range", nticks);
+
+	pct = (FSCALE * ((FSCALE * nticks) / hz)) >> FSHIFT;
+
+	return (pct);
 }
 
 void
 sched_tick(void)
 {
+	struct td_sched *ts;
+
+	THREAD_LOCK_ASSERT(curthread, MA_OWNED);
+	ts = curthread->td_sched;
+	/* Count each hardclock tick at most once per thread. */
+	if (ts->ts_incrtick == ticks)
+		return;
+	/* Saturate at one full window's worth of ticks. */
+	if (ts->ts_used < (hz * PCT_WINDOW)) {
+		ts->ts_used++;
+		ts->ts_incrtick = ticks;
+	}
 }
 
 /*
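
For reference, here is a minimal userland sketch of the fixed-point
conversion done by sched_pctcpu() above.  FSHIFT and FSCALE mirror
their definitions in <sys/param.h>; hz and the sample tick count are
made-up values for the demonstration, not taken from the patch.

#include <stdio.h>

#define	FSHIFT		11		/* bits after the binary point */
#define	FSCALE		(1 << FSHIFT)
#define	PCT_WINDOW	5		/* seconds, as in the patch */

typedef unsigned int fixpt_t;

int
main(void)
{
	int hz = 1000;			/* assumed tick rate */
	int ts_used = 2500;		/* assumed ticks used in the window */
	int nticks = ts_used / PCT_WINDOW;	/* 500 ticks/s average */
	fixpt_t pct = (FSCALE * ((FSCALE * nticks) / hz)) >> FSHIFT;

	/* 500 of 1000 ticks/s is 50%: pct == FSCALE / 2 == 1024. */
	printf("pct = %u/%u (%.1f%%)\n", pct, (unsigned)FSCALE,
	    100.0 * pct / FSCALE);
	return (0);
}

Utilities such as ps(1) scale this fraction to a percentage for
display.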

