PERFORCE change 185697 for review

Edward Tomasz Napierala trasz at FreeBSD.org
Fri Nov 12 17:07:40 UTC 2010


http://p4web.freebsd.org/@@185697?ac=10

Change 185697 by trasz at trasz_victim on 2010/11/12 17:07:14

	Move maintenance of CPU usage into a new kproc.  This way it is
	easy to measure how much time is spent running it.
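
	For reference, the new kernel process is created with the
	kproc_desc/SYSINIT pattern from kproc(9), exactly as in the diff
	below.  A minimal sketch of that pattern (the "example" names are
	illustrative, not part of this change):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/systm.h>

static void
example_kproc(void)
{

	for (;;) {
		/* Periodic work goes here. */
		pause("-", hz);		/* Sleep for about one second. */
	}
}

static struct kproc_desc example_kp = {
	"example",	/* Process name, as shown by ps(1) and top(1). */
	example_kproc,	/* Function run as the body of the process. */
	NULL		/* Optional place to store the struct proc pointer. */
};
SYSINIT(example, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
    &example_kp);

	Because the work runs in its own process, the CPU time it consumes
	shows up under that process name in top(1) when system processes
	are displayed, which is what makes the overhead easy to measure.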

Affected files ...

.. //depot/projects/soc2009/trasz_limits/sys/kern/kern_container.c#32 edit
.. //depot/projects/soc2009/trasz_limits/sys/kern/kern_resource.c#52 edit

Differences ...

==== //depot/projects/soc2009/trasz_limits/sys/kern/kern_container.c#32 (text+ko) ====

@@ -37,15 +37,20 @@
 #include <sys/container.h>
 #include <sys/param.h>
 #include <sys/kernel.h>
+#include <sys/kthread.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/resourcevar.h>
 #include <sys/sbuf.h>
+#include <sys/sched.h>
 #include <sys/sdt.h>
+#include <sys/sx.h>
 #include <sys/sysent.h>
 #include <sys/sysproto.h>
 #include <sys/systm.h>
+#include <sys/umtx.h>
 
 #ifdef HRL
 #include <sys/hrl.h>
@@ -590,3 +595,84 @@
 #endif
 	container_destroy(&p->p_container);
 }
+
+/*
+ * The code below runs in the "containerd" kernel process.
+ */
+static void
+rusage_throttle(struct thread *td, int throttle)
+{
+	u_char oldpri;
+	u_char newpri;
+	int type;
+
+	if (throttle) {
+		td->td_flags |= TDF_THROTTLED;
+		newpri = PRI_MIN_IDLE;
+		type = RTP_PRIO_IDLE;
+	} else if (td->td_flags & TDF_THROTTLED) {
+		td->td_flags &= ~TDF_THROTTLED;
+		newpri = PRI_MIN_TIMESHARE;
+		type = RTP_PRIO_NORMAL;
+	} else
+		return;
+
+	/* Mostly copied from rtp_to_pri(). */
+	sched_class(td, type);	/* XXX fix */
+	oldpri = td->td_user_pri;
+	sched_user_prio(td, newpri);
+	if (TD_IS_RUNNING(td) || TD_CAN_RUN(td))
+		sched_prio(td, td->td_user_pri); /* XXX dubious */
+	if (TD_ON_UPILOCK(td) && oldpri != newpri)
+		umtx_pi_adjust(td, oldpri);
+}
+
+static void
+containerd(void)
+{
+	int pctcpu;
+	struct thread *td;
+	struct proc *p;
+	struct timeval wallclock;
+	uint64_t pctcpu_limit;
+
+	for (;;) {
+		sx_slock(&allproc_lock);
+		FOREACH_PROC_IN_SYSTEM(p) {
+			pctcpu_limit = rusage_get_limit(p, RUSAGE_PCTCPU);
+			PROC_SLOCK(p);
+			pctcpu = 0;
+			FOREACH_THREAD_IN_PROC(p, td) {
+				ruxagg(p, td);
+				thread_lock(td);
+				pctcpu += sched_pctcpu(td);
+				/*
+				 * This decision is based on data from the previous
+				 * run; the assumption is that this runs often enough
+				 * for the staleness not to matter.
+				 */
+				if (pctcpu > pctcpu_limit)
+					rusage_throttle(td, 1);
+				else
+					rusage_throttle(td, 0);
+				thread_unlock(td);
+			}
+			PROC_SUNLOCK(p);
+			rusage_set(p, RUSAGE_CPU, cputick2usec(p->p_rux.rux_runtime));
+			microuptime(&wallclock);
+			timevalsub(&wallclock, &p->p_stats->p_start);
+			rusage_set(p, RUSAGE_WALLCLOCK, wallclock.tv_sec * 1000000 + wallclock.tv_usec);
+			pctcpu = ((pctcpu * 10000 + FSCALE / 2) >> FSHIFT) / 100;
+			rusage_set(p, RUSAGE_PCTCPU, pctcpu);
+		}
+		sx_sunlock(&allproc_lock);
+		pause("-", hz);
+	}
+}
+
+static struct kproc_desc containerd_kp = {
+	"containerd",
+	containerd,
+	NULL
+};
+SYSINIT(containerd, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &containerd_kp);
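
A note on the percent-CPU conversion above: sched_pctcpu() returns a
fixed-point fraction of one CPU scaled by FSCALE (1 << FSHIFT, defined
in sys/param.h), and the expression rescales it to whole percent with
rounding.  A small userland sketch of the same arithmetic, assuming
FSHIFT is 11 as in sys/param.h:

#include <stdio.h>

#define	FSHIFT	11		/* Bits right of the fixed binary point. */
#define	FSCALE	(1 << FSHIFT)

int
main(void)
{
	int pctcpu;

	/* A thread using half a CPU: the fixed-point value is FSCALE / 2. */
	pctcpu = FSCALE / 2;

	/*
	 * Multiply up to hundredths of a percent, add FSCALE / 2 so the
	 * shift rounds instead of truncating, shift the fixed point away,
	 * then divide down to whole percent.
	 */
	pctcpu = ((pctcpu * 10000 + FSCALE / 2) >> FSHIFT) / 100;

	printf("%d%%\n", pctcpu);	/* Prints "50%". */
	return (0);
}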

==== //depot/projects/soc2009/trasz_limits/sys/kern/kern_resource.c#52 (text+ko) ====

@@ -81,11 +81,6 @@
 static struct uidinfo *uilookup(uid_t uid);
 static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);
 
-#ifdef CONTAINERS
-static struct callout rusage_cpu_callout;
-static struct task rusage_cpu_task;
-#endif
-
 /*
  * Resource controls and accounting.
  */
@@ -611,89 +606,6 @@
 	return (error);
 }
 
-#ifdef CONTAINERS
-static void
-rusage_cpu_update(void *arg)
-{
-
-	/*
-	 * The rusage_cpu_task_fn() routine may sleep, so we have to put it
-	 * into taskqueue instead of running directly from callout.
-	 */
-	taskqueue_enqueue(taskqueue_thread, &rusage_cpu_task);
-}
-
-static void
-rusage_throttle(struct thread *td, int throttle)
-{
-	u_char oldpri;
-	u_char newpri;
-	int type;
-
-	if (throttle) {
-		td->td_flags |= TDF_THROTTLED;
-		newpri = PRI_MIN_IDLE;
-		type = RTP_PRIO_IDLE;
-	} else if (td->td_flags & TDF_THROTTLED) {
-		td->td_flags &= ~TDF_THROTTLED;
-		newpri = PRI_MIN_TIMESHARE;
-		type = RTP_PRIO_NORMAL;
-	} else
-		return;
-
-	/* Mostly copied from rtp_to_pri(). */
-	sched_class(td, type);	/* XXX fix */
-	oldpri = td->td_user_pri;
-	sched_user_prio(td, newpri);
-	if (TD_IS_RUNNING(td) || TD_CAN_RUN(td))
-		sched_prio(td, td->td_user_pri); /* XXX dubious */
-	if (TD_ON_UPILOCK(td) && oldpri != newpri)
-		umtx_pi_adjust(td, oldpri);
-}
-
-static void
-rusage_cpu_task_fn(void *arg, int pending)
-{
-	int pctcpu;
-	struct thread *td;
-	struct proc *p;
-	struct timeval wallclock;
-	uint64_t pctcpu_limit;
-
-	sx_slock(&allproc_lock);
-	FOREACH_PROC_IN_SYSTEM(p) {
-		pctcpu_limit = rusage_get_limit(p, RUSAGE_PCTCPU);
-		PROC_SLOCK(p);
-		pctcpu = 0;
-		FOREACH_THREAD_IN_PROC(p, td) {
-			ruxagg(p, td);
-			thread_lock(td);
-			pctcpu += sched_pctcpu(td);
-			/*
-			 * We are making this decision based on data from
-			 * the previous run.  The assumption is that this runs
-			 * so often it doesn't matter.
-			 */
-			if (pctcpu > pctcpu_limit)
-				rusage_throttle(td, 1);
-			else
-				rusage_throttle(td, 0);
-			thread_unlock(td);
-		}
-		PROC_SUNLOCK(p);
-		rusage_set(p, RUSAGE_CPU, cputick2usec(p->p_rux.rux_runtime));
-		microuptime(&wallclock);
-		timevalsub(&wallclock, &p->p_stats->p_start);
-		rusage_set(p, RUSAGE_WALLCLOCK, wallclock.tv_sec * 1000000 + wallclock.tv_usec);
-		pctcpu = ((pctcpu * 10000 + FSCALE / 2) >> FSHIFT) / 100;
-		rusage_set(p, RUSAGE_PCTCPU, pctcpu);
-	}
-	sx_sunlock(&allproc_lock);
-
-	callout_reset(&rusage_cpu_callout, hz, rusage_cpu_update, NULL);
-}
-#endif
-
 #ifndef HRL
 static void
 lim_cb(void *arg)
@@ -1378,16 +1290,6 @@
 
 	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
 	rw_init(&uihashtbl_lock, "uidinfo hash");
-
-#ifdef CONTAINERS
-	/*
-	 * XXX: Piggybacked for now; in the future it should have
-	 *      it's own function.
-	 */
-	TASK_INIT(&rusage_cpu_task, 0, rusage_cpu_task_fn, NULL);
-	callout_init(&rusage_cpu_callout, 1);
-	callout_reset(&rusage_cpu_callout, hz, rusage_cpu_update, NULL);
-#endif
 }
 
 /*

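For contrast, the removed kern_resource.c code used the callout plus
taskqueue idiom: a callout handler runs in a context that must not
sleep, so it only enqueues a task, and the task function does the work
that may sleep and then rearms the callout.  A minimal sketch of that
idiom (the "example" names are illustrative, not from this change):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/taskqueue.h>

static struct callout example_callout;
static struct task example_task;

static void
example_callout_fn(void *arg)
{

	/* Callout context must not sleep; defer to a taskqueue thread. */
	taskqueue_enqueue(taskqueue_thread, &example_task);
}

static void
example_task_fn(void *arg, int pending)
{

	/* Work that may sleep goes here. */

	/* Rearm the callout to fire again in about one second. */
	callout_reset(&example_callout, hz, example_callout_fn, NULL);
}

static void
example_init(void *arg)
{

	TASK_INIT(&example_task, 0, example_task_fn, NULL);
	callout_init(&example_callout, 1);
	callout_reset(&example_callout, hz, example_callout_fn, NULL);
}
SYSINIT(example, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, example_init, NULL);

Replacing this with a dedicated kproc removes the indirection and, as
the change description notes, makes the cost of the periodic scan
attributable to a single visible process.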
