PERFORCE change 126550 for review

Marko Zec zec at FreeBSD.org
Tue Sep 18 05:43:38 PDT 2007


http://perforce.freebsd.org/chv.cgi?CH=126550

Change 126550 by zec at zec_tpx32 on 2007/09/18 12:43:14

	Attempt to extend SCHED_ULE to support per-vprocg
	load average accounting, as a follow-up to the previous
	change that added the same capability to SCHED_4BSD.
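
	The idea in outline: each vprocg (virtual process group)
	keeps its own per-CPU run-queue load counters, so that its
	load average is computed over its own threads only.  Below
	is a minimal standalone sketch of the accounting scheme; it
	models the _tdq_load[] array added to struct vprocg in this
	change, and all names in it are illustrative rather than
	the actual kernel interfaces.

	#include <stdio.h>

	#define MAXCPU 32	/* mirrors the hardcoded 32 below */

	struct vprocg {
		int tdq_load[MAXCPU];	/* runnable threads charged here */
	};

	/* Charge/uncharge one runnable thread on a given CPU. */
	static void load_add(struct vprocg *vp, int cpu) { vp->tdq_load[cpu]++; }
	static void load_rem(struct vprocg *vp, int cpu) { vp->tdq_load[cpu]--; }

	/* Per-vprocg equivalent of sched_load(): sum across CPUs. */
	static int
	sched_load_sketch(const struct vprocg *vp)
	{
		int cpu, total = 0;

		for (cpu = 0; cpu < MAXCPU; cpu++)
			total += vp->tdq_load[cpu];
		return (total);
	}

	int
	main(void)
	{
		struct vprocg a = {{0}}, b = {{0}};

		load_add(&a, 0);	/* one runnable thread in a, CPU 0 */
		load_add(&b, 1);	/* two runnable threads in b, CPU 1 */
		load_add(&b, 1);
		printf("load: a=%d b=%d\n",
		    sched_load_sketch(&a), sched_load_sketch(&b));
		load_rem(&b, 1);	/* one of b's threads blocks */
		printf("after rem: b=%d\n", sched_load_sketch(&b));
		return (0);
	}

	sched_load() in the real change performs the same per-CPU
	summation, with the counters maintained from the scheduler's
	load add/remove paths.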

Affected files ...

.. //depot/projects/vimage/src/sys/kern/kern_exit.c#7 edit
.. //depot/projects/vimage/src/sys/kern/sched_4bsd.c#9 edit
.. //depot/projects/vimage/src/sys/kern/sched_ule.c#11 edit
.. //depot/projects/vimage/src/sys/sys/vimage.h#39 edit

Differences ...

==== //depot/projects/vimage/src/sys/kern/kern_exit.c#7 (text+ko) ====

@@ -40,6 +40,7 @@
 #include "opt_compat.h"
 #include "opt_ktrace.h"
 #include "opt_mac.h"
+#include "opt_sched.h"
 #include "opt_vimage.h"
 
 #include <sys/param.h>
@@ -189,6 +190,10 @@
 		crhold(p->p_pptr->p_ucred);
 		oldcred = p->p_ucred;
 		p->p_ucred = p->p_pptr->p_ucred;
+#ifdef SCHED_4BSD
+		sched_load_reassign(oldcred->cr_vimage->v_procg,
+		    p->p_ucred->cr_vimage->v_procg);
+#endif
 		PROC_UNLOCK(p);
 		sx_xlock(&allproc_lock);
 		oldcred->cr_vimage->v_procg->nprocs--;
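
Here exit1() is replacing the exiting process's credentials with the
parent's; if the parent lives in a different vprocg, the load already
charged to the old vprocg has to follow the credential switch, hence the
sched_load_reassign() call.  (Note the call is guarded by SCHED_4BSD even
though this change also adds sched_load_reassign() to SCHED_ULE.)  The
SCHED_4BSD version of the function was added in the previous change and
is not shown in this diff; a hedged sketch of what it presumably looks
like, based on the _sched_tdcnt field virtualized in vimage.h:

	void
	sched_load_reassign(struct vprocg *old, struct vprocg *new)
	{
		/* XXX same locking caveat as the ULE version below. */
		old->_sched_tdcnt--;
		new->_sched_tdcnt++;
	}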

==== //depot/projects/vimage/src/sys/kern/sched_4bsd.c#9 (text+ko) ====

@@ -391,7 +391,7 @@
 #ifndef VIMAGE
 	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
 #else
-	#define loadfac loadfactor(p->p_ucred->cr_vimage->v_procg->_averunnable.ldavg[0])
+	#define loadfac loadfactor(td->td_ucred->cr_vimage->v_procg->_averunnable.ldavg[0])
 #endif
 	struct thread *td;
 	struct proc *p;
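
The one-line fix above makes the VIMAGE loadfac macro look up the load
average through the thread credential (td->td_ucred) rather than the
process credential, presumably preferring the per-thread cached credential
reference that the scheduler code conventionally uses.  For context,
abridged from sched_4bsd.c (not part of this diff), the decay that
loadfac drives in schedcpu():

	#define	loadfactor(loadav)	(2 * (loadav))
	#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

so under VIMAGE each thread's CPU estimate now decays against its own
vprocg's load average instead of the global averunnable.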

==== //depot/projects/vimage/src/sys/kern/sched_ule.c#11 (text+ko) ====

@@ -40,6 +40,7 @@
 
 #include "opt_hwpmc_hooks.h"
 #include "opt_sched.h"
+#include "opt_vimage.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -59,6 +60,7 @@
 #include <sys/turnstile.h>
 #include <sys/umtx.h>
 #include <sys/vmmeter.h>
+#include <sys/vimage.h>
 #ifdef KTRACE
 #include <sys/uio.h>
 #include <sys/ktrace.h>
@@ -278,8 +280,13 @@
 /* Operations on per processor queues */
 static struct td_sched * tdq_choose(struct tdq *);
 static void tdq_setup(struct tdq *);
+#ifndef VIMAGE
 static void tdq_load_add(struct tdq *, struct td_sched *);
 static void tdq_load_rem(struct tdq *, struct td_sched *);
+#else
+static void tdq_load_add(struct tdq *, struct td_sched *, struct vprocg *);
+static void tdq_load_rem(struct tdq *, struct td_sched *, struct vprocg *);
+#endif
 static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
 static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
 void tdq_print(int cpu);
@@ -451,7 +458,11 @@
  * for this thread to the referenced thread queue.
  */
 static void
+#ifndef VIMAGE
 tdq_load_add(struct tdq *tdq, struct td_sched *ts)
+#else
+tdq_load_add(struct tdq *tdq, struct td_sched *ts, struct vprocg *vprocg)
+#endif
 {
 	int class;
 
@@ -461,12 +472,16 @@
 	tdq->tdq_load++;
 	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
 	if (class != PRI_ITHD &&
-	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
+	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) {
 #ifdef SMP
 		tdq->tdq_group->tdg_load++;
 #else
 		tdq->tdq_sysload++;
 #endif
+#ifdef VIMAGE
+		V_tdq_load[curcpu]++;
+#endif
+	}
 }
 
 /*
@@ -474,7 +489,11 @@
  * exiting.
  */
 static void
+#ifndef VIMAGE
 tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
+#else
+tdq_load_rem(struct tdq *tdq, struct td_sched *ts, struct vprocg *vprocg)
+#endif
 {
 	int class;
 
@@ -482,12 +501,16 @@
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	class = PRI_BASE(ts->ts_thread->td_pri_class);
 	if (class != PRI_ITHD &&
-	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
+	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) {
 #ifdef SMP
 		tdq->tdq_group->tdg_load--;
 #else
 		tdq->tdq_sysload--;
 #endif
+#ifdef VIMAGE
+		V_tdq_load[curcpu]--;
+#endif
+	}
 	KASSERT(tdq->tdq_load != 0,
 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
 	tdq->tdq_load--;
@@ -1299,7 +1322,11 @@
 	/* Add thread0's load since it's running. */
 	TDQ_LOCK(tdq);
 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
+#ifndef VIMAGE
 	tdq_load_add(tdq, &td_sched0);
+#else
+	tdq_load_add(tdq, &td_sched0, thread0.td_ucred->cr_vimage->v_procg);
+#endif
 	TDQ_UNLOCK(tdq);
 }
 
@@ -1823,7 +1850,11 @@
 		TD_SET_CAN_RUN(td);
 	} else if (TD_IS_RUNNING(td)) {
 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
+#ifndef VIMAGE
 		tdq_load_rem(tdq, ts);
+#else
+		tdq_load_rem(tdq, ts, td->td_ucred->cr_vimage->v_procg);
+#endif
 		srqflag = (flags & SW_PREEMPT) ?
 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 		    SRQ_OURSELF|SRQ_YIELDING;
@@ -1835,7 +1866,11 @@
 		/* This thread must be going to sleep. */
 		TDQ_LOCK(tdq);
 		mtx = thread_block_switch(td);
+#ifndef VIMAGE
 		tdq_load_rem(tdq, ts);
+#else
+		tdq_load_rem(tdq, ts, td->td_ucred->cr_vimage->v_procg);
+#endif
 	}
 	/*
 	 * We enter here with the thread blocked and assigned to the
@@ -2328,7 +2363,11 @@
 		tdq->tdq_lowpri = td->td_priority;
 #endif
 	tdq_runq_add(tdq, ts, flags);
+#ifndef VIMAGE
 	tdq_load_add(tdq, ts);
+#else
+	tdq_load_add(tdq, ts, td->td_ucred->cr_vimage->v_procg);
+#endif
 }
 
 /*
@@ -2408,7 +2447,11 @@
 	KASSERT(TD_ON_RUNQ(td),
 	    ("sched_rem: thread not on run queue"));
 	tdq_runq_rem(tdq, ts);
+#ifndef VIMAGE
 	tdq_load_rem(tdq, ts);
+#else
+	tdq_load_rem(tdq, ts, td->td_ucred->cr_vimage->v_procg);
+#endif
 	TD_SET_CAN_RUN(td);
 }
 
@@ -2507,7 +2550,11 @@
  * Return the total system load.
  */
 int
+#ifdef VIMAGE
+sched_load(struct vprocg *vprocg)
+#else
 sched_load(void)
+#endif
 {
 #ifdef SMP
 	int total;
@@ -2515,13 +2562,31 @@
 
 	total = 0;
 	for (i = 0; i <= tdg_maxid; i++)
+#ifndef VIMAGE
 		total += TDQ_GROUP(i)->tdg_load;
+#else
+		total += V_tdq_load[i];
+#endif
 	return (total);
 #else
+#ifndef VIMAGE
 	return (TDQ_SELF()->tdq_sysload);
+#else
+	return (V_tdq_load[0]);
+#endif
 #endif
 }
 
+#ifdef VIMAGE
+void
+sched_load_reassign(struct vprocg *old, struct vprocg *new)
+{
+	/* XXX locking! */
+	old->_tdq_load[curcpu]--;
+	new->_tdq_load[curcpu]++;
+}
+#endif
+
 int
 sched_sizeof_proc(void)
 {
@@ -2572,7 +2637,12 @@
 		spinlock_exit();
 	} else {
 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
+#ifndef VIMAGE
 		tdq_load_rem(tdq, td->td_sched);
+#else
+		tdq_load_rem(tdq, td->td_sched,
+		    td->td_ucred->cr_vimage->v_procg);
+#endif
 	}
 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
 	PCPU_SET(switchtime, cpu_ticks());
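
Two things worth noting in the ULE changes above.  First, although
tdq_load_add() and tdq_load_rem() grow an explicit vprocg argument under
VIMAGE, the hunks shown never dereference it: the accounting goes through
V_tdq_load, i.e. through the current vprocg context.  Second, V_tdq_load
comes from sys/vimage.h; in the vimage branch such V_ macros resolve to a
field of the current vprocg, along the lines of the following sketch (the
real definition may instead rely on a local vprocg pointer set up by an
INIT_VPROCG() at function entry):

	#define	VPROCG(sym)	(curthread->td_ucred->cr_vimage->v_procg->_##sym)
	#define	V_tdq_load	VPROCG(tdq_load)

so V_tdq_load[curcpu]++ charges the runnable thread to the current
vprocg's per-CPU counter, while tdq_load and tdg_load keep the global,
per-run-queue view that ULE's SMP balancing code still uses.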

==== //depot/projects/vimage/src/sys/sys/vimage.h#39 (text+ko) ====

@@ -301,6 +301,7 @@
 #define V_morphing_symlinks	VPROCG(morphing_symlinks)
 #define V_averunnable		VPROCG(averunnable)
 #define V_sched_tdcnt		VPROCG(sched_tdcnt)
+#define V_tdq_load		VPROCG(tdq_load)
 
 #ifdef VIMAGE
 void vnet_mod_register(const struct vnet_modinfo *);
@@ -377,6 +378,7 @@
 
 	struct loadavg _averunnable;	/* from kern/kern_synch.c */
 	int	_sched_tdcnt;		/* from kern/sched_4bsd.c */
+	int	_tdq_load[32]; /* XXX MAXCPUS from kern/sched_ule.c (SMP) */
 
 #if 0
 	u_int	proc_limit;		/* max. number of processes */
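
The XXX in the new _tdq_load field flags the hardcoded array size; the
constant the comment is reaching for is spelled MAXCPU on FreeBSD (from
<machine/param.h>), so the intended declaration is presumably:

	int	_tdq_load[MAXCPU];	/* from kern/sched_ule.c (SMP) */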

