PERFORCE change 127589 for review
Marko Zec
zec at FreeBSD.org
Tue Oct 16 03:21:27 PDT 2007
http://perforce.freebsd.org/chv.cgi?CH=127589
Change 127589 by zec at zec_tpx32 on 2007/10/16 10:20:39
Implement per process group accounting of average CPU usage.
Two average values are maintained per process group: avg1
loses half of its value after around 62.5 ms, while for avg2
the same should happen after roughly 0.5 s. Internally the
values are maintained in a 16.16 bit fixed point format stored
in a uint (portability XXX). Each 62.5 ms the entire list
of process group containers is scanned for decay processing.
This is very experimental, needs more thought about locking,
as well as more experimentation across a wide variety of HZ
values on both UP and SMP machines.
Affected files ...
.. //depot/projects/vimage/src/sys/kern/kern_clock.c#9 edit
.. //depot/projects/vimage/src/sys/kern/kern_vimage.c#43 edit
.. //depot/projects/vimage/src/sys/sys/vimage.h#42 edit
Differences ...
==== //depot/projects/vimage/src/sys/kern/kern_clock.c#9 (text+ko) ====
@@ -167,6 +167,11 @@
int ticks;
int psratio;
+#ifdef VIMAGE
+u_int tot_acc_statcalls;
+int last_acc_ticks;
+#endif
+
/*
* Initialize clock frequencies and start both clocks running.
*/
@@ -416,7 +421,8 @@
td = curthread;
p = td->td_proc;
#ifdef VIMAGE
- INIT_VPROCG(p->p_ucred->cr_vimage->v_procg);
+ INIT_VPROCG(td->td_ucred->cr_vimage->v_procg);
+ INIT_VCPU(td->td_ucred->cr_vimage->v_cpu);
struct vprocg *vprocg_iter;
#endif
@@ -468,10 +474,41 @@
#ifdef VIMAGE
if (sel != CP_INTR)
sel = CP_IDLE;
- /* XXX list locking? sched_lock is held here... */
+ /* XXX list locking? */
LIST_FOREACH(vprocg_iter, &vprocg_head, vprocg_le)
if (vprocg != vprocg_iter)
atomic_add_long(&vprocg_iter->_cp_time[sel], 1);
+
+ /* Per-vcpu average accounting */
+ /* LOCKING!!!! XXX */
+ tot_acc_statcalls++;
+ if (!TD_IS_IDLETHREAD(td))
+ V_acc_statcalls++;
+ if (last_acc_ticks + (hz >> 4) <= ticks) {
+ u_int weight_fixp;
+ u_int avg0;
+
+ last_acc_ticks = ticks;
+ /*
+ * 0x10000 == 1.0 in 16:16 fixed point notation;
+ * a few extra LS bits are added in an attempt to
+ * compensate for truncation errors.
+ */
+ weight_fixp = 0x010007 / tot_acc_statcalls;
+ LIST_FOREACH(vcpu, &vcpu_head, vcpu_le) {
+ avg0 = weight_fixp * V_acc_statcalls;
+ V_avg1_fixp = (V_avg1_fixp + avg0 + 1) >> 1;
+ V_avg2_fixp = (15 * V_avg2_fixp + avg0 + 15) >> 4;
+ V_acc_statcalls = 0;
+ /*
+ * Convert fixp notation to percents for export to
+ * userspace via sysctls - this will go away soon.
+ */
+ V_avg1_uint = (V_avg1_fixp * 100 + 0x8000) >> 16;
+ V_avg2_uint = (V_avg2_fixp * 100 + 0x8000) >> 16;
+ }
+ tot_acc_statcalls = 0;
+ }
#endif
/* Update resource usage integrals and maximums. */
==== //depot/projects/vimage/src/sys/kern/kern_vimage.c#43 (text+ko) ====
@@ -43,6 +43,7 @@
#include <sys/sched.h>
#include <sys/sockio.h>
#include <sys/sx.h>
+#include <sys/sysctl.h>
#include <sys/vimage.h>
#ifdef DDB
@@ -130,6 +131,11 @@
static TAILQ_HEAD(vnet_modlink_head, vnet_modlink) vnet_modlink_head;
static TAILQ_HEAD(vnet_modpending_head, vnet_modlink) vnet_modpending_head;
+SYSCTL_V_INT(V_CPU, vcpu, _kern, OID_AUTO, avg1_uint, CTLFLAG_RD,
+ avg1_uint, 0, "Average CPU usage");
+SYSCTL_V_INT(V_CPU, vcpu, _kern, OID_AUTO, avg2_uint, CTLFLAG_RD,
+ avg2_uint, 0, "Average CPU usage");
+
void vnet_mod_register(vmi)
const struct vnet_modinfo *vmi;
{
==== //depot/projects/vimage/src/sys/sys/vimage.h#42 (text+ko) ====
@@ -104,6 +104,7 @@
#define V_MOD_vnet_ipsec VNET_MOD_IPSEC
#define V_MOD_vprocg 0
+#define V_MOD_vcpu 0
struct vnet {
void *mod_data[VNET_MOD_MAX];
@@ -272,6 +273,8 @@
#define VPROCG_ITERLOOP_END() \
} \
+#define INIT_VCPU(arg) struct vcpu *vcpu = (arg);
+
#else /* !VIMAGE */
/* Non-VIMAGE null-macros */
@@ -290,10 +293,13 @@
#define INIT_VPROCG(arg)
#define VPROCG_ITERLOOP_BEGIN()
#define VPROCG_ITERLOOP_END()
+#define INIT_VCPU(arg)
#endif /* !VIMAGE */
+/* XXX those defines bellow should probably go into vprocg.h and vcpu.h */
#define VPROCG(sym) VSYM(vprocg, sym)
+#define VCPU(sym) VSYM(vcpu, sym)
#define V_cp_time VPROCG(cp_time)
#define V_hostname VPROCG(hostname)
@@ -303,6 +309,12 @@
#define V_sched_tdcnt VPROCG(sched_tdcnt)
#define V_tdq_load VPROCG(tdq_load)
+#define V_acc_statcalls VCPU(acc_statcalls)
+#define V_avg1_fixp VCPU(avg1_fixp)
+#define V_avg2_fixp VCPU(avg2_fixp)
+#define V_avg1_uint VCPU(avg1_uint)
+#define V_avg2_uint VCPU(avg2_uint)
+
#ifdef VIMAGE
void vnet_mod_register(const struct vnet_modinfo *);
void vnet_mod_deregister(const struct vnet_modinfo *);
@@ -408,10 +420,13 @@
u_int vcpu_ref; /* reference count */
u_int vcpu_id; /* ID num */
+ u_int _acc_statcalls; /* statclocks since last avg update*/
+ u_int _avg1_fixp; /* "fast" avg in 16:16 bit fixedpoint */
+ u_int _avg2_fixp; /* "slow" avg in 16:16 bit fixedpoint */
+ u_int _avg1_uint; /* (avg1_fixp * 100) >> 16 */
+ u_int _avg2_uint; /* (avg2_fixp * 100) >> 16 */
+
#if 0
- u_int cp_time_avg[CPUSTATES];
- u_int cp_time_avg1[CPUSTATES];
-
u_int cpu_min; /* Guaranteed CPU share */
u_int cpu_max; /* Maximum average CPU usage */
u_int intr_limit; /* Limit on CPU usage in intr ctx */
More information about the p4-projects
mailing list