PERFORCE change 127600 for review
Marko Zec
zec at FreeBSD.org
Tue Oct 16 04:53:01 PDT 2007
http://perforce.freebsd.org/chv.cgi?CH=127600
Change 127600 by zec at zec_tca51 on 2007/10/16 11:52:31
Hold a spin mutex when updating CPU usage state and/or
traversing the list of CPU accounting containers (in
statclock() we cannot hold a default mutex). This shouldn't
happen more often than ~130 * number_of_processors
times per second, so hopefully the introduction of this
new spin mutex won't choke the system too much.
Nuke the periodic conversion from fixed-point representation of
CPU usage averages to integers, together with the sysctls that
were exposing those ints to userland. We'll be doing the
conversion only when requested, and export the state to
userland through the vimage API.
Affected files ...
.. //depot/projects/vimage/src/sys/kern/kern_clock.c#10 edit
.. //depot/projects/vimage/src/sys/kern/kern_vimage.c#44 edit
.. //depot/projects/vimage/src/sys/sys/vimage.h#43 edit
Differences ...
==== //depot/projects/vimage/src/sys/kern/kern_clock.c#10 (text+ko) ====
@@ -480,7 +480,7 @@
atomic_add_long(&vprocg_iter->_cp_time[sel], 1);
/* Per-vcpu average accounting */
- /* LOCKING!!!! XXX */
+ mtx_lock_spin(&vcpu_list_mtx);
tot_acc_statcalls++;
if (!TD_IS_IDLETHREAD(td))
V_acc_statcalls++;
@@ -500,15 +500,10 @@
V_avg1_fixp = (V_avg1_fixp + avg0 + 1) >> 1;
V_avg2_fixp = (15 * V_avg2_fixp + avg0 + 15) >> 4;
V_acc_statcalls = 0;
- /*
- * Convert fixp notation to percents for export to
- * userspace via sysctls - this will go away soon.
- */
- V_avg1_uint = (V_avg1_fixp * 100 + 0x8000) >> 16;
- V_avg2_uint = (V_avg2_fixp * 100 + 0x8000) >> 16;
}
tot_acc_statcalls = 0;
}
+ mtx_unlock_spin(&vcpu_list_mtx);
#endif
/* Update resource usage integrals and maximums. */
==== //depot/projects/vimage/src/sys/kern/kern_vimage.c#44 (text+ko) ====
@@ -115,6 +115,8 @@
struct mtx vnet_list_refc_mtx;
int vnet_list_refc = 0;
+struct mtx vcpu_list_mtx;
+
#define VNET_LIST_LOCK() \
mtx_lock(&vnet_list_refc_mtx); \
while (vnet_list_refc != 0) \
@@ -131,11 +133,6 @@
static TAILQ_HEAD(vnet_modlink_head, vnet_modlink) vnet_modlink_head;
static TAILQ_HEAD(vnet_modpending_head, vnet_modlink) vnet_modpending_head;
-SYSCTL_V_INT(V_CPU, vcpu, _kern, OID_AUTO, avg1_uint, CTLFLAG_RD,
- avg1_uint, 0, "Average CPU usage");
-SYSCTL_V_INT(V_CPU, vcpu, _kern, OID_AUTO, avg2_uint, CTLFLAG_RD,
- avg2_uint, 0, "Average CPU usage");
-
void vnet_mod_register(vmi)
const struct vnet_modinfo *vmi;
{
@@ -501,12 +498,14 @@
case SIOCGPVIMAGE:
vi_req->vi_id = vip_r->vi_id;
bcopy(&vip_r->vi_name, &vi_req->vi_name,
- sizeof (vi_req->vi_name));
+ sizeof (vi_req->vi_name));
bcopy(&vip_r->v_procg->_averunnable, &vi_req->averunnable,
- sizeof (vi_req->averunnable));
+ sizeof (vi_req->averunnable));
vi_req->vi_proc_count = vip_r->v_procg->nprocs;
vi_req->vi_if_count = vip_r->v_vnet->ifccnt;
vi_req->vi_sock_count = vip_r->v_vnet->sockcnt;
+ vi_req->cp_time_avg =
+ (vip_r->v_cpu->_avg2_fixp * 10000 + 0x8000) >> 16;
break;
case SIOCSPVIMAGE:
@@ -644,12 +643,19 @@
vnet_mod_constructor(vml);
CURVNET_RESTORE();
- VNET_LIST_LOCK(); /* XXX should lock other lists separately */
+ VNET_LIST_LOCK();
LIST_INSERT_HEAD(&vnet_head, vnet, vnet_le);
+ VNET_LIST_UNLOCK();
+
+ /* XXX locking */
LIST_INSERT_HEAD(&vprocg_head, vprocg, vprocg_le);
+
+ mtx_lock_spin(&vcpu_list_mtx);
LIST_INSERT_HEAD(&vcpu_head, vcpu, vcpu_le);
+ mtx_unlock_spin(&vcpu_list_mtx);
+
+ /* XXX locking */
LIST_INSERT_HEAD(&vimage_head, vip, vi_le);
- VNET_LIST_UNLOCK();
vi_alloc_done:
return (vip);
@@ -814,6 +820,8 @@
mtx_init(&vnet_list_refc_mtx, "vnet_list_refc_mtx", NULL, MTX_DEF);
cv_init(&vnet_list_condvar, "vnet_list_condvar");
+ mtx_init(&vcpu_list_mtx, "vcpu_list_mtx", NULL, MTX_SPIN);
+
vi_alloc("default", 0);
/* We MUST clear curvnet in vi_init_done before going SMP. */
==== //depot/projects/vimage/src/sys/sys/vimage.h#43 (text+ko) ====
@@ -312,8 +312,6 @@
#define V_acc_statcalls VCPU(acc_statcalls)
#define V_avg1_fixp VCPU(avg1_fixp)
#define V_avg2_fixp VCPU(avg2_fixp)
-#define V_avg1_uint VCPU(avg1_uint)
-#define V_avg2_uint VCPU(avg2_uint)
#ifdef VIMAGE
void vnet_mod_register(const struct vnet_modinfo *);
@@ -346,6 +344,7 @@
extern int vnet_list_refc;
extern struct mtx vnet_list_refc_mtx;
extern struct cv vnet_list_condvar;
+extern struct mtx vcpu_list_mtx;
#define VNET_LIST_REF() \
mtx_lock(&vnet_list_refc_mtx); \
@@ -423,8 +422,6 @@
u_int _acc_statcalls; /* statclocks since last avg update*/
u_int _avg1_fixp; /* "fast" avg in 16:16 bit fixedpoint */
u_int _avg2_fixp; /* "slow" avg in 16:16 bit fixedpoint */
- u_int _avg1_uint; /* (avg1_fixp * 100) >> 16 */
- u_int _avg2_uint; /* (avg2_fixp * 100) >> 16 */
#if 0
u_int cpu_min; /* Guaranteed CPU share */
@@ -456,7 +453,7 @@
char vi_name[MAXPATHLEN];
char vi_chroot[MAXPATHLEN];
char vi_if_xname[MAXPATHLEN]; /* XXX should be IFNAMSIZ */
- int cp_time_avg[CPUSTATES];
+ u_int cp_time_avg;
struct loadavg averunnable;
};
More information about the p4-projects
mailing list