socsvn commit: r237075 - in soc2012/rudot: aux sys/kern
rudot at FreeBSD.org
Mon Jun 4 18:39:53 UTC 2012
Author: rudot
Date: Mon Jun 4 18:39:50 2012
New Revision: 237075
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=237075
Log:
Fix problems that WITNESS complains about.
Added:
soc2012/rudot/aux/check_proc.sh (contents, props changed)
Modified:
soc2012/rudot/sys/kern/kern_racct.c
soc2012/rudot/sys/kern/sched_4bsd.c
Added: soc2012/rudot/aux/check_proc.sh
==============================================================================
--- /dev/null	Thu Jan  1 00:00:00 1970	(empty, because file is newly added)
+++ soc2012/rudot/aux/check_proc.sh Mon Jun 4 18:39:50 2012 (r237075)
@@ -0,0 +1,3 @@
+./proc_pcpu.sh $1
+
+ps ax -O %cpu | grep dummy
Modified: soc2012/rudot/sys/kern/kern_racct.c
==============================================================================
--- soc2012/rudot/sys/kern/kern_racct.c Mon Jun 4 18:05:40 2012 (r237074)
+++ soc2012/rudot/sys/kern/kern_racct.c Mon Jun 4 18:39:50 2012 (r237075)
@@ -54,6 +54,7 @@
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/umtx.h>
+#include <machine/smp.h>
#ifdef RCTL
#include <sys/rctl.h>
@@ -61,6 +62,9 @@
#ifdef RACCT
+#define TDF_RACCT_PCTCPU TDF_SCHED2
+#define TDF_RACCT_RQ TDF_SCHED3
+
FEATURE(racct, "Resource Accounting");
static struct mtx racct_lock;
@@ -842,6 +846,70 @@
}
static void
+racct_proc_disable(struct proc *p)
+{
+ struct thread *td;
+
+ FOREACH_THREAD_IN_PROC(p, td) {
+ if ((td->td_flags & TDF_RACCT_PCTCPU) == 0) {
+ thread_lock(td);
+ td->td_flags |= TDF_RACCT_PCTCPU;
+
+ switch (td->td_state) {
+ case TDS_RUNQ:
+ sched_rem(td);
+ td->td_flags |= TDF_RACCT_RQ;
+ break;
+ case TDS_RUNNING:
+ td->td_flags |= TDF_NEEDRESCHED;
+#ifdef SMP
+ if (td != curthread)
+ ipi_cpu(td->td_oncpu, IPI_AST);
+#endif
+ break;
+ default:
+ break;
+ }
+
+ thread_unlock(td);
+ }
+ }
+}
+
+/*
+ * Returns true if at least one of the process threads
+ * has been disabled.
+ */
+static int
+racct_proc_disabled(struct proc *p)
+{
+ struct thread *td;
+
+ FOREACH_THREAD_IN_PROC(p, td) {
+ if (td->td_flags & TDF_RACCT_PCTCPU)
+ return (1);
+ }
+
+ return (0);
+}
+
+static void
+racct_proc_enable(struct proc *p)
+{
+ struct thread *td;
+
+ FOREACH_THREAD_IN_PROC(p, td) {
+ thread_lock(td);
+ td->td_flags &= ~TDF_RACCT_PCTCPU;
+ if (td->td_flags & TDF_RACCT_RQ) {
+ td->td_flags &= ~TDF_RACCT_RQ;
+ sched_add(td, SRQ_BORING);
+ }
+ thread_unlock(td);
+ }
+}
+
+static void
racctd(void)
{
struct thread *td;
@@ -849,6 +917,7 @@
struct timeval wallclock;
uint64_t runtime;
u_int pct;
+ uint64_t limit;
for (;;) {
sx_slock(&allproc_lock);
@@ -873,14 +942,20 @@
runtime = p->p_prev_runtime;
#endif
p->p_prev_runtime = runtime;
- mtx_lock(&racct_lock);
+ limit = racct_get_limit(p, RACCT_PCTCPU);
pct = racct_getpcpu(p);
+ mtx_lock(&racct_lock);
/*
* I use _force_ here because we always want to have
* the real value in the RACCT_PCTCPU resource
* regardless of the limits set.
*/
racct_set_force_locked(p, RACCT_PCTCPU, pct);
+ if (pct >= limit) {
+ racct_proc_disable(p);
+ } else if (racct_proc_disabled(p)) {
+ racct_proc_enable(p);
+ }
racct_set_locked(p, RACCT_CPU, runtime);
racct_set_locked(p, RACCT_WALLCLOCK,
(uint64_t)wallclock.tv_sec * 1000000 +
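
The kern_racct.c hunk above is the part most relevant to WITNESS: racct_get_limit() and racct_getpcpu() are now called before racct_lock is acquired, apparently so that routines which may take other locks internally are no longer run with racct_lock held; only the computed results are published under the lock. Below is a minimal userspace sketch of that ordering pattern. proc_acct, get_limit() and sample_pcpu() are simplified stand-ins for illustration only, not the kernel API.

/*
 * Sketch of the lock-ordering fix: compute values that may need other
 * locks (the limit and the current %CPU estimate) first, then take the
 * accounting lock only to publish the results.  All names here are
 * illustrative, not FreeBSD kernel interfaces.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct proc_acct {
	pthread_mutex_t	acct_lock;	/* protects pcpu and over_limit */
	uint64_t	pcpu;		/* last published %CPU sample */
	int		over_limit;	/* set when pcpu >= limit */
};

/*
 * Stand-ins for the limit lookup and the %CPU sampling; assume they may
 * acquire their own locks internally, so they must not be called while
 * acct_lock is held.
 */
static uint64_t get_limit(struct proc_acct *pa) { (void)pa; return (50); }
static uint64_t sample_pcpu(struct proc_acct *pa) { (void)pa; return (75); }

static void
account_once(struct proc_acct *pa)
{
	uint64_t limit, pct;

	/* Gather everything that may need other locks up front. */
	limit = get_limit(pa);
	pct = sample_pcpu(pa);

	/* Publish under the accounting lock; it stays leaf-most. */
	pthread_mutex_lock(&pa->acct_lock);
	pa->pcpu = pct;
	pa->over_limit = (pct >= limit);
	pthread_mutex_unlock(&pa->acct_lock);
}

int
main(void)
{
	struct proc_acct pa;

	pthread_mutex_init(&pa.acct_lock, NULL);
	pa.pcpu = 0;
	pa.over_limit = 0;
	account_once(&pa);
	printf("pcpu=%llu over_limit=%d\n",
	    (unsigned long long)pa.pcpu, pa.over_limit);
	pthread_mutex_destroy(&pa.acct_lock);
	return (0);
}
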
Modified: soc2012/rudot/sys/kern/sched_4bsd.c
==============================================================================
--- soc2012/rudot/sys/kern/sched_4bsd.c Mon Jun 4 18:05:40 2012 (r237074)
+++ soc2012/rudot/sys/kern/sched_4bsd.c Mon Jun 4 18:39:50 2012 (r237075)
@@ -107,6 +107,10 @@
/* flags kept in td_flags */
#define TDF_DIDRUN TDF_SCHED0 /* thread actually ran. */
#define TDF_BOUND TDF_SCHED1 /* Bound to one CPU. */
+#ifdef RACCT
+#define TDF_RACCT_PCTCPU TDF_SCHED2
+#define TDF_RACCT_RQ TDF_SCHED3
+#endif
/* flags kept in ts_flags */
#define TSF_AFFINITY 0x0001 /* Has a non-"full" CPU set. */
@@ -693,9 +697,6 @@
{
struct pcpuidlestat *stat;
struct td_sched *ts;
-#ifdef RACCT
- u_int pct;
-#endif
THREAD_LOCK_ASSERT(td, MA_OWNED);
ts = td->td_sched;
@@ -718,14 +719,6 @@
stat = DPCPU_PTR(idlestat);
stat->oldidlecalls = stat->idlecalls;
stat->idlecalls = 0;
-
-#ifdef RACCT
- pct = racct_getpcpu(td->td_proc);
- racct_set_force(td->td_proc, RACCT_PCTCPU, pct);
- if (pct >= racct_get_limit(td->td_proc, RACCT_PCTCPU)) {
- pause("racct", hz);
- }
-#endif
}
/*
@@ -1239,7 +1232,18 @@
KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
KTR_ATTR_LINKED, sched_tdname(td));
-
+#ifdef RACCT
+ /*
+ * Skip adding threads to the run queue that have exceeded their
+ * racct pctcpu limits.  Also set a flag so that when the thread is
+ * again within its pctcpu limits, it is added back to the run
+ * queue.
+ */
+ if (td->td_flags & TDF_RACCT_PCTCPU) {
+ td->td_flags |= TDF_RACCT_RQ;
+ return;
+ }
+#endif
/*
* Now that the thread is moving to the run-queue, set the lock
* to the scheduler's lock.
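
The sched_4bsd.c change replaces the earlier pause()-based throttling in the clock path with a deferred-enqueue scheme: a thread flagged as over its pctcpu limit is not added to the run queue at wakeup, it only records that it wants to run, and racct_proc_enable() puts it back once the process is under the limit again. A small userspace sketch of that idea follows; the flag names and toy_* functions are illustrative stand-ins, not the kernel's td_flags handling or sched_add().

/*
 * Userspace sketch of the deferred-enqueue idea: the wakeup path skips
 * the enqueue for an over-limit thread and leaves a note, the enable
 * path honours that note later.  Names are illustrative only.
 */
#include <stdio.h>

#define	FLAG_OVER_PCTCPU	0x01	/* plays the role of TDF_RACCT_PCTCPU */
#define	FLAG_WANTS_RQ		0x02	/* plays the role of TDF_RACCT_RQ */

struct toy_thread {
	const char	*name;
	int		flags;
	int		on_runq;
};

/* Wakeup path: skip the enqueue if the thread is over its limit. */
static void
toy_wakeup(struct toy_thread *td)
{

	if (td->flags & FLAG_OVER_PCTCPU) {
		td->flags |= FLAG_WANTS_RQ;
		return;
	}
	td->on_runq = 1;
}

/* Enable path: clear the limit flag and honour a deferred enqueue. */
static void
toy_enable(struct toy_thread *td)
{

	td->flags &= ~FLAG_OVER_PCTCPU;
	if (td->flags & FLAG_WANTS_RQ) {
		td->flags &= ~FLAG_WANTS_RQ;
		td->on_runq = 1;
	}
}

int
main(void)
{
	struct toy_thread td = { "worker", FLAG_OVER_PCTCPU, 0 };

	toy_wakeup(&td);	/* deferred: stays off the run queue */
	printf("after wakeup: on_runq=%d\n", td.on_runq);
	toy_enable(&td);	/* back under the limit: re-added */
	printf("after enable: on_runq=%d\n", td.on_runq);
	return (0);
}
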