socsvn commit: r223064 - in soc2011/rudot: kern sys
rudot at FreeBSD.org
rudot at FreeBSD.org
Fri Jun 10 15:49:59 UTC 2011
Author: rudot
Date: Fri Jun 10 15:49:56 2011
New Revision: 223064
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=223064
Log:
Standalone scheduler
changed mapping of priorities to queues
Added:
soc2011/rudot/kern/kern_switch.c
soc2011/rudot/kern/sched_fbfs.c
- copied, changed from r222402, soc2011/rudot/kern/sched_4bsd.c
soc2011/rudot/sys/
soc2011/rudot/sys/runq.h
Deleted:
soc2011/rudot/kern/proc.h
soc2011/rudot/kern/sched_4bsd.c
Added: soc2011/rudot/kern/kern_switch.c
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ soc2011/rudot/kern/kern_switch.c Fri Jun 10 15:49:56 2011 (r223064)
@@ -0,0 +1,523 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder <jake at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/kern/kern_switch.c,v 1.146.2.1.6.1 2010/12/21 17:09:25 kensmith Exp $");
+
+#include "opt_sched.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+
+#include <machine/cpu.h>
+
+/* Uncomment this to enable logging of critical_enter/exit. */
+#if 0
+#define KTR_CRITICAL KTR_SCHED
+#else
+#define KTR_CRITICAL 0
+#endif
+
+#ifdef FULL_PREEMPTION
+#ifndef PREEMPTION
+#error "The FULL_PREEMPTION option requires the PREEMPTION option"
+#endif
+#endif
+
+CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
+
+/*
+ * kern.sched.preemption allows user space to determine if preemption support
+ * is compiled in or not. It is not currently a boot or runtime flag that
+ * can be changed.
+ */
+#ifdef PREEMPTION
+static int kern_sched_preemption = 1;
+#else
+static int kern_sched_preemption = 0;
+#endif
+SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
+ &kern_sched_preemption, 0, "Kernel preemption enabled");
+
+/*
+ * Support for scheduler stats exported via kern.sched.stats. All stats may
+ * be reset with kern.sched.stats.reset = 1. Stats may be defined elsewhere
+ * with SCHED_STAT_DEFINE().
+ */
+#ifdef SCHED_STATS
+SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
+
+/* Switch reasons from mi_switch(). */
+DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
+SCHED_STAT_DEFINE_VAR(uncategorized,
+ &DPCPU_NAME(sched_switch_stats[SWT_NONE]), "");
+SCHED_STAT_DEFINE_VAR(preempt,
+ &DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), "");
+SCHED_STAT_DEFINE_VAR(owepreempt,
+ &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
+SCHED_STAT_DEFINE_VAR(turnstile,
+ &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
+SCHED_STAT_DEFINE_VAR(sleepq,
+ &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
+SCHED_STAT_DEFINE_VAR(sleepqtimo,
+ &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), "");
+SCHED_STAT_DEFINE_VAR(relinquish,
+ &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
+SCHED_STAT_DEFINE_VAR(needresched,
+ &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
+SCHED_STAT_DEFINE_VAR(idle,
+ &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
+SCHED_STAT_DEFINE_VAR(iwait,
+ &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
+SCHED_STAT_DEFINE_VAR(suspend,
+ &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
+SCHED_STAT_DEFINE_VAR(remotepreempt,
+ &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
+SCHED_STAT_DEFINE_VAR(remotewakeidle,
+ &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");
+
+/*
+ * Sysctl handler for kern.sched.stats.reset.  Writing any non-zero value
+ * zeroes every per-CPU switch counter registered under _kern_sched_stats.
+ */
+static int
+sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
+{
+	struct sysctl_oid *p;
+	uintptr_t counter;	/* DPCPU offset of the counter being reset */
+	int error;
+	int val;
+	int i;
+
+	val = 0;
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error != 0 || req->newptr == NULL)
+		return (error);		/* read-only access or copy error */
+	if (val == 0)
+		return (0);		/* only a non-zero write resets */
+	/*
+	 * Traverse the list of children of _kern_sched_stats and reset each
+	 * to 0. Skip the reset entry.
+	 */
+	SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
+		if (p == oidp || p->oid_arg1 == NULL)
+			continue;
+		counter = (uintptr_t)p->oid_arg1;
+		/* Clear this counter on every present CPU. */
+		for (i = 0; i <= mp_maxid; i++) {
+			if (CPU_ABSENT(i))
+				continue;
+			*(long *)(dpcpu_off[i] + counter) = 0;
+		}
+	}
+	return (0);
+}
+
+SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
+ 0, sysctl_stats_reset, "I", "Reset scheduler statistics");
+#endif
+
+/************************************************************************
+ * Functions that manipulate runnability from a thread perspective. *
+ ************************************************************************/
+/*
+ * Select the thread that will be run next.
+ */
+struct thread *
+choosethread(void)
+{
+	struct thread *td;
+
+retry:
+	td = sched_choose();
+
+	/*
+	 * If we are in panic, only allow system threads,
+	 * plus the one we are running in, to be run.
+	 */
+	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
+	    (td->td_flags & TDF_INPANIC) == 0)) {
+		/* note that it is no longer on the run queue */
+		TD_SET_CAN_RUN(td);
+		/* Keep asking the scheduler until we get a permitted thread. */
+		goto retry;
+	}
+
+	TD_SET_RUNNING(td);
+	return (td);
+}
+
+/*
+ * Kernel thread preemption implementation. Critical sections mark
+ * regions of code in which preemptions are not allowed.
+ */
+void
+critical_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	/* Nesting count; preemption is deferred while it is non-zero. */
+	td->td_critnest++;
+	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
+	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
+}
+
+void
+critical_exit(void)
+{
+	struct thread *td;
+	int flags;
+
+	td = curthread;
+	KASSERT(td->td_critnest != 0,
+	    ("critical_exit: td_critnest == 0"));
+
+	if (td->td_critnest == 1) {
+		td->td_critnest = 0;
+		if (td->td_owepreempt) {
+			/*
+			 * A preemption was deferred while we were inside the
+			 * critical section; take it now.  Bump td_critnest
+			 * back to 1 first so that acquiring the thread lock
+			 * cannot itself trigger a recursive preemption, then
+			 * drop it once the lock is held.
+			 */
+			td->td_critnest = 1;
+			thread_lock(td);
+			td->td_critnest--;
+			flags = SW_INVOL | SW_PREEMPT;
+			if (TD_IS_IDLETHREAD(td))
+				flags |= SWT_IDLE;
+			else
+				flags |= SWT_OWEPREEMPT;
+			mi_switch(flags, NULL);
+			thread_unlock(td);
+		}
+	} else
+		td->td_critnest--;
+
+	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
+	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
+}
+
+/************************************************************************
+ * SYSTEM RUN QUEUE manipulations and tests *
+ ************************************************************************/
+/*
+ * Initialize a run structure.
+ */
+/*
+ * Initialize a run queue structure: clear all status bits and set up
+ * each per-priority queue as an empty tail queue.
+ */
+void
+runq_init(struct runq *rq)
+{
+	int qi;
+
+	bzero(rq, sizeof(*rq));
+	for (qi = 0; qi < RQ_NQS; qi++)
+		TAILQ_INIT(&rq->rq_queues[qi]);
+}
+
+/*
+ * Clear the status bit of the queue corresponding to priority level pri,
+ * indicating that it is empty.
+ */
+/*
+ * Mark the queue for priority level pri as empty by clearing its bit
+ * in the run queue status array.
+ */
+static __inline void
+runq_clrbit(struct runq *rq, int pri)
+{
+	struct rqbits *rqb;
+	int word;
+
+	rqb = &rq->rq_status;
+	word = RQB_WORD(pri);
+	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
+	    rqb->rqb_bits[word],
+	    rqb->rqb_bits[word] & ~RQB_BIT(pri),
+	    RQB_BIT(pri), word);
+	rqb->rqb_bits[word] &= ~RQB_BIT(pri);
+}
+
+/*
+ * Find the index of the first non-empty run queue. This is done by
+ * scanning the status bits, a set bit indicates a non-empty queue.
+ */
+/*
+ * Return the index of the first (highest priority) non-empty run queue,
+ * or -1 if every queue is empty.  A set status bit marks a non-empty
+ * queue.
+ */
+static __inline int
+runq_findbit(struct runq *rq)
+{
+	struct rqbits *rqb;
+	int pri;
+	int word;
+
+	rqb = &rq->rq_status;
+	for (word = 0; word < RQB_LEN; word++) {
+		if (rqb->rqb_bits[word] == 0)
+			continue;
+		pri = RQB_FFS(rqb->rqb_bits[word]) + (word << RQB_L2BPW);
+		CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
+		    rqb->rqb_bits[word], word, pri);
+		return (pri);
+	}
+	return (-1);
+}
+
+/*
+ * Like runq_findbit(), but start the scan at queue index 'pri' and wrap
+ * around to index 0 at most once, so every queue is inspected exactly
+ * one time.  Returns the queue index found or -1 if all are empty.
+ */
+static __inline int
+runq_findbit_from(struct runq *rq, u_char pri)
+{
+	struct rqbits *rqb;
+	rqb_word_t mask;
+	int i;
+
+	/*
+	 * Set the mask for the first word so we ignore priorities before 'pri'.
+	 */
+	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
+	rqb = &rq->rq_status;
+again:
+	/* After the first word, the mask resets to -1 (all bits). */
+	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
+		mask = rqb->rqb_bits[i] & mask;
+		if (mask == 0)
+			continue;
+		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
+		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
+		    mask, i, pri);
+		return (pri);
+	}
+	if (pri == 0)
+		return (-1);	/* already scanned from 0; nothing anywhere */
+	/*
+	 * Wrap back around to the beginning of the list just once so we
+	 * scan the whole thing.
+	 */
+	pri = 0;
+	goto again;
+}
+
+/*
+ * Set the status bit of the queue corresponding to priority level pri,
+ * indicating that it is non-empty.
+ */
+/*
+ * Mark the queue for priority level pri as non-empty by setting its bit
+ * in the run queue status array.
+ */
+static __inline void
+runq_setbit(struct runq *rq, int pri)
+{
+	struct rqbits *rqb;
+	int word;
+
+	rqb = &rq->rq_status;
+	word = RQB_WORD(pri);
+	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
+	    rqb->rqb_bits[word],
+	    rqb->rqb_bits[word] | RQB_BIT(pri),
+	    RQB_BIT(pri), word);
+	rqb->rqb_bits[word] |= RQB_BIT(pri);
+}
+
+/*
+ * Add the thread to the queue specified by its priority, and set the
+ * corresponding status bit.
+ */
+/*
+ * Add the thread to the queue chosen from its priority, and set the
+ * corresponding status bit.  Threads re-queued after being preempted
+ * are inserted at the head so they resume ahead of their peers.
+ */
+void
+runq_add(struct runq *rq, struct thread *td, int flags)
+{
+	struct rqhead *rqh;
+	int pri;
+
+#ifdef SCHED_FBFS
+	/*
+	 * FBFS mapping: every idle-class thread shares the single last
+	 * queue, every timeshare thread shares the queue before it, and
+	 * each realtime priority gets its own queue, clamped to
+	 * RQ_MAX_REALTIME (see sys/runq.h).  Priorities below
+	 * PRI_MIN_REALTIME -- presumably kernel/interrupt threads, TODO
+	 * confirm against sys/priority.h -- keep the classic RQ_PPQ
+	 * grouping.
+	 */
+	if (td->td_priority >= PRI_MIN_IDLE) {
+		pri = RQ_IDLE;
+	} else if (td->td_priority >= PRI_MIN_TIMESHARE) {
+		pri = RQ_TIMESHARE;
+	} else if (td->td_priority >= PRI_MIN_REALTIME) {
+		pri = min(RQ_MIN_REALTIME + td->td_priority - PRI_MIN_REALTIME,
+		    RQ_MAX_REALTIME);
+	} else {
+		pri = td->td_priority / RQ_PPQ;
+	}
+#else
+	/* Classic 4BSD mapping: RQ_PPQ priority levels per queue. */
+	pri = td->td_priority / RQ_PPQ;
+#endif
+
+	td->td_rqindex = pri;
+	runq_setbit(rq, pri);
+	rqh = &rq->rq_queues[pri];
+	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
+	    td, td->td_priority, pri, rqh);
+	if (flags & SRQ_PREEMPTED) {
+		TAILQ_INSERT_HEAD(rqh, td, td_runq);
+	} else {
+		TAILQ_INSERT_TAIL(rqh, td, td_runq);
+	}
+}
+
+/*
+ * Like runq_add(), but the caller supplies the queue index directly
+ * instead of deriving it from the thread's priority.
+ */
+void
+runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
+{
+	struct rqhead *rqh;
+
+	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
+	td->td_rqindex = pri;
+	runq_setbit(rq, pri);
+	rqh = &rq->rq_queues[pri];
+	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
+	    td, td->td_priority, pri, rqh);
+	/* Preempted threads go to the head so they resume first. */
+	if (flags & SRQ_PREEMPTED) {
+		TAILQ_INSERT_HEAD(rqh, td, td_runq);
+	} else {
+		TAILQ_INSERT_TAIL(rqh, td, td_runq);
+	}
+}
+/*
+ * Return true if there are runnable processes of any priority on the run
+ * queue, false otherwise. Has no side effects, does not modify the run
+ * queue structure.
+ */
+/*
+ * Report whether any queue holds a runnable thread: 1 if at least one
+ * status bit is set, 0 otherwise.  Purely a query; neither the queues
+ * nor the status bits are modified.
+ */
+int
+runq_check(struct runq *rq)
+{
+	struct rqbits *rqb;
+	int word;
+
+	rqb = &rq->rq_status;
+	for (word = 0; word < RQB_LEN; word++) {
+		if (rqb->rqb_bits[word] == 0)
+			continue;
+		CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
+		    rqb->rqb_bits[word], word);
+		return (1);
+	}
+	CTR0(KTR_RUNQ, "runq_check: empty");
+	return (0);
+}
+
+/*
+ * Find the highest priority process on the run queue.
+ */
+struct thread *
+runq_choose_fuzz(struct runq *rq, int fuzz)
+{
+	struct rqhead *rqh;
+	struct thread *td;
+	int pri;
+
+	while ((pri = runq_findbit(rq)) != -1) {
+		rqh = &rq->rq_queues[pri];
+		/* fuzz == 1 is normal.. 0 or less are ignored */
+		if (fuzz > 1) {
+			/*
+			 * In the first couple of entries, check if
+			 * there is one for our CPU as a preference.
+			 */
+			int count = fuzz;
+			int cpu = PCPU_GET(cpuid);
+			struct thread *td2;
+			td2 = td = TAILQ_FIRST(rqh);
+
+			/*
+			 * Scan up to 'fuzz' entries for a thread whose
+			 * last CPU was ours; otherwise keep the head.
+			 */
+			while (count-- && td2) {
+				if (td2->td_lastcpu == cpu) {
+					td = td2;
+					break;
+				}
+				td2 = TAILQ_NEXT(td2, td_runq);
+			}
+		} else
+			td = TAILQ_FIRST(rqh);
+		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
+		CTR3(KTR_RUNQ,
+		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
+		return (td);
+	}
+	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);
+
+	return (NULL);
+}
+
+/*
+ * Find the highest priority process on the run queue.
+ */
+/*
+ * Return (without dequeueing) the first thread on the highest priority
+ * non-empty queue, or NULL if the whole run queue is empty.
+ */
+struct thread *
+runq_choose(struct runq *rq)
+{
+	struct rqhead *rqh;
+	struct thread *td;
+	int pri;
+
+	pri = runq_findbit(rq);
+	if (pri == -1) {
+		CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);
+		return (NULL);
+	}
+	rqh = &rq->rq_queues[pri];
+	td = TAILQ_FIRST(rqh);
+	KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
+	CTR3(KTR_RUNQ,
+	    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
+	return (td);
+}
+
+/*
+ * Like runq_choose(), but begin the search at queue index 'idx',
+ * wrapping around at most once (see runq_findbit_from()).
+ */
+struct thread *
+runq_choose_from(struct runq *rq, u_char idx)
+{
+	struct rqhead *rqh;
+	struct thread *td;
+	int pri;
+
+	if ((pri = runq_findbit_from(rq, idx)) != -1) {
+		rqh = &rq->rq_queues[pri];
+		td = TAILQ_FIRST(rqh);
+		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
+		CTR4(KTR_RUNQ,
+		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
+		    pri, td, td->td_rqindex, rqh);
+		return (td);
+	}
+	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);
+
+	return (NULL);
+}
+/*
+ * Remove the thread from the queue specified by its priority, and clear the
+ * corresponding status bit if the queue becomes empty.
+ * Caller must set state afterwards.
+ */
+void
+runq_remove(struct runq *rq, struct thread *td)
+{
+
+	/* Convenience wrapper: no scan-index cursor to maintain. */
+	runq_remove_idx(rq, td, NULL);
+}
+
+/*
+ * Remove the thread from the queue recorded in its td_rqindex, clearing
+ * the status bit if that queue drains.  If 'idx' is a non-NULL scan
+ * cursor pointing at the emptied queue, advance it (mod RQ_NQS) past
+ * the now-empty slot.
+ */
+void
+runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
+{
+	struct rqhead *rqh;
+	u_char pri;
+
+	KASSERT(td->td_flags & TDF_INMEM,
+	    ("runq_remove_idx: thread swapped out"));
+	pri = td->td_rqindex;
+	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
+	rqh = &rq->rq_queues[pri];
+	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
+	    td, td->td_priority, pri, rqh);
+	TAILQ_REMOVE(rqh, td, td_runq);
+	if (TAILQ_EMPTY(rqh)) {
+		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
+		runq_clrbit(rq, pri);
+		if (idx != NULL && *idx == pri)
+			*idx = (pri + 1) % RQ_NQS;
+	}
+}
Copied and modified: soc2011/rudot/kern/sched_fbfs.c (from r222402, soc2011/rudot/kern/sched_4bsd.c)
==============================================================================
--- soc2011/rudot/kern/sched_4bsd.c Thu May 26 13:31:05 2011 (r222402, copy source)
+++ soc2011/rudot/kern/sched_fbfs.c Fri Jun 10 15:49:56 2011 (r223064)
@@ -229,91 +229,19 @@
}
/*
- * Constants for digital decay and forget:
- * 90% of (td_estcpu) usage in 5 * loadav time
- * 95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
- * Note that, as ps(1) mentions, this can let percentages
- * total over 100% (I've seen 137.9% for 3 processes).
- *
- * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
- *
- * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
- * That is, the system wants to compute a value of decay such
- * that the following for loop:
- * for (i = 0; i < (5 * loadavg); i++)
- * td_estcpu *= decay;
- * will compute
- * td_estcpu *= 0.1;
- * for all values of loadavg:
- *
- * Mathematically this loop can be expressed by saying:
- * decay ** (5 * loadavg) ~= .1
- *
- * The system computes decay as:
- * decay = (2 * loadavg) / (2 * loadavg + 1)
- *
- * We wish to prove that the system's computation of decay
- * will always fulfill the equation:
- * decay ** (5 * loadavg) ~= .1
- *
- * If we compute b as:
- * b = 2 * loadavg
- * then
- * decay = b / (b + 1)
- *
- * We now need to prove two things:
- * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
- * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
- *
- * Facts:
- * For x close to zero, exp(x) =~ 1 + x, since
- * exp(x) = 0! + x**1/1! + x**2/2! + ... .
- * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
- * For x close to zero, ln(1+x) =~ x, since
- * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1
- * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
- * ln(.1) =~ -2.30
- *
- * Proof of (1):
- * Solve (factor)**(power) =~ .1 given power (5*loadav):
- * solving for factor,
- * ln(factor) =~ (-2.30/5*loadav), or
- * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
- * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED
- *
- * Proof of (2):
- * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
- * solving for power,
- * power*ln(b/(b+1)) =~ -2.30, or
- * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. QED
- *
- * Actual power values for the implemented algorithm are as follows:
- * loadav: 1 2 3 4
- * power: 5.68 10.32 14.94 19.55
+ * This function is called when a thread is about to be put on run queue
+ * because it has been made runnable or its priority has been adjusted. It
+ * determines if the new thread should be immediately preempted to. If so,
+ * it switches to it and eventually returns true. If not, it returns false
+ * so that the caller may place the thread on an appropriate run queue.
*/
-
-/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
-#define loadfactor(loadav) (2 * (loadav))
-#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
-
-/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
-static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
-SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
-
-/*
- * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
- * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
- * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
- *
- * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
- * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
- *
- * If you don't want to bother with the faster/more-accurate formula, you
- * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
- * (more general) method of calculating the %age of CPU used by a process.
- */
-#define CCPU_SHIFT 11
-
+int
+maybe_preempt(struct thread *td)
+{
+#ifdef PREEMPTION
+	/* XXX(review): preemption decision not yet implemented for FBFS. */
+#endif
+	/* Stub: report "not preempted" so the caller enqueues the thread. */
+	return (0);
+}
/* ARGSUSED */
static void
Added: soc2011/rudot/sys/runq.h
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ soc2011/rudot/sys/runq.h Fri Jun 10 15:49:56 2011 (r223064)
@@ -0,0 +1,82 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder <jake at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/sys/runq.h,v 1.12.2.1.6.1 2010/12/21 17:09:25 kensmith Exp $
+ */
+
+#ifndef _RUNQ_H_
+#define _RUNQ_H_
+
+#include <machine/runq.h>
+
+struct thread;
+
+/*
+ * Run queue parameters.
+ */
+
+#define RQ_NQS (64) /* Number of run queues. */
+#define RQ_PPQ (4) /* Priorities per queue. */
+
+#ifdef SCHED_FBFS
+/*
+ * FBFS queue layout: the last queue holds every idle-class thread, the
+ * queue before it holds every timeshare thread, and the realtime range
+ * occupies one queue per priority in [RQ_MIN_REALTIME, RQ_MAX_REALTIME].
+ * NOTE(review): the literal 4 below looks like it should be RQ_PPQ --
+ * confirm before changing either constant.
+ */
+#define RQ_IDLE (RQ_NQS - 1)
+#define RQ_TIMESHARE (RQ_IDLE - 1)
+#define RQ_MIN_REALTIME (PRI_MIN_REALTIME / 4)
+#define RQ_MAX_REALTIME (RQ_TIMESHARE - 1)
+#endif
+
+/*
+ * Head of run queues.
+ */
+TAILQ_HEAD(rqhead, thread);
+
+/*
+ * Bit array which maintains the status of a run queue.  When a queue is
+ * non-empty the bit corresponding to the queue number will be set.
+ */
+struct rqbits {
+	rqb_word_t rqb_bits[RQB_LEN];
+};
+
+/*
+ * Run queue structure.  Contains an array of run queues on which threads
+ * are placed, and a structure to maintain the status of each queue.
+ */
+struct runq {
+	struct rqbits rq_status;
+	struct rqhead rq_queues[RQ_NQS];
+};
+
+void runq_add(struct runq *, struct thread *, int);
+void runq_add_pri(struct runq *, struct thread *, u_char, int);
+int runq_check(struct runq *);
+struct thread *runq_choose(struct runq *);
+struct thread *runq_choose_from(struct runq *, u_char);
+struct thread *runq_choose_fuzz(struct runq *, int);
+void runq_init(struct runq *);
+void runq_remove(struct runq *, struct thread *);
+void runq_remove_idx(struct runq *, struct thread *, u_char *);
+
+#endif /* !_RUNQ_H_ */
More information about the svn-soc-all
mailing list