PERFORCE change 56557 for review
Julian Elischer
julian at FreeBSD.org
Mon Jul 5 16:30:27 PDT 2004
http://perforce.freebsd.org/chv.cgi?CH=56557
Change 56557 by julian at julian_jules1 on 2004/07/05 23:29:46
Partway to getting rid of struct kse.
Most of its features are taken over by the td_sched substructure,
which is always present on each thread.
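For reference, the pattern being introduced looks like this in miniature (a sketch with an abbreviated field list, not the literal layout from the diff below): every struct thread carries a scheduler-private td_sched, and accessor macros keep the old-style td->field spellings compiling.

struct td_sched;

struct thread {
	struct td_sched *td_sched;	/* scheduler-private part, one per thread */
	/* ... the rest of struct thread ... */
};

struct td_sched {
	fixpt_t	std_pctcpu;	/* %cpu, formerly ke_pctcpu */
	int	std_cpticks;	/* ticks of cpu time, formerly ke_cpticks */
};

/* Defined after the structs so the macros cannot mangle the field names. */
#define td_pctcpu	td_sched->std_pctcpu
#define td_cpticks	td_sched->std_cpticks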
Affected files ...
.. //depot/projects/nsched/sys/kern/sched_4bsd.c#21 edit
.. //depot/projects/nsched/sys/sys/proc.h#18 edit
Differences ...
==== //depot/projects/nsched/sys/kern/sched_4bsd.c#21 (text+ko) ====
@@ -55,63 +55,7 @@
#include <vm/uma.h>
#include <machine/critical.h>
-/********************************************************************
- * Definitions of the "KSE" structure.
- *
- * This is a scheduler private structure that it uses (for now)
- * to implement the thread fairness algorithm.
- * The threads are made runnable by the rest of the system, but
- * only NKSE of them are actually put on the run queues to compete with
- * threads from other processes. For normal processes there is only one KSE
- * but for threaded processes we allocate only up to NKSE kses per ksegrp.
- * KSEs are what is put on the system run queue, so multithreaded
- * processes must multiplex their threads onto their KSEs, making them compete
- * on a similar basis to nonthreaded processes.
- * In pictures:
- * With a single run queue used by all processors:
- *
- * RUNQ: --->KSE---KSE--...
- * | /
- * KSEG---THREAD--THREAD--THREAD
- *
- * (processors run THREADs from the KSEG until they are exhausted or
- * the KSEG exhausts its quantum)
- *
- * XXX This structure is, strictly speaking, not needed any more
- * and its fields will move to the thread structure. The first 'N'
- * threads can be kept track of with a simple count. Do this soon.
- */
-
-struct kse {
- struct proc *ke_proc; /* (*) Associated process. */
- struct ksegrp *ke_ksegrp; /* (*) Associated KSEG. */
- TAILQ_ENTRY(kse) ke_kglist; /* (*) Queue of KSEs in ke_ksegrp. */
- TAILQ_ENTRY(kse) ke_kgrlist; /* (*) Queue of KSEs in this state. */
- TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */
- struct thread *ke_thread; /* (*) Active associated thread. */
-#define ke_startzero ke_flags
- int ke_flags; /* (j) KEF_* flags. */
- fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
- u_char ke_oncpu; /* (j) Which cpu we are on. */
- char ke_rqindex; /* (j) Run queue index. */
- enum {
- KES_UNUSED = 0x0,
- KES_IDLE,
- KES_ONRUNQ,
- KES_UNQUEUED, /* in transit */
- KES_THREAD /* slaved to thread state */
- } ke_state; /* (j) KSE status. */
-#define ke_endzero ke_cpticks
- int ke_cpticks; /* (j) Ticks of cpu time. */
- struct runq *ke_runq; /* runq the kse is currently on */
-};
-
-/* flags kept in ke_flags */
-#define KEF_BOUND 0x00001 /* Stuck on a cpu.. long term */
-#define KEF_EXIT 0x00002 /* KSE is being killed. */
-#define KEF_DIDRUN 0x00004 /* KSE actually ran. */
-
/***************************************************************
* Scheduler private extensions to thread, ksegrp and proc structures.
*
@@ -131,42 +75,57 @@
*/
struct td_sched {
- struct kse *std_last_kse; /* (j) Previous value of td_kse. */
- struct kse *std_kse; /* (j) Current KSE if running. */
+ TAILQ_ENTRY(td_sched) std_kgrlist; /* (*) Queue of threads in this state. */
+ TAILQ_ENTRY(td_sched) std_procq; /* (j/z) Run queue. */
+
+ struct thread *std_thread; /* (*) Active associated thread. */
+#define std_startzero std_pctcpu
+ fixpt_t std_pctcpu; /* (j) %cpu during p_swtime. */
+ u_char std_oncpu; /* (j) Which cpu we are on. */
+ char std_rqindex; /* (j) Run queue index. */
+ enum {
+ STDS_UNUSED = 0x0,
+ STDS_IDLE,
+ STDS_ONRUNQ,
+ STDS_UNQUEUED, /* in transit */
+ STDS_THREAD /* slaved to thread state */
+ } std_state; /* (j) td_sched status. */
+#define std_endzero std_cpticks
+ int std_cpticks; /* (j) Ticks of cpu time. */
+ struct runq *std_runq; /* runq the td_sched is currently on */
};
-#define td_last_kse td_sched->std_last_kse
-#define td_kse td_sched->std_kse
+#define td_kgrlist td_sched->std_kgrlist
+#define td_procq td_sched->std_procq
+#define td_thread td_sched->std_thread
+#define td_pctcpu td_sched->std_pctcpu
+#define td_oncpu td_sched->std_oncpu
+#define td_rqindex td_sched->std_rqindex
+#define td_state td_sched->std_state
+#define td_cpticks td_sched->std_cpticks
+#define td_runq td_sched->std_runq
+
+
+/* flags kept in td_flags */
+#define TDF_BOUND TDF_SCHED1 /* Stuck on a cpu.. long term */
+#define TDF_EXIT TDF_SCHED2 /* thread is being killed. */
+#define TDF_DIDRUN TDF_SCHED3 /* thread actually ran. */
struct kg_sched {
- TAILQ_HEAD(, kse) skg_kseq; /* (ke_kglist) All KSEs. */
- TAILQ_HEAD(, kse) skg_iq; /* (ke_kgrlist) All idle KSEs. */
struct thread *skg_last_assigned; /* (j) Last thread assigned */
- /* ( to a KSE). */
- int skg_runq_kses; /* (j) Num KSEs on runq. */
- int skg_idle_kses; /* (j) Num KSEs on iq. */
- int skg_kses; /* (j) Num KSEs in group. */
+ /* to the system scheduler */
+ int skg_runq_threads; /* (j) Num threads on runq. */
+ int skg_avail_opennings; /* (j) Num available slots. */
 int skg_concurrancy; /* (j) desired concurrency */
};
-#define kg_kseq kg_sched->skg_kseq
-#define kg_iq kg_sched->skg_iq
#define kg_last_assigned kg_sched->skg_last_assigned
-#define kg_runq_kses kg_sched->skg_runq_kses
-#define kg_idle_kses kg_sched->skg_idle_kses
-#define kg_kses kg_sched->skg_kses
+#define kg_runq_threads kg_sched->skg_runq_threads
+#define kg_avail_opennings kg_sched->skg_avail_opennings
+#define kg_concurrancy kg_sched->skg_concurrancy
-#define FIRST_KSE_IN_KSEGRP(kg) TAILQ_FIRST(&(kg)->kg_kseq)
-#define FIRST_KSE_IN_PROC(p) FIRST_KSE_IN_KSEGRP(FIRST_KSEGRP_IN_PROC(p))
-
/****************************************************************
* function prototypes
*/
-static void kse_free(struct kse *ke);
-static void kse_stash(struct kse *ke);
-static void kse_reassign(struct kse *ke);
-static struct kse * kse_alloc(void);
-static void kse_link(struct kse *ke, struct ksegrp *kg);
-static void kse_unlink(struct kse *ke);
+static void recycle_slot(struct ksegrp *kg); /* was kse_reassign */
#define KTR_4BSD 0x0
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
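The kg_avail_opennings / recycle_slot() pairing replaces the old idle-KSE queue: a ksegrp gets kg_concurrancy slots, and when a slot frees up (a thread in the group exits or blocks) another runnable thread can be handed to the system scheduler. The body of recycle_slot() falls in the portion of this diff truncated from the mail, so the following is only an accounting sketch; the kg_runq list and td_runqlink linkage are illustrative names, not fields from this change.

static void
recycle_slot_sketch(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the next runnable thread not yet handed to the
		 * system scheduler; kg_last_assigned remembers where
		 * the previous scan stopped.
		 */
		if (kg->kg_last_assigned != NULL)
			td = TAILQ_NEXT(kg->kg_last_assigned, td_runqlink);
		else
			td = TAILQ_FIRST(&kg->kg_runq);
		if (td == NULL)
			return;			/* no waiter; keep the spare slot */
		kg->kg_last_assigned = td;
		kg->kg_avail_opennings--;	/* slot is now in use */
		sched_add(td);			/* onto the system run queue */
	}
}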
@@ -185,15 +144,15 @@
#endif
#define NICE_WEIGHT 1 /* Priorities per nice level. */
-#define KE_RUNQ_PCPU(ke) \
- ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)
+#define STD_RUNQ_PCPU(std) \
+ ((std)->std_runq != 0 && (std)->std_runq != &runq)
/*
- * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
+ * TD_CAN_MIGRATE macro returns true if the thread can migrate between
* cpus.
*/
-#define KSE_CAN_MIGRATE(ke) \
- ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
+#define TD_CAN_MIGRATE(td) \
+ ((td)->td_pinned == 0 && ((td)->td_flags & TDF_BOUND) == 0)
static int sched_tdcnt; /* Total runnable threads in the system. */
static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */
@@ -211,7 +170,7 @@
static void resetpriority(struct ksegrp *kg);
static void sched_add(struct thread *td);
static void sched_rem(struct thread *td);
-static struct kse * sched_choose(void);
+static struct td_sched * sched_choose(void);
static void adjustrunqueue(struct thread *td, int newpri);
@@ -264,8 +223,6 @@
#include <machine/runq.h>
-struct kse;
-
/*
* Run queue parameters.
*/
@@ -276,7 +233,7 @@
/*
* Head of run queues.
*/
-TAILQ_HEAD(rqhead, kse);
+TAILQ_HEAD(rqhead, td_sched);
/*
* Bit array which maintains the status of a run queue. When a queue is
@@ -295,11 +252,11 @@
struct rqhead rq_queues[RQ_NQS];
};
-static void runq_add(struct runq *, struct kse *);
+static void runq_add(struct runq *, struct td_sched *);
static int runq_check(struct runq *);
-static struct kse *runq_choose(struct runq *);
+static struct td_sched *runq_choose(struct runq *);
static void runq_init(struct runq *);
-static void runq_remove(struct runq *, struct kse *);
+static void runq_remove(struct runq *, struct td_sched *);
#endif /* end of Jake copyright file */
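With the run queues now threaded through td_sched (note the TAILQ_HEAD above), runq_choose() hands back a td_sched instead of a kse. Its selection remains the usual 4BSD bit-scan; below is a rough sketch assuming the rq_status/rqb_bits/RQB_BPW names of the stock runq implementation (the real code, past the truncation point, uses an ffs()-style runq_findbit() rather than a linear loop).

static struct td_sched *
runq_choose_sketch(struct runq *rq)
{
	int pri;

	/* The lowest-numbered non-empty queue is the best priority. */
	for (pri = 0; pri < RQ_NQS; pri++) {
		if (rq->rq_status.rqb_bits[pri / RQB_BPW] &
		    ((rqb_word_t)1 << (pri % RQB_BPW)))
			return (TAILQ_FIRST(&rq->rq_queues[pri]));
	}
	return (NULL);
}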
/*
@@ -363,7 +320,7 @@
{
mtx_assert(&sched_lock, MA_OWNED);
- if (td->td_priority < curthread->td_priority && curthread->td_kse)
+ if (td->td_priority < curthread->td_priority && curthread->td_sched)
curthread->td_flags |= TDF_NEEDRESCHED;
}
@@ -397,7 +354,7 @@
/*
* Constants for digital decay and forget:
* 90% of (kg_estcpu) usage in 5 * loadav time
- * 95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
+ * 95% of (td_pctcpu) usage in 60 seconds (load insensitive)
* Note that, as ps(1) mentions, this can let percentages
* total over 100% (I've seen 137.9% for 3 processes).
*
@@ -462,7 +419,7 @@
#define loadfactor(loadav) (2 * (loadav))
#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
-/* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
+/* decay 95% of `td_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
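The "95% in 60 seconds" figure follows directly from ccpu: schedcpu() scales each td_pctcpu by ccpu = exp(-1/20) once per second, so after 60 seconds only exp(-60/20) = exp(-3), about 5%, of the old value survives. A standalone userland check (ordinary C, not kernel code):

#include <math.h>
#include <stdio.h>

int
main(void)
{
	double pctcpu = 1.0;	/* a fully busy thread's %cpu */
	int t;

	for (t = 0; t < 60; t++)
		pctcpu *= exp(-1.0 / 20.0);	/* one schedcpu() pass */
	printf("left after 60s: %.4f\n", pctcpu);	/* prints ~0.0498 */
	return (0);
}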
@@ -493,7 +450,7 @@
register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
struct thread *td;
struct proc *p;
- struct kse *ke;
+ struct td_sched *std;
struct ksegrp *kg;
int awake, realstathz;
@@ -511,54 +468,57 @@
p->p_swtime++;
FOREACH_KSEGRP_IN_PROC(p, kg) {
awake = 0;
- FOREACH_KSE_IN_GROUP(kg, ke) {
+ FOREACH_THREAD(kg, td) {
+ std = td->td_sched;
/*
* Increment sleep time (if sleeping). We
* ignore overflow, as above.
*/
/*
+ * *** XXXKSE ** no longer true.... FIX
* The kse slptimes are not touched in wakeup
* because the thread may not HAVE a KSE.
*/
- if (ke->ke_state == KES_ONRUNQ) {
+ if (std->std_state == STDS_ONRUNQ) {
awake = 1;
- ke->ke_flags &= ~KEF_DIDRUN;
- } else if ((ke->ke_state == KES_THREAD) &&
- (TD_IS_RUNNING(ke->ke_thread))) {
+ td->td_flags &= ~TDF_DIDRUN;
+ } else if ((std->std_state == STDS_THREAD) &&
+ (TD_IS_RUNNING(td))) {
awake = 1;
- /* Do not clear KEF_DIDRUN */
- } else if (ke->ke_flags & KEF_DIDRUN) {
+ /* Do not clear TDF_DIDRUN */
+ } else if (td->td_flags & TDF_DIDRUN) {
awake = 1;
- ke->ke_flags &= ~KEF_DIDRUN;
- }
-
+ td->td_flags &= ~TDF_DIDRUN;
+ } else
+ continue;
+
/*
- * ke_pctcpu is only for ps and ttyinfo().
+ * std_pctcpu is only for ps and ttyinfo().
* Do it per kse, and add them up at the end?
* XXXKSE
*/
- ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
+ std->std_pctcpu = (std->std_pctcpu * ccpu) >>
FSHIFT;
/*
- * If the kse has been idle the entire second,
+ * If the thread has been idle the entire second,
* stop recalculating its priority until
* it wakes up.
*/
- if (ke->ke_cpticks == 0)
+ if (std->std_cpticks == 0)
continue;
#if (FSHIFT >= CCPU_SHIFT)
- ke->ke_pctcpu += (realstathz == 100)
- ? ((fixpt_t) ke->ke_cpticks) <<
+ std->std_pctcpu += (realstathz == 100)
+ ? ((fixpt_t) std->std_cpticks) <<
(FSHIFT - CCPU_SHIFT) :
- 100 * (((fixpt_t) ke->ke_cpticks)
+ 100 * (((fixpt_t) std->std_cpticks)
<< (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
- ke->ke_pctcpu += ((FSCALE - ccpu) *
- (ke->ke_cpticks *
+ std->std_pctcpu += ((FSCALE - ccpu) *
+ (std->std_cpticks *
FSCALE / realstathz)) >> FSHIFT;
#endif
- ke->ke_cpticks = 0;
- } /* end of kse loop */
+ std->std_cpticks = 0;
+ } /* end of thread loop */
/*
* If there are ANY running threads in this KSEGRP,
* then don't count it as sleeping.
@@ -715,7 +675,7 @@
}
/*
- * report teh round-robin scheduling interval.
+ * report the round-robin scheduling interval.
* Called from:
* (posix scheduling interface)
*/
@@ -747,13 +707,11 @@
sched_clock(struct thread *td)
{
struct ksegrp *kg;
- struct kse *ke;
mtx_assert(&sched_lock, MA_OWNED);
kg = td->td_ksegrp;
- ke = td->td_kse;
- ke->ke_cpticks++;
+ td->td_sched->std_cpticks++;
kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
resetpriority(kg);
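For context, resetpriority() (past the truncation point) is what turns kg_estcpu back into a user priority. For timeshare ksegrps it is roughly the sketch below, the stock 4BSD calculation, which this change does not alter:

static void
resetpriority_sketch(struct ksegrp *kg)
{
	unsigned int newpriority;

	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER +
		    kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
}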
@@ -765,7 +723,7 @@
/*
* charge childs scheduling cpu usage to parent.
*
- * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
+ * XXXKSE assume only one thread & ksegrp keep estcpu in each ksegrp.
* Charge it to the ksegrp that did the wait since process estcpu is sum of
* all ksegrps, this is strictly as expected. Assume that the child process
* aggregated all the estcpu into the 'built-in' ksegrp.
@@ -780,13 +738,14 @@
mtx_assert(&sched_lock, MA_OWNED);
kg = FIRST_KSEGRP_IN_PROC(parent); /* XXXKSE */
- td->td_kse->ke_flags |= KEF_EXIT;
+ td->td_flags |= TDF_EXIT;
kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu +
td->td_ksegrp->kg_estcpu);
}
/*
* We should give the estcpu to someone, but WHO?
+ * Only called on exit of last thread in the ksegrp.
* Called from:
* thread_exit() (for threaded programs only)
*/
@@ -805,14 +764,10 @@
void
sched_thread_exit(struct thread *td)
{
- struct kse *ke;
- ke = td->td_kse;
-
- if ((td->td_proc->p_flag & P_SA) && ke != NULL) {
- ke->ke_thread = NULL;
- td->td_kse = NULL;
- kse_reassign(ke);
+ if (td->td_proc->p_flag & P_SA) {
+ td->td_ksegrp->kg_avail_opennings++;
+ recycle_slot(td->td_ksegrp);
}
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
@@ -829,23 +784,15 @@
void
sched_thr_exit(struct thread *td)
{
- struct kse *ke;
-
- ke = td->td_kse;
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
-
- /* td is about to be freed, but keep it clean */
- td->td_kse = NULL;
- td->td_last_kse = NULL;
- kse_unlink(ke); /* also frees it */
}
/*
* Allocate any resources the scheduler needs to allocate or set up
- * on a new process at fork() time.
+ * on a new process at fork() time. Set up our scheduler-specific extensions.
* Called from:
* fork1()
*/
@@ -853,27 +800,33 @@
sched_fork(struct thread *td, struct proc *child)
{
struct thread *newtd;
- struct kse *newke;
+ struct td_sched *newstd;
newtd = FIRST_THREAD_IN_PROC(child);
- newke = FIRST_KSE_IN_PROC(child);
- bzero(&newke->ke_startzero,
- (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
- newke->ke_state = KES_THREAD;
- newke->ke_cpticks = 0;
- sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(child));
- newke->ke_thread = newtd;
- newtd->td_kse = newke;
+ newstd = newtd->td_sched;
+ bzero(&newstd->std_startzero,
+ (unsigned)RANGEOF(struct td_sched, std_startzero, std_endzero));
+ newstd->std_state = STDS_THREAD;
+ newstd->std_cpticks = 0;
+
+ newtd->td_ksegrp->kg_concurrancy = 1;
+ /* Non-threaded process; ignore the thread fairness machinery. */
+ newtd->td_ksegrp->kg_avail_opennings = 0;
+ /* Our child inherits our estimated cpu requirement. */
+ newtd->td_ksegrp->kg_estcpu = td->td_ksegrp->kg_estcpu;
}
/*
- * When we make a new ksegrp, teh scheduler may need to know about it.
+ * When we make a new ksegrp, the scheduler may need to know about it.
+ * Make sure the new ksegrp inherits our cpu estimates.
* Called from:
* kse_create()
*/
void
sched_fork_ksegrp(struct thread *td, struct ksegrp *newkg)
{
+ newkg->kg_concurrancy = 1;
+ newkg->kg_avail_opennings = 1;
newkg->kg_estcpu = td->td_ksegrp->kg_estcpu;
}
@@ -958,21 +911,20 @@
void
sched_switch(struct thread *td, int flags, struct thread *newtd)
{
- struct kse *ke;
+ struct td_sched *std;
struct proc *p;
- ke = td->td_kse;
+ std = td->td_sched;
p = td->td_proc;
mtx_assert(&sched_lock, MA_OWNED);
- KASSERT((ke->ke_state == KES_THREAD), ("sched_switch: kse state?"));
+ KASSERT((std->std_state == STDS_THREAD), ("sched_switch: td_sched state?"));
if ((p->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
if (newtd != NULL && (newtd->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt++;
td->td_lastcpu = td->td_oncpu;
- td->td_last_kse = ke;
td->td_flags &= ~(TDF_NEEDRESCHED | TDF_OWEPREEMPT);
td->td_oncpu = NOCPU;
/*
@@ -984,22 +936,26 @@
if (td == PCPU_GET(idlethread))
TD_SET_CAN_RUN(td);
else if (TD_IS_RUNNING(td)) {
- /* Put us back on the run queue (kse and all). */
+ /* Put us back on the run queue. */
+ if (p->p_flag & P_SA)
+ td->td_ksegrp->kg_avail_opennings++;
setrunqueue(td);
} else if (p->p_flag & P_SA) {
/*
* We will not be on the run queue. So we must be
* sleeping or similar. As it's available,
- * someone else can use the KSE if they need it.
+ * another thread could use our 'slot'.
*/
- kse_reassign(ke);
+ td->td_ksegrp->kg_avail_opennings++;
+ recycle_slot(td->td_ksegrp);
}
if (newtd == NULL)
newtd = choosethread(flags);
- if (td != newtd)
+ if (td != newtd) {
cpu_switch(td, newtd);
- sched_lock.mtx_lock = (uintptr_t)td;
- td->td_oncpu = PCPU_GET(cpuid);
+ sched_lock.mtx_lock = (uintptr_t)td;
+ td->td_oncpu = PCPU_GET(cpuid);
+ }
}
/*
@@ -1029,24 +985,21 @@
* the fact that the thread is becoming runnable,
* and decide which run queue to use.
* Called from:
- * kse_reassign() (local)
+ * recycle_slot() (local)
* adjustrunqueue() (local)
* setrunqueue() (local)
*/
static void
sched_add(struct thread *td)
{
- struct kse *ke;
+ struct td_sched *std;
- ke = td->td_kse;
+ std = td->td_sched;
mtx_assert(&sched_lock, MA_OWNED);
- KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
- KASSERT((ke->ke_thread->td_kse != NULL),
- ("sched_add: No KSE on thread"));
- KASSERT(ke->ke_state != KES_ONRUNQ,
- ("sched_add: kse %p (%s) already in run queue", ke,
- ke->ke_proc->p_comm));
- KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
+ KASSERT(std->std_state != STDS_ONRUNQ,
+ ("sched_add: td_sched %p (%s) already in run queue", std,
+ td->td_proc->p_comm));
+ KASSERT(td->td_proc->p_sflag & PS_INMEM,
("sched_add: process swapped out"));
#ifdef SMP
@@ -1054,32 +1007,32 @@
* Only try to preempt if the thread is unpinned or pinned to the
* current CPU.
*/
- if (KSE_CAN_MIGRATE(ke) || ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)])
+ if (TD_CAN_MIGRATE(td) || std->std_runq == &runq_pcpu[PCPU_GET(cpuid)])
#endif
if (maybe_preempt(td))
return;
- ke->ke_ksegrp->kg_runq_kses++;
- ke->ke_state = KES_ONRUNQ;
+ td->td_ksegrp->kg_runq_threads++;
+ std->std_state = STDS_ONRUNQ;
#ifdef SMP
- if (KSE_CAN_MIGRATE(ke)) {
- CTR1(KTR_4BSD, "adding kse:%p to gbl runq", ke);
- ke->ke_runq = &runq;
+ if (TD_CAN_MIGRATE(td)) {
+ CTR1(KTR_4BSD, "adding thread:%p to gbl runq", td);
+ std->std_runq = &runq;
} else {
- CTR1(KTR_4BSD, "adding kse:%p to pcpu runq", ke);
- if (!KE_RUNQ_PCPU(ke))
- ke->ke_runq = &runq_pcpu[PCPU_GET(cpuid)];
+ CTR1(KTR_4BSD, "adding thread:%p to pcpu runq", td);
+ if (!STD_RUNQ_PCPU(std))
+ std->std_runq = &runq_pcpu[PCPU_GET(cpuid)];
}
#else
- ke->ke_runq = &runq;
+ std->std_runq = &runq;
#endif
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt++;
- runq_add(ke->ke_runq, ke);
+ runq_add(std->std_runq, std);
}
/*
- * Change scheduler stats to reflect removal of a thread from teh run queues.
+ * Change scheduler stats to reflect removal of a thread from the run queues.
* Called from:
* remrunqueue() (local)
* adjustrunqueue() (local)
@@ -1088,70 +1041,21 @@
static void
sched_rem(struct thread *td)
{
- struct kse *ke;
+ struct td_sched *std;
- ke = td->td_kse;
- KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
+ std = td->td_sched;
+ KASSERT(td->td_proc->p_sflag & PS_INMEM,
("sched_rem: process swapped out"));
- KASSERT((ke->ke_state == KES_ONRUNQ),
+ KASSERT((std->std_state == STDS_ONRUNQ),
("sched_rem: KSE not on run queue"));
mtx_assert(&sched_lock, MA_OWNED);
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
- runq_remove(ke->ke_runq, ke);
+ runq_remove(std->std_runq, std);
- ke->ke_state = KES_THREAD;
- ke->ke_ksegrp->kg_runq_kses--;
-}
-
-/*
- * Try to find a runnable thread from the per-cpu or
- * general run queues and adjust scheduler stats accordingly.
- * Called from:
- * choosethread() (local)
- */
-static struct kse *
-sched_choose(void)
-{
- struct kse *ke;
- struct runq *rq;
-
-#ifdef SMP
- struct kse *kecpu;
-
- rq = &runq;
- ke = runq_choose(&runq);
- kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
-
- if (ke == NULL ||
- (kecpu != NULL &&
- kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
- CTR2(KTR_4BSD, "choosing kse %p from pcpu runq %d", kecpu,
- PCPU_GET(cpuid));
- ke = kecpu;
- rq = &runq_pcpu[PCPU_GET(cpuid)];
- } else {
- CTR1(KTR_4BSD, "choosing kse %p from main runq", ke);
- }
-
-#else
- rq = &runq;
- ke = runq_choose(&runq);
-#endif
-
- if (ke != NULL) {
- runq_remove(rq, ke);
- ke->ke_state = KES_THREAD;
-
- KASSERT((ke->ke_thread != NULL),
- ("sched_choose: No thread on KSE"));
- KASSERT((ke->ke_thread->td_kse != NULL),
- ("sched_choose: No KSE on thread"));
- KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
- ("sched_choose: process swapped out"));
- }
- return (ke);
+ std->std_state = STDS_THREAD;
+ td->td_ksegrp->kg_runq_threads--;
}
/*
@@ -1188,21 +1092,21 @@
void
sched_bind(struct thread *td, int cpu)
{
- struct kse *ke;
+ struct td_sched *std;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT(TD_IS_RUNNING(td),
("sched_bind: cannot bind non-running thread"));
- ke = td->td_kse;
+ std = td->td_sched;
- ke->ke_flags |= KEF_BOUND;
+ td->td_flags |= TDF_BOUND;
#ifdef SMP
- ke->ke_runq = &runq_pcpu[cpu];
+ std->std_runq = &runq_pcpu[cpu];
if (PCPU_GET(cpuid) == cpu)
return;
- ke->ke_state = KES_THREAD;
+ std->std_state = STDS_THREAD;
mi_switch(SW_VOL, NULL);
#endif
@@ -1217,7 +1121,7 @@
sched_unbind(struct thread* td)
{
mtx_assert(&sched_lock, MA_OWNED);
- td->td_kse->ke_flags &= ~KEF_BOUND;
+ td->td_flags &= ~TDF_BOUND;
}
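A usage sketch for the bind interface: the caller holds sched_lock (per the mtx_assert) and must be running. run_pinned() here is a hypothetical helper, not part of this change.

/* Hypothetical helper: run fn() pinned to a given cpu, then unpin. */
static void
run_pinned(int cpu, void (*fn)(void *), void *arg)
{
	mtx_lock_spin(&sched_lock);
	sched_bind(curthread, cpu);	/* may mi_switch() to reach 'cpu' */
	mtx_unlock_spin(&sched_lock);

	fn(arg);			/* guaranteed to execute on 'cpu' */

	mtx_lock_spin(&sched_lock);
	sched_unbind(curthread);	/* free to migrate again */
	mtx_unlock_spin(&sched_lock);
}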
/*
@@ -1265,29 +1169,13 @@
fixpt_t
sched_pctcpu(struct thread *td)
{
- struct kse *ke;
- ke = td->td_kse;
- if (ke == NULL)
- ke = td->td_last_kse;
- if (ke)
- return (ke->ke_pctcpu);
-
- return (0);
+ return (td->td_sched->std_pctcpu);
}
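std_pctcpu remains FSHIFT-bit fixed point, so a ps(1)-style consumer scales the return value along these lines (a sketch; thread_cpu_percent() is a hypothetical helper):

static int
thread_cpu_percent(struct thread *td)
{
	fixpt_t pct;

	mtx_lock_spin(&sched_lock);
	pct = sched_pctcpu(td);
	mtx_unlock_spin(&sched_lock);
	/* As noted above, totals may legitimately exceed 100%. */
	return ((pct * 100) >> FSHIFT);
}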
-
-
-static uma_zone_t kse_zone;
-
-struct kse kse0;
static struct kg_sched kg_sched0;
static struct td_sched td_sched0;
-
-extern struct mtx kse_zombie_lock;
-TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
-
/*
* Occasionally the scheduler may need to do some GC..
* Called from:
@@ -1296,20 +1184,6 @@
void
sched_GC(void)
{
- struct kse *ke_first, *ke_next;
-
- if (!TAILQ_EMPTY(&zombie_kses)) {
- mtx_lock_spin(&kse_zombie_lock);
- ke_first = TAILQ_FIRST(&zombie_kses);
- if (ke_first)
- TAILQ_INIT(&zombie_kses);
- mtx_unlock_spin(&kse_zombie_lock);
- while (ke_first) {
- ke_next = TAILQ_NEXT(ke_first, ke_procq);
- kse_free(ke_first);
- ke_first = ke_next;
- }
- }
}
/*
@@ -1328,19 +1202,14 @@
proc0.p_sched = NULL; /* XXX */
thread0.td_sched = &td_sched0;
- /*
- * and link in our own per scheduler struct
- */
- kse_link(&kse0, &ksegrp0);
/*
- * and set it up as if BOUND and running
+ * and set it up as if running
*/
- kse0.ke_thread = &thread0;
- thread0.td_kse = &kse0; /* we are running */
- kse0.ke_state = KES_THREAD;
+ td_sched0.std_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
+ td_sched0.std_state = STDS_THREAD;
+ kg_sched0.skg_concurrancy = 1;
+ kg_sched0.skg_avail_opennings = 0; /* we are already running */
- kse_zone = uma_zcreate("KSE", sizeof (struct kse),
- NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}
/*
@@ -1352,25 +1221,18 @@
int
sched_thr_newthread(struct thread *td, struct thread *newtd, int flags)
{
- struct kse *newke;
- /* Initialize our kse structure. */
- newke = kse_alloc();
- bzero(&newke->ke_startzero,
- RANGEOF(struct kse, ke_startzero, ke_endzero));
+ struct td_sched *newstd;
+
+ newstd = newtd->td_sched;
+ bzero(&newstd->std_startzero,
+ RANGEOF(struct td_sched, std_startzero, std_endzero));
- /* Link the thread and kse into the ksegrp and make it runnable. */
+ /* Link the thread into the ksegrp and make it runnable. */
mtx_lock_spin(&sched_lock);
thread_link(newtd, td->td_ksegrp);
- kse_link(newke, td->td_ksegrp);
- /* Bind this thread and kse together. */
- newtd->td_kse = newke;
- newke->ke_thread = newtd;
- bzero(&newke->ke_startzero,
- (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
- newke->ke_state = KES_THREAD;
- newke->ke_cpticks = 0;
+ newstd->std_oncpu = NOCPU;
+ newstd->std_state = STDS_THREAD;
+ newstd->std_cpticks = 0;
TD_SET_CAN_RUN(newtd);
if ((flags & THR_SUSPENDED) == 0)
@@ -1380,63 +1242,7 @@
return (0); /* the API could fail but not in this case */
}
-/*****************************
- * KSE zone/allocation methods.
- */
/*
- * Allocate a kse.
- */
-static struct kse *
-kse_alloc(void)
-{
- return (uma_zalloc(kse_zone, M_WAITOK));
-}
-
-/*
- * Deallocate a kse.
- */
-static void
-kse_free(struct kse *td)
-{
- uma_zfree(kse_zone, td);
-}
-
-/*
- * Stash an embarrassingly extra kse into the zombie kse queue.
- * Called from:
- * kse_unlink() (local)
- */
-static void
-kse_stash(struct kse *ke)
-{
- mtx_lock_spin(&kse_zombie_lock);
- TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
- mtx_unlock_spin(&kse_zombie_lock);
-}
-
-/*
- * KSE is linked into kse group.
- * Called from:
- * sched_newproc() (local)
- * sched_thr_newthread() (local)
- * schedinit() (local)
- * sched_set_concurrancy() (local)
- *
- */
-static void
-kse_link( struct kse *ke, struct ksegrp *kg)
-{
- TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
- kg->kg_kses++;
- ke->ke_state = KES_UNQUEUED;
- ke->ke_proc = kg->kg_proc; /* really just a shortcut */
- ke->ke_ksegrp = kg;
- ke->ke_thread = NULL;
- ke->ke_oncpu = NOCPU;
- ke->ke_flags = 0;
-}
-
-/*
* Allocate scheduler specific per-process resources.
* The thread and ksegrp have already been linked in.
* Called from:
@@ -1445,19 +1251,13 @@
int
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{
- struct kse *ke;
+ struct td_sched *std;
+
+ std = td->td_sched;
- /*
- * For a new process, allocate a single KSE to the ksegrp.
- */
- ke = kse_alloc();
- if (ke) {
- kse_link(ke, kg);
- td->td_kse = ke;
- ke->ke_thread = td;
- return (0);
- }
- return (ENOMEM );
+ std->std_state = STDS_UNQUEUED;
+ std->std_oncpu = NOCPU;
+ return (0);
}
/*
@@ -1471,11 +1271,9 @@
sched_init_ksegrp(struct ksegrp *kg)
{
- TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
- TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */
- kg->kg_kses = 0;
- kg->kg_runq_kses = 0; /* XXXKSE change name */
- kg->kg_idle_kses = 0;
+ kg->kg_runq_threads = 0;
+ kg->kg_avail_opennings = 1;
+ kg->kg_concurrancy = 1;
}
/*
@@ -1483,64 +1281,18 @@
* Fix up the per-scheduler resources associated with it.
* Called from:
* thread_dtor()
- * thread_initi()
+ * thread_init()
*/
/* Assumes td->td_sched is already set up */
void
sched_init_thread(struct thread *td)
{
- td->td_last_kse = NULL;
- td->td_kse = NULL;
-}
-
-
-/*
- * code to take the per-scheduler KSE structure
- * off the ksegrp it is hanging off and free it
- * Called from:
- * sched_destroyproc()
- * sched_thr_exit()
- * sched_set_concurrancy() via REDUCE_KSES()
- * kse_reassign() via REDUCE_KSES()
- */
-static void
-kse_unlink(struct kse *ke)
-{
- struct ksegrp *kg;
-
- mtx_assert(&sched_lock, MA_OWNED);
- kg = ke->ke_ksegrp;
- TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
- if (ke->ke_state == KES_IDLE) {
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- kg->kg_idle_kses--;
- }
- /*
- * Aggregate stats from the KSE
- * ## none yet ##
- */
-
- kse_stash(ke);
}
-/*
- * Whenever we have idle KSEs and there are too many for the concurrancy,
- * then free as many as we can. Don't free too many if we have threads
- * to run/kill.
- */
-#define REDUCE_KSES(kg, skg) \
-do { \
- while ((skg->skg_concurrancy < skg->skg_kses) && \
- (skg->skg_idle_kses > 0) && \
- (skg->skg_kses > kg->kg_numthreads)) { \
- kse_unlink(TAILQ_FIRST(&skg->skg_iq)); \
- } \
-} while (0)
-
/*
* Called by the uma process fini routine..
>>> TRUNCATED FOR MAIL (1000 lines) <<<