PERFORCE change 55431 for review
Julian Elischer
julian at FreeBSD.org
Mon Jun 21 01:50:24 GMT 2004
http://perforce.freebsd.org/chv.cgi?CH=55431
Change 55431 by julian at julian_ref on 2004/06/21 01:49:43
cleaning up and commenting
Affected files ...
.. //depot/projects/nsched/sys/kern/sched_4bsd.c#15 edit
.. //depot/projects/nsched/sys/sys/proc.h#15 edit
.. //depot/projects/nsched/sys/sys/sched.h#7 edit
Differences ...
==== //depot/projects/nsched/sys/kern/sched_4bsd.c#15 (text+ko) ====
@@ -215,7 +215,16 @@
static void maybe_resched(struct thread *td);
static void updatepri(struct ksegrp *kg);
static void resetpriority(struct ksegrp *kg);
+static void sched_add(struct thread *td);
+static void sched_rem(struct thread *td);
+static struct kse *sched_choose(void);
+static void adjustrunqueue(struct thread *td, int newpri);
+
+/***************************************************************
+ * Define startup entrypoints
+ */
+
static struct kproc_desc sched_kp = {
"schedcpu",
schedcpu_thread,
@@ -224,7 +233,82 @@
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
+/************************************************************************
+ * Definitions of the run queues we use here.
+ */
+
+/*
+ * Copyright (c) 2001 Jake Burkholder <jake at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/sys/runq.h,v 1.4 2002/05/25 01:12:23 jake Exp $
+ */
+
+#ifndef _RUNQ_H_
+#define _RUNQ_H_
+
+#include <machine/runq.h>
+
+struct kse;
+
+/*
+ * Run queue parameters.
+ */
+
+#define RQ_NQS (64) /* Number of run queues. */
+#define RQ_PPQ (4) /* Priorities per queue. */
+
+/*
+ * Head of run queues.
+ */
+TAILQ_HEAD(rqhead, kse);
+
/*
+ * Bit array which maintains the status of a run queue. When a queue is
+ * non-empty the bit corresponding to the queue number will be set.
+ */
+struct rqbits {
+ rqb_word_t rqb_bits[RQB_LEN];
+};
+
+/*
+ * Run queue structure. Contains an array of run queues on which processes
+ * are placed, and a structure to maintain the status of each queue.
+ */
+struct runq {
+ struct rqbits rq_status;
+ struct rqhead rq_queues[RQ_NQS];
+};
+
+static void runq_add(struct runq *, struct kse *);
+static int runq_check(struct runq *);
+static struct kse *runq_choose(struct runq *);
+static void runq_init(struct runq *);
+static void runq_remove(struct runq *, struct kse *);
+
+#endif /* _RUNQ_H_ (end of the inlined runq.h) */
+/*
* Global run queue.
*/
static struct runq runq;
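For illustration only (not part of this change): with RQ_PPQ priority
levels per queue, a thread priority selects its queue and status bit as
sketched below, assuming the stock RQB_WORD()/RQB_BIT() macros from
<machine/runq.h>.

    /* Queue index for a priority: one queue per RQ_PPQ levels. */
    static __inline int
    runq_queue_index(u_char pri)
    {
            return (pri / RQ_PPQ);          /* 0 .. RQ_NQS - 1 */
    }

    /* Mark queue 'qi' non-empty in the status bit array. */
    static __inline void
    runq_setbit_sketch(struct runq *rq, int qi)
    {
            rq->rq_status.rqb_bits[RQB_WORD(qi)] |= RQB_BIT(qi);
    }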
@@ -292,6 +376,8 @@
* The act of firing the event triggers a context switch to softclock() and
* then switching back out again which is equivalent to a preemption, thus
* no further work is needed on the local CPU.
+ * Called from:
+ * callout started by sched_setup();
*/
/* ARGSUSED */
static void
@@ -396,6 +482,8 @@
/*
* Recompute process priorities, every hz ticks.
* MP-safe, called without the Giant mutex.
+ * Called from:
+ * schedcpu_thread() which is a kthread that awakens once per second.
*/
/* ARGSUSED */
static void
@@ -507,6 +595,8 @@
/*
* Main loop for a kthread that executes schedcpu once a second.
+ * Called from:
+ * (not called; this is a kthread that runs forever)
*/
static void
schedcpu_thread(void)
@@ -523,6 +613,9 @@
* Recalculate the priority of a process after it has slept for a while.
* For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
* least six times the loadfactor will decay kg_estcpu to zero.
+ * Called from:
+ * sched_wakeup() (local)
+ * schedcpu() (local)
*/
static void
updatepri(struct ksegrp *kg)
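The decay claim above can be checked with a small user-space sketch;
loadfactor() and decay_cpu() mirror the stock sched_4bsd.c macros, and
FSHIFT is assumed to be 11 as in sys/param.h:

    #include <stdio.h>

    #define FSHIFT  11
    #define FSCALE  (1 << FSHIFT)
    #define loadfactor(loadav)      (2 * (loadav))
    #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))

    int
    main(void)
    {
            long loadfac = loadfactor(1 * FSCALE);  /* load average 1.0 */
            long estcpu = 255;                      /* max kg_estcpu */
            int seconds = 0;

            /* One decay_cpu() application per second of sleep. */
            while (estcpu != 0) {
                    estcpu = decay_cpu(loadfac, estcpu);
                    seconds++;
            }
            printf("estcpu 255 decays to 0 in %d seconds\n", seconds);
            return (0);
    }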
@@ -547,6 +640,11 @@
* Compute the priority of a process when running in user mode.
* Arrange to reschedule if the resulting priority is better
* than that of the current process.
+ * Called from:
+ * updatepri() (local)
+ * schedcpu() (local)
+ * sched_clock() (local)
+ * sched_nice() (local)
*/
static void
resetpriority(struct ksegrp *kg)
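The diff elides the body; for orientation, a sketch of the classic 4BSD
formula that resetpriority() applies. The constants below are the stock
values and are assumptions here, not taken from this branch:

    #define PUSER                   160     /* assumed stock values */
    #define PRI_MAX_TIMESHARE       223
    #define INVERSE_ESTCPU_WEIGHT   8
    #define NICE_WEIGHT             1
    #define PRIO_MIN                (-20)

    /* Higher numeric value == weaker priority. */
    static unsigned int
    user_pri_sketch(unsigned int estcpu, int nice)
    {
            unsigned int pri;

            pri = PUSER + estcpu / INVERSE_ESTCPU_WEIGHT +
                NICE_WEIGHT * (nice - PRIO_MIN);
            if (pri > PRI_MAX_TIMESHARE)
                    pri = PRI_MAX_TIMESHARE;
            return (pri);
    }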
@@ -566,6 +664,11 @@
}
}
+/*
+ * Scheduler initialisation.
+ * Called from:
+ * SYSINIT() above.
+ */
/* ARGSUSED */
static void
sched_setup(void *dummy)
@@ -593,6 +696,13 @@
* allocation and death of various entities, timer events, and *
* direct calls for scheduler services. *
******************************************************************/
+/*
+ * Test to see if there is anything scheduled to run.
+ * Called from:
+ * idle_proc()
+ * vm_pagezero()
+ * cpu_idle()
+ */
int
sched_runnable(void)
{
@@ -603,6 +713,11 @@
#endif
}
+/*
+ * Report the round-robin scheduling interval.
+ * Called from:
+ * (POSIX scheduling interface)
+ */
int
sched_rr_interval(void)
{
@@ -624,6 +739,8 @@
* 90% forget that the process used a lot of CPU time in 5 * loadav
* seconds. This causes the system to favor processes which haven't
* run much recently, and to round-robin among other processes.
+ * Called from:
+ * statclock() (called by hardclock)
*/
void
sched_clock(struct thread *td)
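As a quick sanity check of the "90% in 5 * loadav seconds" claim: the
per-second decay factor implied by decay_cpu() is 2L/(2L+1) for load
average L, so a hypothetical standalone check (link with -lm) shows
roughly 10% of estcpu remaining after 5L seconds:

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
            double L, remain;

            for (L = 1; L <= 4; L++) {
                    remain = pow(2 * L / (2 * L + 1), 5 * L);
                    printf("loadav %.0f: %4.1f%% of estcpu left after %2.0f seconds\n",
                        L, 100 * remain, 5 * L);
            }
            return (0);
    }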
@@ -651,6 +768,8 @@
* Charge it to the ksegrp that did the wait since process estcpu is sum of
* all ksegrps, this is strictly as expected. Assume that the child process
* aggregated all the estcpu into the 'built-in' ksegrp.
+ * Called from:
+ * exit()
*/
void
sched_exit(struct proc *parent, struct thread *td)
@@ -667,6 +786,8 @@
/*
* We should give the estcpu to someone, but WHO?
+ * Called from:
+ * thread_exit() (for threaded programs only)
*/
void
sched_exit_ksegrp(struct proc *parent, struct thread *td)
@@ -675,6 +796,11 @@
sched_set_concurrancy(td->td_ksegrp, 0);
}
+/*
+ * Called when individual threads exit.
+ * Called from:
+ * thread_exit() (for all exiting threads)
+ */
void
sched_thread_exit(struct thread *td)
{
@@ -695,6 +821,9 @@
/*
* Special version of the above for a thr library thread;
* work towards merging them.
+ * Called from:
+ * thr_exit1()
*/
void
sched_thr_exit(struct thread *td)
@@ -713,6 +842,12 @@
}
+/*
+ * Allocate or set up any resources the scheduler needs
+ * for a new process at fork() time.
+ * Called from:
+ * fork1()
+ */
void
sched_fork(struct thread *td, struct proc *child)
{
@@ -730,12 +865,24 @@
newtd->td_kse = newke;
}
+/*
+ * When we make a new ksegrp, the scheduler may need to know about it.
+ * Called from:
+ * kse_create()
+ */
void
sched_fork_ksegrp(struct thread *td, struct ksegrp *newkg)
{
newkg->kg_estcpu = td->td_ksegrp->kg_estcpu;
}
+/*
+ * Do the dirty work of changing the nice value of the process
+ * and deciding how that affects the threads.
+ * Called from:
+ * donice()
+ * vm_pageout_scan() (to make a killed program exit quickly)
+ */
void
sched_nice(struct proc *p, int nice)
{
@@ -749,6 +896,11 @@
}
}
+/*
+ * Scheduler-specific code for when a ksegrp changes scheduler class.
+ * Called from:
+ * rtp_to_pri()
+ */
void
sched_class(struct ksegrp *kg, int class)
{
@@ -761,6 +913,13 @@
* This may include moving the thread within the KSEGRP,
* changing the assignment of a kse to the thread,
* and moving a KSE in the system run queue.
+ * Called from:
+ * rtp_to_pri()
+ * uio_yield()
+ * msleep()
+ * yield()
+ * ast()
+ * propagate_priority()
*/
void
sched_prio(struct thread *td, u_char prio)
@@ -774,6 +933,13 @@
}
}
+/*
+ * Called when a thread is going to sleep; the scheduler may need
+ * to be informed.  (This is probably unnecessary: it could be
+ * deduced from sched_switch().)
+ * Called from:
+ * sleepq_switch()
+ */
void
sched_sleep(struct thread *td)
{
@@ -783,6 +949,12 @@
td->td_base_pri = td->td_priority;
}
+/*
+ * As the thread is actually switched out, the scheduler
+ * may need to be told of this.
+ * Called from:
+ * mi_switch()
+ */
void
sched_switch(struct thread *td, int flags)
{
@@ -825,6 +997,12 @@
td->td_oncpu = PCPU_GET(cpuid);
}
+/*
+ * When making a thread runnable after it was stopped or sleeping,
+ * the scheduler needs to be informed.
+ * Called from:
+ * setrunnable()
+ */
void
sched_wakeup(struct thread *td)
{
@@ -839,7 +1017,18 @@
maybe_resched(td);
}
-void
+
+/*
+ * We have a thread to put on the run queue.
+ * Let the scheduler keep track of
+ * the fact that the thread is becoming runnable,
+ * and decide which run queue to use.
+ * Called from:
+ * kse_reassign() (local)
+ * adjustrunqueue() (local)
+ * setrunqueue() (local)
+ */
+static void
sched_add(struct thread *td)
{
struct kse *ke;
@@ -874,7 +1063,14 @@
runq_add(ke->ke_runq, ke);
}
-void
+/*
+ * Change scheduler stats to reflect removal of a thread from the run queues.
+ * Called from:
+ * remrunqueue() (local)
+ * adjustrunqueue() (local)
+ * setrunqueue() (local)
+ */
+static void
sched_rem(struct thread *td)
{
struct kse *ke;
@@ -894,7 +1090,13 @@
ke->ke_ksegrp->kg_runq_kses--;
}
-struct kse *
+/*
+ * Try to find a runnable thread from the per-CPU or
+ * general run queues and adjust scheduler stats accordingly.
+ * Called from:
+ * choosethread() (local)
+ */
+static struct kse *
sched_choose(void)
{
struct kse *ke;
@@ -937,6 +1139,11 @@
return (ke);
}
+/*
+ * The scheduler needs to know when the thread transitions to user mode.
+ * Called from:
+ * userret()
+ */
void
sched_userret(struct thread *td)
{
@@ -958,6 +1165,11 @@
}
}
+/*
+ * Set the thread up so that it is bound to a particular CPU.
+ * Called from:
+ * ***NEVER CALLED ***
+ */
void
sched_bind(struct thread *td, int cpu)
{
@@ -981,6 +1193,11 @@
#endif
}
+/*
+ * Set the thread up so that it is NOT bound to a particular CPU.
+ * Called from:
+ * ***NEVER CALLED ***
+ */
void
sched_unbind(struct thread* td)
{
@@ -988,12 +1205,24 @@
td->td_kse->ke_flags &= ~KEF_BOUND;
}
+/*
+ * Scheduler-specific way to find the load average.
+ * Called from:
+ * loadav() (from a callout)
+ */
int
sched_load(void)
{
return (sched_tdcnt);
}
+/*
+ * The following calls return the size of the scheduler-specific
+ * storage that needs to be allocated along with the proc, ksegrp and thread
+ * structures.
+ * Called from:
+ * ksegrp_init(), proc_init(), thread_init() (UMA init entrypoints)
+ */
int
sched_sizeof_ksegrp(void)
{
@@ -1012,6 +1241,12 @@
return (sizeof(struct thread) + sizeof(struct td_sched));
}
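For orientation (an assumption, modelled on how kern_thread.c of this
era creates its zone, not code from this change): the UMA zones are
sized with these functions so that the scheduler-private structure sits
directly after the public one.

    thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
        thread_ctor, thread_dtor, thread_init, thread_fini,
        UMA_ALIGN_CACHE, 0);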
+/*
+ * Scheduler-specific method to determine the percentage of CPU used by a thread.
+ * Called from:
+ * fill_kinfo_thread() (for ps etc.)
+ * ttyinfo() (for ^T etc.)
+ */
fixpt_t
sched_pctcpu(struct thread *td)
{
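Consumers turn the returned fixpt_t into a percentage; a hedged sketch,
assuming FSHIFT/FSCALE as defined in sys/param.h:

    #define FSHIFT  11
    #define FSCALE  (1 << FSHIFT)

    /* Round a kernel fixpt_t CPU fraction to a whole percentage. */
    static unsigned int
    pctcpu_to_percent(unsigned int pctcpu)
    {
            return ((pctcpu * 100 + FSCALE / 2) >> FSHIFT);
    }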
@@ -1038,6 +1273,11 @@
extern struct mtx kse_zombie_lock;
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
+/*
+ * Occasionally the scheduler may need to do some garbage collection.
+ * Called from:
+ * thread_reap()
+ */
void
sched_GC(void)
{
@@ -1057,6 +1297,12 @@
}
}
+/*
+ * Very early in boot, some scheduler-specific parts of proc0
+ * and some scheduler resources need to be set up.
+ * Called from:
+ * proc0_init()
+ */
void
schedinit(void)
{
@@ -1085,6 +1331,8 @@
/*
* for now have special thr code
* later on, clean these up into common code.
+ * Called from:
+ * thr_create()
*/
int
sched_thr_newthread(struct thread *td, struct thread *newtd, int flags)
@@ -1117,6 +1365,9 @@
return (0); /* the API could fail but not in this case */
}
+/*****************************
+ * KSE zone/allocation methods.
+ */
/*
* Allocate a kse.
*/
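The allocator body is elided by the diff; presumably it is the
uma_zalloc() twin of kse_free() below.  A sketch under that assumption,
using the kse_zone referenced elsewhere in this file:

    static struct kse *
    kse_alloc(void)
    {
            return (uma_zalloc(kse_zone, M_WAITOK));
    }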
@@ -1129,7 +1380,7 @@
/*
* Deallocate a kse.
*/
-void
+static void
kse_free(struct kse *td)
{
uma_zfree(kse_zone, td);
@@ -1137,8 +1388,10 @@
/*
* Stash an embarrassingly extra kse into the zombie kse queue.
+ * Called from:
+ * kse_unlink() (local)
*/
-void
+static void
kse_stash(struct kse *ke)
{
mtx_lock_spin(&kse_zombie_lock);
@@ -1148,8 +1401,14 @@
/*
* KSE is linked into kse group.
+ * Called from:
+ * sched_newproc() (local)
+ * sched_thr_newthread() (local)
+ * schedinit() (local)
+ * sched_set_concurrancy() (local)
+ *
*/
-void
+static void
kse_link( struct kse *ke, struct ksegrp *kg)
{
TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
@@ -1163,8 +1422,10 @@
}
/*
- * Allocate scheduler per-process resources.
+ * Allocate scheduler-specific per-process resources.
* The thread and ksegrp have already been linked in.
+ * Called from:
+ * proc_init() (UMA init method)
*/
int
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
@@ -1184,6 +1445,13 @@
return (ENOMEM );
}
+/*
+ * A ksegrp is being either created or recycled.
+ * Fix up the per-scheduler resources associated with it.
+ * Called from:
+ * ksegrp_dtor()
+ * ksegrp_init()
+ */
void
sched_init_ksegrp(struct ksegrp *kg)
{
@@ -1195,6 +1463,13 @@
kg->kg_idle_kses = 0;
}
+/*
+ * A thread is being either created or recycled.
+ * Fix up the per-scheduler resources associated with it.
+ * Called from:
+ * thread_dtor()
+ * thread_init()
+ */
/* Assumes td->td_sched is already set up */
void
sched_init_thread(struct thread *td)
@@ -1203,7 +1478,17 @@
td->td_kse = NULL;
}
-void
+
+/*
+ * Code to take the per-scheduler KSE structure
+ * off the ksegrp it hangs from, and free it.
+ * Called from:
+ * sched_destroyproc()
+ * sched_thr_exit()
+ * sched_set_concurrancy() via REDUCE_KSES()
+ * kse_reassign() via REDUCE_KSES()
+ */
+static void
kse_unlink(struct kse *ke)
{
struct ksegrp *kg;
@@ -1241,6 +1526,8 @@
* Called by the uma process fini routine..
* undo anything we may have done in the uma_init method.
* Panic if it's not all 1:1:1:1
+ * Called from:
+ * proc_fini() (UMA method)
*/
void
sched_destroyproc(struct proc *p)
@@ -1259,6 +1546,17 @@
kse_unlink(TAILQ_FIRST(&skg->skg_iq)); \
}
+/*
+ * (Re)assign resources to allow the ksegrp to implement
+ * the requested concurrency. At present this means allocating
+ * or freeing KSE structures.
+ * Called from:
+ * kern_execve() (reverting to non-threaded)
+ * kern_exit() (reverting to non-threaded)
+ * thread_exit() (during removal of ksegrp)
+ * sched_exit_ksegrp() (local)
+ * kse_exit() (decreasing)
+ * kse_create() (increasing)
+ */
void
sched_set_concurrancy(struct ksegrp *kg, int concurrancy)
{
@@ -1393,6 +1691,11 @@
* this will be what does it.
* XXX Change to take an argument indicating
* if the switch is voluntary or involuntary.
+ * Called from:
+ * thr_exit1()
+ * thread_exit()
+ * sched_switch() (local)
* init_secondary() (start up secondary processors)
*/
struct thread *
choosethread(int flags)
@@ -1465,8 +1768,14 @@
* Given a surplus KSE, either assign a new runnable thread to it
* (and put it in the run queue) or put it in the ksegrp's idle KSE list.
* Assumes that the original thread is not runnable.
+ * Called from:
+ * sched_thread_exit() (local)
+ * sched_set_concurrancy() (local)
+ * sched_switch() (local)
+ * remrunqueue() (local) (commented out)
*/
-void
+static void
kse_reassign(struct kse *ke)
{
struct ksegrp *kg;
@@ -1516,6 +1825,8 @@
* This in turn may remove it from a KSE if it was already assigned
* to one, possibly causing a new thread to be assigned to the KSE
* and the KSE getting a new priority.
+ * Called from:
+ * Not used
*/
static void
remrunqueue(struct thread *td)
@@ -1562,8 +1873,10 @@
/*
* Change the priority of a thread that is on the run queue.
+ * Called from:
+ * sched_prio() (local)
*/
-void
+static void
adjustrunqueue( struct thread *td, int newpri)
{
struct ksegrp *kg;
@@ -1750,7 +2063,7 @@
/*
* Initialize a run structure.
*/
-void
+static void
runq_init(struct runq *rq)
{
int i;
@@ -1821,7 +2134,7 @@
* Add the KSE to the queue specified by its priority, and set the
* corresponding status bit.
*/
-void
+static void
runq_add(struct runq *rq, struct kse *ke)
{
struct rqhead *rqh;
@@ -1841,7 +2154,7 @@
* queue, false otherwise. Has no side effects, does not modify the run
* queue structure.
*/
-int
+static int
runq_check(struct runq *rq)
{
struct rqbits *rqb;
@@ -1862,7 +2175,7 @@
/*
* Find the highest priority process on the run queue.
*/
-struct kse *
+static struct kse *
runq_choose(struct runq *rq)
{
struct rqhead *rqh;
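For reference, the highest-priority lookup that runq_choose() builds on
is a find-first-set scan over the status bits; a sketch assuming the
stock runq_findbit() shape and the RQB_* macros from <machine/runq.h>:

    static int
    runq_findbit(struct runq *rq)
    {
            struct rqbits *rqb;
            int i, pri;

            rqb = &rq->rq_status;
            for (i = 0; i < RQB_LEN; i++)
                    if (rqb->rqb_bits[i]) {
                            pri = RQB_FFS(rqb->rqb_bits[i]) +
                                (i << RQB_L2BPW);
                            return (pri);
                    }
            return (-1);
    }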
@@ -1888,7 +2201,7 @@
* corresponding status bit if the queue becomes empty.
* Caller must set ke->ke_state afterwards.
*/
-void
+static void
runq_remove(struct runq *rq, struct kse *ke)
{
struct rqhead *rqh;
==== //depot/projects/nsched/sys/sys/proc.h#15 (text+ko) ====
@@ -48,7 +48,6 @@
#include <sys/_mutex.h>
#include <sys/priority.h>
#include <sys/rtprio.h> /* XXX. */
-#include <sys/runq.h>
#include <sys/sigio.h>
#include <sys/signal.h>
#ifndef _KERNEL
@@ -741,7 +740,6 @@
struct pgrp *pgfind(pid_t); /* Find process group by id. */
struct proc *zpfind(pid_t); /* Find zombie process by id. */
-void adjustrunqueue(struct thread *, int newpri);
void ast(struct trapframe *framep);
struct thread *choosethread(int flags);
int cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
==== //depot/projects/nsched/sys/sys/sched.h#7 (text+ko) ====
@@ -72,13 +72,7 @@
void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);
-/*
- * Threads are moved on and off of run queues
- */
-void sched_add(struct thread *td);
-struct kse *sched_choose(void); /* XXX Should be thread * */
void sched_clock(struct thread *td);
-void sched_rem(struct thread *td);
/*
* Binding makes cpu affinity permanent while pinning is used to temporarily