svn commit: r192325 - in user/kmacy/releng_7_2_fcs/sys: kern sys vm
Kip Macy
kmacy at FreeBSD.org
Mon May 18 18:50:39 UTC 2009
Author: kmacy
Date: Mon May 18 18:50:38 2009
New Revision: 192325
URL: http://svn.freebsd.org/changeset/base/192325
Log:
merge 177085
- Pass the priority argument from *sleep() into sleepq and down into
sched_sleep(). This removes extra thread_lock() acquisition and
allows the scheduler to decide what to do with the static boost.
- Change the priority arguments to cv_* to match sleepq/msleep/etc.
where 0 means no priority change. Catch -1 in cv_broadcastpri() and
convert it to 0 for now.
- Set a flag when sleeping in a way that is compatible with swapping
since direct priority comparisons are meaningless now.
- Add a sysctl to ule, kern.sched.static_boost, that defaults to on which
controls the boost behavior. Turning it off gives better performance
in some workloads but needs more investigation.
- While we're modifying sleepq, change signal and broadcast to both
return with the lock held as the lock was held on enter.
Modified:
user/kmacy/releng_7_2_fcs/sys/kern/kern_condvar.c
user/kmacy/releng_7_2_fcs/sys/kern/kern_sx.c
user/kmacy/releng_7_2_fcs/sys/kern/kern_synch.c
user/kmacy/releng_7_2_fcs/sys/kern/kern_thread.c
user/kmacy/releng_7_2_fcs/sys/kern/kern_timeout.c
user/kmacy/releng_7_2_fcs/sys/kern/sched_4bsd.c
user/kmacy/releng_7_2_fcs/sys/kern/sched_ule.c
user/kmacy/releng_7_2_fcs/sys/kern/subr_sleepqueue.c
user/kmacy/releng_7_2_fcs/sys/sys/condvar.h
user/kmacy/releng_7_2_fcs/sys/sys/proc.h
user/kmacy/releng_7_2_fcs/sys/sys/sched.h
user/kmacy/releng_7_2_fcs/sys/sys/sleepqueue.h
user/kmacy/releng_7_2_fcs/sys/vm/vm_glue.c
user/kmacy/releng_7_2_fcs/sys/vm/vm_meter.c
Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_condvar.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_condvar.c Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_condvar.c Mon May 18 18:50:38 2009 (r192325)
@@ -134,7 +134,7 @@ _cv_wait(struct cv *cvp, struct lock_obj
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
}
- sleepq_wait(cvp);
+ sleepq_wait(cvp, 0);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
@@ -191,7 +191,7 @@ _cv_wait_unlock(struct cv *cvp, struct l
class->lc_unlock(lock);
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
- sleepq_wait(cvp);
+ sleepq_wait(cvp, 0);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
@@ -252,7 +252,7 @@ _cv_wait_sig(struct cv *cvp, struct lock
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
}
- rval = sleepq_wait_sig(cvp);
+ rval = sleepq_wait_sig(cvp, 0);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
@@ -317,7 +317,7 @@ _cv_timedwait(struct cv *cvp, struct loc
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
}
- rval = sleepq_timedwait(cvp);
+ rval = sleepq_timedwait(cvp, 0);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
@@ -386,7 +386,7 @@ _cv_timedwait_sig(struct cv *cvp, struct
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
}
- rval = sleepq_timedwait_sig(cvp);
+ rval = sleepq_timedwait_sig(cvp, 0);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
@@ -433,13 +433,19 @@ cv_broadcastpri(struct cv *cvp, int pri)
{
int wakeup_swapper;
+ /*
+ * XXX sleepq_broadcast pri argument changed from -1 meaning
+ * no pri to 0 meaning no pri.
+ */
+ if (pri == -1)
+ pri = 0;
wakeup_swapper = 0;
sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters = 0;
wakeup_swapper = sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
- } else
- sleepq_release(cvp);
+ }
+ sleepq_release(cvp);
if (wakeup_swapper)
kick_proc0();
}
Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_sx.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_sx.c Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_sx.c Mon May 18 18:50:38 2009 (r192325)
@@ -402,8 +402,7 @@ _sx_downgrade(struct sx *sx, const char
if (x & SX_LOCK_SHARED_WAITERS)
wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
-1, SQ_SHARED_QUEUE);
- else
- sleepq_release(&sx->lock_object);
+ sleepq_release(&sx->lock_object);
LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
@@ -557,9 +556,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t
SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
if (!(opts & SX_INTERRUPTIBLE))
- sleepq_wait(&sx->lock_object);
+ sleepq_wait(&sx->lock_object, 0);
else
- error = sleepq_wait_sig(&sx->lock_object);
+ error = sleepq_wait_sig(&sx->lock_object, 0);
if (error) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -630,6 +629,7 @@ _sx_xunlock_hard(struct sx *sx, uintptr_
atomic_store_rel_ptr(&sx->sx_lock, x);
wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
queue);
+ sleepq_release(&sx->lock_object);
if (wakeup_swapper)
kick_proc0();
}
@@ -779,9 +779,9 @@ _sx_slock_hard(struct sx *sx, int opts,
SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
if (!(opts & SX_INTERRUPTIBLE))
- sleepq_wait(&sx->lock_object);
+ sleepq_wait(&sx->lock_object, 0);
else
- error = sleepq_wait_sig(&sx->lock_object);
+ error = sleepq_wait_sig(&sx->lock_object, 0);
if (error) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -879,6 +879,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
"exclusive queue", __func__, sx);
wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
-1, SQ_EXCLUSIVE_QUEUE);
+ sleepq_release(&sx->lock_object);
if (wakeup_swapper)
kick_proc0();
break;
Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_synch.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_synch.c Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_synch.c Mon May 18 18:50:38 2009 (r192325)
@@ -161,6 +161,7 @@ _sleep(void *ident, struct lock_object *
return (0);
}
catch = priority & PCATCH;
+ pri = priority & PRIMASK;
rval = 0;
/*
@@ -209,25 +210,14 @@ _sleep(void *ident, struct lock_object *
lock_state = class->lc_unlock(lock);
sleepq_lock(ident);
}
-
- /*
- * Adjust this thread's priority, if necessary.
- */
- pri = priority & PRIMASK;
- if (pri != 0 && pri != td->td_priority) {
- thread_lock(td);
- sched_prio(td, pri);
- thread_unlock(td);
- }
-
if (timo && catch)
- rval = sleepq_timedwait_sig(ident);
+ rval = sleepq_timedwait_sig(ident, pri);
else if (timo)
- rval = sleepq_timedwait(ident);
+ rval = sleepq_timedwait(ident, pri);
else if (catch)
- rval = sleepq_wait_sig(ident);
+ rval = sleepq_wait_sig(ident, pri);
else {
- sleepq_wait(ident);
+ sleepq_wait(ident, pri);
rval = 0;
}
#ifdef KTRACE
@@ -305,9 +295,9 @@ msleep_spin(void *ident, struct mtx *mtx
sleepq_lock(ident);
#endif
if (timo)
- rval = sleepq_timedwait(ident);
+ rval = sleepq_timedwait(ident, 0);
else {
- sleepq_wait(ident);
+ sleepq_wait(ident, 0);
rval = 0;
}
#ifdef KTRACE
Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_thread.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_thread.c Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_thread.c Mon May 18 18:50:38 2009 (r192325)
@@ -898,8 +898,8 @@ thread_suspend_switch(struct thread *td)
p->p_suspcount++;
PROC_UNLOCK(p);
thread_lock(td);
- sched_sleep(td);
TD_SET_SUSPENDED(td);
+ sched_sleep(td, 0);
PROC_SUNLOCK(p);
DROP_GIANT();
mi_switch(SW_VOL, NULL);
@@ -918,8 +918,8 @@ thread_suspend_one(struct thread *td)
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
p->p_suspcount++;
- sched_sleep(td);
TD_SET_SUSPENDED(td);
+ sched_sleep(td, 0);
}
int
Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_timeout.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_timeout.c Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_timeout.c Mon May 18 18:50:38 2009 (r192325)
@@ -496,7 +496,9 @@ _callout_stop_safe(c, safe)
else {
use_lock = 1;
class = LOCK_CLASS(c->c_lock);
+#ifdef notyet
class->lc_assert(c->c_lock, LA_XLOCKED);
+#endif
}
} else
use_lock = 0;
@@ -564,7 +566,7 @@ again:
sleepq_add(&callout_wait,
&callout_lock.lock_object, "codrain",
SLEEPQ_SLEEP, 0);
- sleepq_wait(&callout_wait);
+ sleepq_wait(&callout_wait, 0);
sq_locked = 0;
/* Reacquire locks previously released. */
Modified: user/kmacy/releng_7_2_fcs/sys/kern/sched_4bsd.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/sched_4bsd.c Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/kern/sched_4bsd.c Mon May 18 18:50:38 2009 (r192325)
@@ -813,12 +813,16 @@ sched_unlend_user_prio(struct thread *td
}
void
-sched_sleep(struct thread *td)
+sched_sleep(struct thread *td, int pri)
{
THREAD_LOCK_ASSERT(td, MA_OWNED);
td->td_slptick = ticks;
td->td_sched->ts_slptime = 0;
+ if (pri)
+ sched_prio(td, pri);
+ if (TD_IS_SUSPENDED(td) || pri <= PSOCK)
+ td->td_flags |= TDF_CANSWAP;
}
void
@@ -946,6 +950,7 @@ sched_wakeup(struct thread *td)
THREAD_LOCK_ASSERT(td, MA_OWNED);
ts = td->td_sched;
+ td->td_flags &= ~TDF_CANSWAP;
if (ts->ts_slptime > 1) {
updatepri(td);
resetpriority(td);
Modified: user/kmacy/releng_7_2_fcs/sys/kern/sched_ule.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/sched_ule.c Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/kern/sched_ule.c Mon May 18 18:50:38 2009 (r192325)
@@ -195,6 +195,7 @@ static int preempt_thresh = PRI_MIN_KERN
#else
static int preempt_thresh = 0;
#endif
+static int static_boost = 1;
/*
* tdq - per processor runqs and statistics. All fields are protected by the
@@ -1985,12 +1986,16 @@ sched_nice(struct proc *p, int nice)
* Record the sleep time for the interactivity scorer.
*/
void
-sched_sleep(struct thread *td)
+sched_sleep(struct thread *td, int prio)
{
THREAD_LOCK_ASSERT(td, MA_OWNED);
td->td_slptick = ticks;
+ if (TD_IS_SUSPENDED(td) || prio <= PSOCK)
+ td->td_flags |= TDF_CANSWAP;
+ if (static_boost && prio)
+ sched_prio(td, prio);
}
/*
@@ -2005,6 +2010,7 @@ sched_wakeup(struct thread *td)
THREAD_LOCK_ASSERT(td, MA_OWNED);
ts = td->td_sched;
+ td->td_flags &= ~TDF_CANSWAP;
/*
* If we slept for more than a tick update our interactivity and
* priority.
@@ -2746,6 +2752,8 @@ SYSCTL_INT(_kern_sched, OID_AUTO, intera
"Interactivity score threshold");
SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
0,"Min priority for preemption, lower priorities have greater precedence");
+SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost,
+ 0,"Controls whether static kernel priorities are assigned to sleeping threads.");
#ifdef SMP
SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0,
"Pick the target cpu based on priority rather than load.");
Modified: user/kmacy/releng_7_2_fcs/sys/kern/subr_sleepqueue.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/subr_sleepqueue.c Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/kern/subr_sleepqueue.c Mon May 18 18:50:38 2009 (r192325)
@@ -149,7 +149,7 @@ static uma_zone_t sleepq_zone;
/*
* Prototypes for non-exported routines.
*/
-static int sleepq_catch_signals(void *wchan);
+static int sleepq_catch_signals(void *wchan, int pri);
static int sleepq_check_signals(void);
static int sleepq_check_timeout(void);
#ifdef INVARIANTS
@@ -158,7 +158,7 @@ static void sleepq_dtor(void *mem, int s
static int sleepq_init(void *mem, int size, int flags);
static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
int pri);
-static void sleepq_switch(void *wchan);
+static void sleepq_switch(void *wchan, int pri);
static void sleepq_timeout(void *arg);
/*
@@ -367,7 +367,7 @@ sleepq_set_timeout(void *wchan, int timo
* may have transitioned from the sleepq lock to a run lock.
*/
static int
-sleepq_catch_signals(void *wchan)
+sleepq_catch_signals(void *wchan, int pri)
{
struct sleepqueue_chain *sc;
struct sleepqueue *sq;
@@ -414,7 +414,7 @@ sleepq_catch_signals(void *wchan)
PROC_SUNLOCK(p);
if (ret == 0) {
if (!(td->td_flags & TDF_INTERRUPT)) {
- sleepq_switch(wchan);
+ sleepq_switch(wchan, pri);
return (0);
}
/* KSE threads tried unblocking us. */
@@ -447,7 +447,7 @@ sleepq_catch_signals(void *wchan)
* Returns with thread lock.
*/
static void
-sleepq_switch(void *wchan)
+sleepq_switch(void *wchan, int pri)
{
struct sleepqueue_chain *sc;
struct sleepqueue *sq;
@@ -488,10 +488,9 @@ sleepq_switch(void *wchan)
return;
}
- thread_lock_set(td, &sc->sc_lock);
-
MPASS(td->td_sleepqueue == NULL);
- sched_sleep(td);
+ sched_sleep(td, pri);
+ thread_lock_set(td, &sc->sc_lock);
TD_SET_SLEEPING(td);
SCHED_STAT_INC(switch_sleepq);
mi_switch(SW_VOL, NULL);
@@ -570,14 +569,14 @@ sleepq_check_signals(void)
* Block the current thread until it is awakened from its sleep queue.
*/
void
-sleepq_wait(void *wchan)
+sleepq_wait(void *wchan, int pri)
{
struct thread *td;
td = curthread;
MPASS(!(td->td_flags & TDF_SINTR));
thread_lock(td);
- sleepq_switch(wchan);
+ sleepq_switch(wchan, pri);
thread_unlock(td);
}
@@ -586,12 +585,12 @@ sleepq_wait(void *wchan)
* or it is interrupted by a signal.
*/
int
-sleepq_wait_sig(void *wchan)
+sleepq_wait_sig(void *wchan, int pri)
{
int rcatch;
int rval;
- rcatch = sleepq_catch_signals(wchan);
+ rcatch = sleepq_catch_signals(wchan, pri);
rval = sleepq_check_signals();
thread_unlock(curthread);
if (rcatch)
@@ -604,7 +603,7 @@ sleepq_wait_sig(void *wchan)
* or it times out while waiting.
*/
int
-sleepq_timedwait(void *wchan)
+sleepq_timedwait(void *wchan, int pri)
{
struct thread *td;
int rval;
@@ -612,7 +611,7 @@ sleepq_timedwait(void *wchan)
td = curthread;
MPASS(!(td->td_flags & TDF_SINTR));
thread_lock(td);
- sleepq_switch(wchan);
+ sleepq_switch(wchan, pri);
rval = sleepq_check_timeout();
thread_unlock(td);
@@ -624,11 +623,11 @@ sleepq_timedwait(void *wchan)
* it is interrupted by a signal, or it times out waiting to be awakened.
*/
int
-sleepq_timedwait_sig(void *wchan)
+sleepq_timedwait_sig(void *wchan, int pri)
{
int rcatch, rvalt, rvals;
- rcatch = sleepq_catch_signals(wchan);
+ rcatch = sleepq_catch_signals(wchan, pri);
rvalt = sleepq_check_timeout();
rvals = sleepq_check_signals();
thread_unlock(curthread);
@@ -692,8 +691,8 @@ sleepq_resume_thread(struct sleepqueue *
TD_CLR_SLEEPING(td);
/* Adjust priority if requested. */
- MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
- if (pri != -1 && td->td_priority > pri)
+ MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
+ if (pri != 0 && td->td_priority > pri)
sched_prio(td, pri);
return (setrunnable(td));
}
@@ -782,10 +781,8 @@ sleepq_broadcast(void *wchan, int flags,
KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
sq = sleepq_lookup(wchan);
- if (sq == NULL) {
- sleepq_release(wchan);
+ if (sq == NULL)
return (0);
- }
KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
@@ -797,7 +794,6 @@ sleepq_broadcast(void *wchan, int flags,
wakeup_swapper = 1;
thread_unlock(td);
}
- sleepq_release(wchan);
return (wakeup_swapper);
}
Modified: user/kmacy/releng_7_2_fcs/sys/sys/condvar.h
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/sys/condvar.h Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/sys/condvar.h Mon May 18 18:50:38 2009 (r192325)
@@ -72,7 +72,7 @@ void cv_broadcastpri(struct cv *cvp, int
#define cv_timedwait_sig(cvp, lock, timo) \
_cv_timedwait_sig((cvp), &(lock)->lock_object, (timo))
-#define cv_broadcast(cvp) cv_broadcastpri(cvp, -1)
+#define cv_broadcast(cvp) cv_broadcastpri(cvp, 0)
#define cv_wmesg(cvp) ((cvp)->cv_description)
Modified: user/kmacy/releng_7_2_fcs/sys/sys/proc.h
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/sys/proc.h Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/sys/proc.h Mon May 18 18:50:38 2009 (r192325)
@@ -329,7 +329,7 @@ do { \
#define TDF_SINTR 0x00000008 /* Sleep is interruptible. */
#define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */
#define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */
-#define TDF_SELECT 0x00000040 /* Selecting; wakeup/waiting danger. */
+#define TDF_SELECT 0x00000040 /* Selecting; wakeup/waiting danger. */
#define TDF_SLEEPABORT 0x00000080 /* sleepq_abort was called. */
#define TDF_UNUSEDx100 0x00000100 /* --available-- */
#define TDF_UBORROWING 0x00000200 /* Thread is borrowing user pri. */
@@ -346,7 +346,7 @@ do { \
#define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */
#define TDF_DBSUSPEND 0x00200000 /* Thread is suspended by debugger */
#define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */
-#define TDF_UNUSED23 0x00800000 /* --available-- */
+#define TDF_CANSWAP 0x00800000 /* Thread can be swapped. */
#define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */
#define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */
#define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */
@@ -778,7 +778,7 @@ MALLOC_DECLARE(M_ZOMBIE);
} while (0)
/* Check whether a thread is safe to be swapped out. */
-#define thread_safetoswapout(td) (TD_IS_SLEEPING(td) || TD_IS_SUSPENDED(td))
+#define thread_safetoswapout(td) ((td)->td_flags & TDF_CANSWAP)
/* Control whether or not it is safe for curthread to sleep. */
#define THREAD_NO_SLEEPING() do { \
Modified: user/kmacy/releng_7_2_fcs/sys/sys/sched.h
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/sys/sched.h Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/sys/sched.h Mon May 18 18:50:38 2009 (r192325)
@@ -100,7 +100,7 @@ void sched_lend_prio(struct thread *td,
void sched_lend_user_prio(struct thread *td, u_char pri);
fixpt_t sched_pctcpu(struct thread *td);
void sched_prio(struct thread *td, u_char prio);
-void sched_sleep(struct thread *td);
+void sched_sleep(struct thread *td, int prio);
void sched_switch(struct thread *td, struct thread *newtd, int flags);
void sched_throw(struct thread *td);
void sched_unlend_prio(struct thread *td, u_char prio);
Modified: user/kmacy/releng_7_2_fcs/sys/sys/sleepqueue.h
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/sys/sleepqueue.h Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/sys/sleepqueue.h Mon May 18 18:50:38 2009 (r192325)
@@ -102,10 +102,10 @@ void sleepq_release(void *wchan);
void sleepq_remove(struct thread *td, void *wchan);
int sleepq_signal(void *wchan, int flags, int pri, int queue);
void sleepq_set_timeout(void *wchan, int timo);
-int sleepq_timedwait(void *wchan);
-int sleepq_timedwait_sig(void *wchan);
-void sleepq_wait(void *wchan);
-int sleepq_wait_sig(void *wchan);
+int sleepq_timedwait(void *wchan, int pri);
+int sleepq_timedwait_sig(void *wchan, int pri);
+void sleepq_wait(void *wchan, int pri);
+int sleepq_wait_sig(void *wchan, int pri);
#endif /* _KERNEL */
#endif /* !_SYS_SLEEPQUEUE_H_ */
Modified: user/kmacy/releng_7_2_fcs/sys/vm/vm_glue.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/vm/vm_glue.c Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/vm/vm_glue.c Mon May 18 18:50:38 2009 (r192325)
@@ -885,8 +885,7 @@ retry:
* This could be refined to support
* swapping out a thread.
*/
- if ((td->td_priority) < PSOCK ||
- !thread_safetoswapout(td)) {
+ if (!thread_safetoswapout(td)) {
thread_unlock(td);
goto nextproc;
}
Modified: user/kmacy/releng_7_2_fcs/sys/vm/vm_meter.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/vm/vm_meter.c Mon May 18 18:44:54 2009 (r192324)
+++ user/kmacy/releng_7_2_fcs/sys/vm/vm_meter.c Mon May 18 18:50:38 2009 (r192325)
@@ -96,7 +96,6 @@ SYSCTL_PROC(_vm, VM_LOADAVG, loadavg, CT
static int
vmtotal(SYSCTL_HANDLER_ARGS)
{
-/* XXXKSE almost completely broken */
struct proc *p;
struct vmtotal total;
vm_map_entry_t entry;
@@ -139,25 +138,16 @@ vmtotal(SYSCTL_HANDLER_ARGS)
break;
default:
FOREACH_THREAD_IN_PROC(p, td) {
- /* Need new statistics XXX */
thread_lock(td);
switch (td->td_state) {
case TDS_INHIBITED:
- /*
- * XXX stats no longer synchronized.
- */
- if (TD_ON_LOCK(td) ||
- (td->td_inhibitors ==
- TDI_SWAPPED)) {
+ if (TD_IS_SWAPPED(td))
total.t_sw++;
- } else if (TD_IS_SLEEPING(td) ||
- TD_AWAITING_INTR(td) ||
- TD_IS_SUSPENDED(td)) {
- if (td->td_priority <= PZERO)
- total.t_dw++;
- else
- total.t_sl++;
- }
+ else if (TD_IS_SLEEPING(td) &&
+ td->td_priority <= PZERO)
+ total.t_dw++;
+ else
+ total.t_sl++;
break;
case TDS_CAN_RUN:
More information about the svn-src-user
mailing list