PERFORCE change 61838 for review
Julian Elischer
julian at FreeBSD.org
Mon Sep 20 00:26:52 PDT 2004
http://perforce.freebsd.org/chv.cgi?CH=61838
Change 61838 by julian at julian_ref on 2004/09/20 07:25:55
Code cleanups and slot debugging support: add SLOT_USE()/SLOT_RELEASE() macros with KTR tracing and assertions for the ksegrp slot accounting, and fix check ordering and locking in kse_create(), thread_single(), and sched_switch().
Affected files ...
.. //depot/projects/nsched/sys/kern/kern_kse.c#29 edit
.. //depot/projects/nsched/sys/kern/kern_switch.c#35 edit
.. //depot/projects/nsched/sys/kern/kern_thr.c#19 edit
.. //depot/projects/nsched/sys/kern/kern_thread.c#39 edit
.. //depot/projects/nsched/sys/kern/sched_4bsd.c#57 edit
Differences ...
==== //depot/projects/nsched/sys/kern/kern_kse.c#29 (text+ko) ====
@@ -518,16 +518,10 @@
struct thread *newtd;
p = td->td_proc;
+ kg = td->td_ksegrp;
if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
return (err);
- /*
- * Processes using the other threading model can't
- * suddenly start calling this one
- */
- if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS)
- return (EINVAL);
-
ncpus = mp_ncpus;
if (virtual_cpu != 0)
ncpus = virtual_cpu;
@@ -547,10 +541,32 @@
}
PROC_LOCK(p);
+ /*
+ * Processes using the other threading model can't
+ * suddenly start calling this one
+ */
+ if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
+ PROC_UNLOCK(p);
+ return (EINVAL);
+ }
+
+ /*
+ * Limit it to NCPU upcall contexts per ksegrp in any case.
+ * There is a small race here, as we don't hold the proc lock
+ * until we increment the ksegrp's upcall count; one upcall too
+ * many is harmless, and the early check saves a proc lock.
+ */
+ if ((!uap->newgroup) && (kg->kg_numupcalls >= ncpus)) {
+ PROC_UNLOCK(p);
+ return (EPROCLIM);
+ }
+
if (!(p->p_flag & P_SA)) {
first = 1;
p->p_flag |= P_SA|P_HADTHREADS;
}
+
+
PROC_UNLOCK(p);
/*
* Now pay attention!
@@ -565,22 +581,21 @@
if (!sa && !(uap->newgroup || first))
return (EINVAL);
- kg = td->td_ksegrp;
if (uap->newgroup) {
newkg = ksegrp_alloc();
bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
kg_startzero, kg_endzero));
bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
+ sched_init_concurrency(newkg);
PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
if (p->p_numksegrps >= max_groups_per_proc) {
- mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
ksegrp_free(newkg);
return (EPROCLIM);
}
ksegrp_link(newkg, p);
+ mtx_lock_spin(&sched_lock);
sched_fork_ksegrp(td, newkg);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
@@ -595,6 +610,7 @@
*/
if (!first && ((td->td_pflags & TDP_SA) != sa))
return (EINVAL);
+
newkg = kg;
}
@@ -647,20 +663,11 @@
if (td->td_standin == NULL)
thread_alloc_spare(td);
- /*
- * Creating upcalls more than number of physical cpu does
- * not help performance.
- */
PROC_LOCK(p);
- if (newkg->kg_numupcalls >= ncpus) {
- PROC_UNLOCK(p);
- upcall_free(newku);
- return (EPROCLIM);
- }
/*
* If we are the first time, and a normal thread,
- * then trnasfer all the signals back to the 'process'.
+ * then transfer all the signals back to the 'process'.
* SA threading will make a special thread to handle them.
*/
if (first && sa) {
@@ -991,6 +998,8 @@
* for upcall. Initialize thread's large data area outside sched_lock
* for thread_schedule_upcall(). The crhold is also here to get it out
* from the schedlock as it has a mutex op itself.
+ * XXX BUG: we need to get the cr ref after the thread has
+ * checked and changed its own, not 6 months before...
*/
void
thread_alloc_spare(struct thread *td)
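The net effect of the kse_create() hunks above: the p_flag tests now happen under a single PROC_LOCK acquisition, and the per-ksegrp upcall limit is checked before any allocation, so a failed check no longer needs upcall_free() to undo work. A condensed sketch of the resulting flow (simplified from the hunks above; not the complete function):

	PROC_LOCK(p);
	if ((p->p_flag & (P_SA | P_HADTHREADS)) == P_HADTHREADS) {
		PROC_UNLOCK(p);
		return (EINVAL);	/* other threading model in use */
	}
	/* Racy but benign: one upcall too many does no harm. */
	if (!uap->newgroup && kg->kg_numupcalls >= ncpus) {
		PROC_UNLOCK(p);
		return (EPROCLIM);
	}
	if (!(p->p_flag & P_SA)) {
		first = 1;
		p->p_flag |= P_SA | P_HADTHREADS;
	}
	PROC_UNLOCK(p);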
==== //depot/projects/nsched/sys/kern/kern_switch.c#35 (text+ko) ====
@@ -426,7 +426,7 @@
sched_rem(tda);
tda = kg->kg_last_assigned =
TAILQ_PREV(tda, threadqueue, td_runq);
- kg->kg_avail_opennings++;
+ SLOT_RELEASE(kg);
}
/*
@@ -891,6 +891,7 @@
sched_init_concurrency(struct ksegrp *kg)
{
+ CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
kg->kg_concurrency = 1;
kg->kg_avail_opennings = 1;
}
@@ -907,7 +908,11 @@
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{
- /* Handle the case for a declining concurrency */
+ CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
+ kg,
+ concurrency,
+ kg->kg_avail_opennings,
+ kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
kg->kg_concurrency = concurrency;
}
@@ -925,7 +930,7 @@
sched_thread_exit(struct thread *td)
{
- td->td_ksegrp->kg_avail_opennings++;
+ SLOT_RELEASE(td->td_ksegrp);
slot_fill(td->td_ksegrp);
}
==== //depot/projects/nsched/sys/kern/kern_thr.c#19 (text+ko) ====
==== //depot/projects/nsched/sys/kern/kern_thread.c#39 (text+ko) ====
@@ -724,8 +724,6 @@
p->p_flag &= ~(P_SA|P_HADTHREADS);
td->td_mailbox = NULL;
td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
- p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT); /* maybe not */
- p->p_singlethread = NULL;
if (td->td_standin != NULL) {
thread_stash(td->td_standin);
td->td_standin = NULL;
@@ -786,17 +784,16 @@
if (p->p_singlethread)
return (1);
- if (force_exit == SINGLE_EXIT) {
- p->p_flag |= P_SINGLE_EXIT;
- } else
- p->p_flag &= ~P_SINGLE_EXIT;
p->p_flag |= P_STOPPED_SINGLE;
mtx_lock_spin(&sched_lock);
p->p_singlethread = td;
- if (force_exit == SINGLE_EXIT)
+ if (force_exit == SINGLE_EXIT) {
remaining = p->p_numthreads;
- else
+ p->p_flag |= P_SINGLE_EXIT;
+ } else {
remaining = p->p_numthreads - p->p_suspcount;
+ p->p_flag &= ~P_SINGLE_EXIT;
+ }
while (remaining != 1) {
FOREACH_THREAD_IN_PROC(p, td2) {
if (td2 == td)
@@ -860,8 +857,9 @@
* we try our utmost to revert to being a non-threaded
* process.
*/
+ p->p_singlethread = NULL;
+ p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
thread_unthread(td);
- p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
}
mtx_unlock_spin(&sched_lock);
return (0);
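Both thread_single() hunks are ordering fixes: P_SINGLE_EXIT is now set or cleared inside the same sched_lock section that picks p_singlethread and computes the remaining thread count, and on the unthreading path the flag and p_singlethread are torn down before thread_unthread() runs rather than after. Condensed from the first hunk:

	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	if (force_exit == SINGLE_EXIT) {
		remaining = p->p_numthreads;
		p->p_flag |= P_SINGLE_EXIT;
	} else {
		remaining = p->p_numthreads - p->p_suspcount;
		p->p_flag &= ~P_SINGLE_EXIT;
	}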
@@ -1018,7 +1016,6 @@
/*
* End the single threading mode..
- * Part of this is duplicated in thread-single in the SINGLE_EXIT case.
*/
void
thread_single_end(void)
==== //depot/projects/nsched/sys/kern/sched_4bsd.c#57 (text+ko) ====
@@ -119,6 +119,32 @@
#define kg_concurrency kg_sched->skg_concurrency
#define kg_runq_kses kg_sched->skg_runq_kses
+#define SLOT_RELEASE(kg) \
+do { \
+ kg->kg_avail_opennings++; \
+ CTR5(KTR_RUNQ, "%s line %d: kg %p(%d) Slot released (->%d)", \
+ __FUNCTION__, \
+ __LINE__, \
+ kg, \
+ kg->kg_concurrency, \
+ kg->kg_avail_opennings); \
+ KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency), \
+ ("slots out of whack")); \
+} while (0)
+
+#define SLOT_USE(kg) \
+do { \
+ kg->kg_avail_opennings--; \
+ CTR5(KTR_RUNQ, "%s line %d: kg %p(%d) Slot used (->%d)", \
+ __FUNCTION__, \
+ __LINE__, \
+ kg, \
+ kg->kg_concurrency, \
+ kg->kg_avail_opennings); \
+ KASSERT((kg->kg_avail_opennings >= 0), \
+ ("slots out of whack")); \
+} while (0)
+
/*
* KSE_CAN_MIGRATE macro returns true if the kse can migrate between
* cpus.
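The two macros wrap what used to be bare kg_avail_opennings arithmetic with a KTR trace and a sanity check, enforcing 0 <= kg_avail_opennings <= kg_concurrency at every transition. A standalone toy model of that invariant (userland C with made-up names, not kernel code):

	#include <assert.h>
	#include <stdio.h>

	struct kg_model {
		int concurrency;	/* models kg_concurrency */
		int avail_opennings;	/* models kg_avail_opennings */
	};

	static void
	slot_use(struct kg_model *kg)
	{
		kg->avail_opennings--;
		assert(kg->avail_opennings >= 0);	/* "slots out of whack" */
	}

	static void
	slot_release(struct kg_model *kg)
	{
		kg->avail_opennings++;
		assert(kg->avail_opennings <= kg->concurrency);
	}

	int
	main(void)
	{
		struct kg_model kg = { .concurrency = 1, .avail_opennings = 1 };

		slot_use(&kg);		/* a thread takes the slot to run */
		slot_release(&kg);	/* it blocks or exits; slot comes back */
		/* A second slot_release() here would trip the assertion. */
		printf("openings: %d of %d\n", kg.avail_opennings,
		    kg.concurrency);
		return (0);
	}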
@@ -789,26 +815,6 @@
}
}
- /*
- * The thread we are about to run needs to be counted as if it had been
- * added to the run queue and selected.
- * it came from:
- * A preemption
- * An upcall
- * A followon
- * Do this before saving curthread so that the slot count
- * doesn't give an overly optimistic view when that happens.
- */
- if (newtd) {
- KASSERT((newtd->td_inhibitors == 0),
- ("trying to run inhibitted thread"));
- newtd->td_ksegrp->kg_avail_opennings--;
- newtd->td_kse->ke_flags |= KEF_DIDRUN;
- TD_SET_RUNNING(newtd);
- if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
- sched_tdcnt++;
- }
-
td->td_lastcpu = td->td_oncpu;
td->td_flags &= ~TDF_NEEDRESCHED;
td->td_pflags &= ~TDP_OWEPREEMPT;
@@ -822,7 +828,7 @@
if (td == PCPU_GET(idlethread))
TD_SET_CAN_RUN(td);
else {
- td->td_ksegrp->kg_avail_opennings++;
+ SLOT_RELEASE(td->td_ksegrp);
if (TD_IS_RUNNING(td)) {
/* Put us back on the run queue (kse and all). */
setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
@@ -831,12 +837,32 @@
* We will not be on the run queue. So we must be
* sleeping or similar. As it's available,
* someone else can use the KSE if they need it.
+ * It's NOT available if we are about to need it.
*/
- slot_fill(td->td_ksegrp);
+ if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)
+ slot_fill(td->td_ksegrp);
}
}
- if (newtd == NULL)
+ if (newtd) {
+ /*
+ * The thread we are about to run needs to be counted
+ * as if it had been added to the run queue and selected.
+ * It came from:
+ * * A preemption
+ * * An upcall
+ * * A followon
+ */
+ KASSERT((newtd->td_inhibitors == 0),
+ ("trying to run inhibitted thread"));
+ SLOT_USE(newtd->td_ksegrp);
+ newtd->td_kse->ke_flags |= KEF_DIDRUN;
+ TD_SET_RUNNING(newtd);
+ if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
+ sched_tdcnt++;
+ } else {
newtd = choosethread();
+ }
+
if (td != newtd)
cpu_switch(td, newtd);
sched_lock.mtx_lock = (uintptr_t)td;
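Taken together, the sched_switch() hunks move the hand-picked newtd accounting to after the outgoing thread's bookkeeping, and stop giving the slot away when newtd will need it in the same ksegrp. Condensed (not the full switch path):

	SLOT_RELEASE(td->td_ksegrp);		/* outgoing thread frees its slot */
	if (!TD_IS_RUNNING(td) &&
	    (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp))
		slot_fill(td->td_ksegrp);	/* slot really is free for others */
	if (newtd != NULL) {
		SLOT_USE(newtd->td_ksegrp);	/* preemption, upcall, or followon */
		TD_SET_RUNNING(newtd);
	} else
		newtd = choosethread();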
@@ -1030,7 +1056,7 @@
}
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt++;
- td->td_ksegrp->kg_avail_opennings--;
+ SLOT_USE(td->td_ksegrp);
runq_add(ke->ke_runq, ke);
ke->ke_ksegrp->kg_runq_kses++;
ke->ke_state = KES_ONRUNQ;
@@ -1051,7 +1077,7 @@
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
- td->td_ksegrp->kg_avail_opennings++;
+ SLOT_RELEASE(td->td_ksegrp);
runq_remove(ke->ke_runq, ke);
ke->ke_state = KES_THREAD;