PERFORCE change 61923 for review

Julian Elischer julian at FreeBSD.org
Tue Sep 21 18:55:37 PDT 2004


http://perforce.freebsd.org/chv.cgi?CH=61923

Change 61923 by julian at julian_ref on 2004/09/22 01:54:47

	 Use slot counting macros for ULE too.
	 Allow preempted threads to get back quicker by putting them
	 at the HEAD of their run queue list instead of the end.

Affected files ...

.. //depot/projects/nsched/sys/kern/kern_switch.c#37 edit
.. //depot/projects/nsched/sys/kern/kern_synch.c#16 edit
.. //depot/projects/nsched/sys/kern/sched_4bsd.c#58 edit
.. //depot/projects/nsched/sys/kern/sched_ule.c#36 edit
.. //depot/projects/nsched/sys/sys/proc.h#34 edit
.. //depot/projects/nsched/sys/sys/runq.h#2 edit

Differences ...

==== //depot/projects/nsched/sys/kern/kern_switch.c#37 (text+ko) ====

@@ -581,9 +581,7 @@
 	}
 
 	/*
-	 * Our thread state says that we are already on a run queue, so
-	 * update our state as if we had been dequeued by choosethread().
-	 * However we must not actually be on the system run queue yet.
+	 * Thread is runnable but not yet put on system run queue.
 	 */
 	MPASS(TD_ON_RUNQ(td));
 	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
@@ -606,7 +604,7 @@
 	TD_SET_RUNNING(td);
 	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm);
-	mi_switch(SW_INVOL, td);
+	mi_switch(SW_INVOL|SW_PREEMPT, td);
 	return (1);
 #else
 	return (0);
@@ -706,7 +704,7 @@
  * corresponding status bit.
  */
 void
-runq_add(struct runq *rq, struct kse *ke)
+runq_add(struct runq *rq, struct kse *ke, int flags)
 {
 	struct rqhead *rqh;
 	int pri;
@@ -717,7 +715,11 @@
 	rqh = &rq->rq_queues[pri];
 	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
 	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
-	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
+	if (flags & SRQ_PREEMPTED) {
+		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
+	} else {
+		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
+	}
 }
 
 /*

==== //depot/projects/nsched/sys/kern/kern_synch.c#16 (text+ko) ====


==== //depot/projects/nsched/sys/kern/sched_4bsd.c#58 (text+ko) ====

@@ -128,8 +128,8 @@
 	kg,								\
 	kg->kg_concurrency,						\
 	 kg->kg_avail_opennings);					\
-	KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency),		\
-	    ("slots out of whack"));					\
+/*	KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency),		\
+	    ("slots out of whack"));*/					\
 } while (0)
 
 #define SLOT_USE(kg)							\
@@ -141,8 +141,8 @@
 	kg,								\
 	kg->kg_concurrency,						\
 	 kg->kg_avail_opennings);					\
-	KASSERT((kg->kg_avail_opennings >= 0),				\
-	    ("slots out of whack"));					\
+/*	KASSERT((kg->kg_avail_opennings >= 0),				\
+	    ("slots out of whack"));*/					\
 } while (0)
 
 /*
@@ -831,7 +831,9 @@
 		SLOT_RELEASE(td->td_ksegrp);
 		if (TD_IS_RUNNING(td)) {
 			/* Put us back on the run queue (kse and all). */
-			setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
+			setrunqueue(td, (flags & SW_PREEMPT) ?
+			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
+			    SRQ_OURSELF|SRQ_YIELDING);
 		} else if (p->p_flag & P_HADTHREADS) {
 			/*
 			 * We will not be on the run queue. So we must be
@@ -1057,7 +1059,7 @@
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_tdcnt++;
 	SLOT_USE(td->td_ksegrp);
-	runq_add(ke->ke_runq, ke);
+	runq_add(ke->ke_runq, ke, flags);
 	ke->ke_ksegrp->kg_runq_kses++;
 	ke->ke_state = KES_ONRUNQ;
 	maybe_resched(td);

==== //depot/projects/nsched/sys/kern/sched_ule.c#36 (text+ko) ====

@@ -164,6 +164,32 @@
 #define kg_runtime		kg_sched->skg_runtime
 #define kg_slptime		kg_sched->skg_slptime
 
+#define SLOT_RELEASE(kg)						\
+do {									\
+	kg->kg_avail_opennings++; 					\
+	CTR5(KTR_RUNQ, "%s line %d: kg %p(%d) Slot released (->%d)",	\
+	__FUNCTION__,							\
+	__LINE__,							\
+	kg,								\
+	kg->kg_concurrency,						\
+	 kg->kg_avail_opennings);					\
+	KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency),		\
+	    ("slots out of whack"));					\
+} while (0)
+
+#define SLOT_USE(kg)							\
+do {									\
+	kg->kg_avail_opennings--; 					\
+	CTR5(KTR_RUNQ, "%s line %d: kg %p(%d) Slot used (->%d)",	\
+	__FUNCTION__,							\
+	__LINE__,							\
+	kg,								\
+	kg->kg_concurrency,						\
+	 kg->kg_avail_opennings);					\
+	KASSERT((kg->kg_avail_opennings >= 0),				\
+	    ("slots out of whack"));					\
+} while (0)
+
 
 static struct kse kse0;
 static struct kg_sched kg_sched0;
@@ -383,7 +409,7 @@
 		ke->ke_flags |= KEF_XFERABLE;
 	}
 #endif
-	runq_add(ke->ke_runq, ke);
+	runq_add(ke->ke_runq, ke, 0);
 }
 
 static __inline void
@@ -874,7 +900,7 @@
 			runq_remove(ke->ke_runq, ke);
 			sched_slice(ke);
 			ke->ke_runq = kseq->ksq_next;
-			runq_add(ke->ke_runq, ke);
+			runq_add(ke->ke_runq, ke, 0);
 			continue;
 		}
 		return (ke);
@@ -1149,9 +1175,9 @@
 	/*
 	 * Set up the scheduler specific parts of proc0.
 	 */
+	proc0.p_sched = NULL; /* XXX */
 	ksegrp0.kg_sched = &kg_sched0;
-	proc0.p_sched = NULL; /* XXX */
-	thread0.td_kse = &kse0;
+	thread0.td_sched = &kse0;
 	kse0.ke_thread = &thread0;
 	kse0.ke_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
 	kse0.ke_state = KES_THREAD;
@@ -1210,7 +1236,7 @@
 		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
 			runq_remove(ke->ke_runq, ke);
 			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
-			runq_add(ke->ke_runq, ke);
+			runq_add(ke->ke_runq, ke, 0);
 		}
 		/*
 		 * Hold this kse on this cpu so that sched_prio() doesn't
@@ -1238,15 +1264,6 @@
 	td->td_pflags &= ~TDP_OWEPREEMPT;
 
 	/*
-	 * If we bring in a thread, 
-	 * then account for it as if it had been added to the run queue and then chosen.
-	 */
-	if (newtd) {
-		newtd->td_ksegrp->kg_avail_opennings--;
-		newtd->td_kse->ke_flags |= KEF_DIDRUN;
-        	TD_SET_RUNNING(newtd);
-	}
-	/*
 	 * If the KSE has been assigned it may be in the process of switching
 	 * to the new cpu.  This is the case in sched_bind().
 	 */
@@ -1255,7 +1272,7 @@
 			TD_SET_CAN_RUN(td);
 		} else {
 			/* We are ending our run so make our slot available again */
-			td->td_ksegrp->kg_avail_opennings++;
+			SLOT_RELEASE(td->td_ksegrp);
 			if (TD_IS_RUNNING(td)) {
 				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
 				/*
@@ -1272,16 +1289,29 @@
 				/*
 				 * We will not be on the run queue.
 				 * So we must be sleeping or similar.
+				 * Don't use the slot if we will need it 
+				 * for newtd.
 				 */
-				if (td->td_proc->p_flag & P_HADTHREADS)
+				if ((td->td_proc->p_flag & P_HADTHREADS) &&
+				    (newtd == NULL ||
+				    newtd->td_ksegrp != td->td_ksegrp))
 					slot_fill(td->td_ksegrp);
 			}
 		}
 	}
-	if (newtd != NULL)
+	if (newtd != NULL) {
+		/*
+		 * If we bring in a thread, 
+		 * then account for it as if it had been added to the
+		 * run queue and then chosen.
+		 */
+		SLOT_USE(newtd->td_ksegrp);
+		newtd->td_kse->ke_flags |= KEF_DIDRUN;
+       		TD_SET_RUNNING(newtd);
 		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
-	else
+	} else {
 		newtd = choosethread();
+	}
 	if (td != newtd)
 		cpu_switch(td, newtd);
 	sched_lock.mtx_lock = (uintptr_t)td;
@@ -1773,7 +1803,7 @@
 		curthread->td_flags |= TDF_NEEDRESCHED;
 	if (preemptive && maybe_preempt(td))
 		return;
-	td->td_ksegrp->kg_avail_opennings--;
+	SLOT_USE(td->td_ksegrp);
 	ke->ke_ksegrp->kg_runq_threads++;
 	ke->ke_state = KES_ONRUNQ;
 

==== //depot/projects/nsched/sys/sys/proc.h#34 (text+ko) ====

@@ -652,12 +652,14 @@
 /* Flags for mi_switch(). */
 #define	SW_VOL		0x0001		/* Voluntary switch. */
 #define	SW_INVOL	0x0002		/* Involuntary switch. */
+#define SW_PREEMPT	0x0004		/* The invol switch is a preemption */
 
 /* flags for setrunqueue(). Why are we setting this thread on the run queue? */
 #define SRQ_BORING	0x0000		/* No special circumstances */
 #define SRQ_YIELDING	0x0001		/* we are yielding (from mi_switch) */
 #define SRQ_OURSELF	0x0002		/* it is ourself (from mi_switch) */
 #define SRQ_INTR	0x0004		/* it is probably urgent */
+#define SRQ_PREEMPTED	0x0008		/* has been preempted.. be kind */
 
 /* How values for thread_single(). */
 #define	SINGLE_NO_EXIT	0

==== //depot/projects/nsched/sys/sys/runq.h#2 (text+ko) ====

@@ -62,7 +62,7 @@
 	struct	rqhead rq_queues[RQ_NQS];
 };
 
-void	runq_add(struct runq *, struct kse *);
+void	runq_add(struct runq *, struct kse *, int flags);
 int	runq_check(struct runq *);
 struct	kse *runq_choose(struct runq *);
 void	runq_init(struct runq *);


More information about the p4-projects mailing list