PERFORCE change 137475 for review

Peter Wemm peter at FreeBSD.org
Wed Mar 12 07:50:45 UTC 2008


http://perforce.freebsd.org/chv.cgi?CH=137475

Change 137475 by peter at peter_overcee on 2008/03/12 07:50:27

	Use struct thread for all internal APIs.  struct td_sched becomes
	an adjunct storage item, not something that is passed around.
	TS_TO_TD() goes away as 'td' is always available.
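
	For reference, this leans on the adjunct-storage layout already
	used in this branch: each thread's td_sched is carved out of the
	same uma thread zone item, immediately after the struct thread
	itself, so TD_TO_TS() is plain pointer arithmetic and a reverse
	TS_TO_TD() mapping buys nothing once every internal API receives
	the thread pointer.  A minimal userland sketch of that idea, with
	stand-in types rather than the real definitions from sys/proc.h
	and the scheduler sources:

	#include <stdlib.h>

	/* Stand-in for the kernel's struct thread. */
	struct thread {
		int	td_priority;
	};

	/* Stand-in for the scheduler-private adjunct storage. */
	struct td_sched {
		int	ts_flags;
	};

	/*
	 * Same trick as the TD_TO_TS() macro below: the td_sched sits
	 * immediately after the struct thread in the same allocation.
	 */
	#define TD_TO_TS(td)	((struct td_sched *)(&(td)[1]))

	int
	main(void)
	{
		struct thread *td;

		/* One allocation holds both, mirroring the packed zone. */
		td = malloc(sizeof(struct thread) + sizeof(struct td_sched));
		if (td == NULL)
			return (1);
		td->td_priority = 120;
		/* The adjunct is reachable from td alone. */
		TD_TO_TS(td)->ts_flags = 0;
		free(td);
		return (0);
	}

	With that layout a converted function simply takes the thread and
	derives the scheduler data locally via
	"struct td_sched *ts = TD_TO_TS(td);", the pattern the hunks
	below follow.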

Affected files ...

.. //depot/projects/bike_sched/sys/kern/sched_4bsd.c#18 edit
.. //depot/projects/bike_sched/sys/kern/sched_ule.c#18 edit

Differences ...

==== //depot/projects/bike_sched/sys/kern/sched_4bsd.c#18 (text+ko) ====

@@ -94,7 +94,6 @@
     ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
 
 #define TD_TO_TS(td) ((struct td_sched *)(&(td)[1]))
-#define TS_TO_TD(ts) (&((struct thread *)(ts))[-1])
 
 /* Packed structure to match the layout of the uma thread zone */
 static struct {
@@ -1193,40 +1192,40 @@
 struct thread *
 sched_choose(void)
 {
-	struct td_sched *ts;
+	struct thread *td;
 	struct runq *rq;
 
 	mtx_assert(&sched_lock,  MA_OWNED);
 #ifdef SMP
-	struct td_sched *kecpu;
+	struct thread *kecpu;
 
 	rq = &runq;
-	ts = runq_choose(&runq);
+	td = runq_choose(&runq);
 	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
 
-	if (ts == NULL || 
+	if (td == NULL || 
 	    (kecpu != NULL && 
-	     TS_TO_TD(kecpu)->td_priority < TS_TO_TD(ts)->td_priority)) {
-		CTR2(KTR_RUNQ, "choosing td_sched %p from pcpu runq %d", kecpu,
+	     kecpu->td_priority < td->td_priority)) {
+		CTR2(KTR_RUNQ, "choosing thread %p from pcpu runq %d", kecpu,
 		     PCPU_GET(cpuid));
-		ts = kecpu;
+		td = kecpu;
 		rq = &runq_pcpu[PCPU_GET(cpuid)];
 	} else { 
-		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", ts);
+		CTR1(KTR_RUNQ, "choosing thread %p from main runq", td);
 	}
 
 #else
 	rq = &runq;
-	ts = runq_choose(&runq);
+	td = runq_choose(&runq);
 #endif
 
-	if (ts) {
-		runq_remove(rq, ts);
-		ts->ts_flags |= TSF_DIDRUN;
+	if (td) {
+		runq_remove(rq, td);
+		TD_TO_TS(td)->ts_flags |= TSF_DIDRUN;
 
-		KASSERT(TS_TO_TD(ts)->td_flags & TDF_INMEM,
+		KASSERT(td->td_flags & TDF_INMEM,
 		    ("sched_choose: thread swapped out"));
-		return (TS_TO_TD(ts));
+		return (td);
 	} 
 	return (PCPU_GET(idlethread));
 }

==== //depot/projects/bike_sched/sys/kern/sched_ule.c#18 (text+ko) ====

@@ -101,7 +101,6 @@
 #define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
 
 #define TD_TO_TS(td) ((struct td_sched *)(&(td)[1]))
-#define TS_TO_TD(ts) (&((struct thread *)(ts))[-1])
 
 static struct {
 	struct thread	initial_thread;
@@ -284,30 +283,30 @@
 static int sched_interact_score(struct thread *);
 static void sched_interact_update(struct thread *);
 static void sched_interact_fork(struct thread *);
-static void sched_pctcpu_update(struct td_sched *);
+static void sched_pctcpu_update(struct thread *);
 
 /* Operations on per processor queues */
-static struct td_sched * tdq_choose(struct tdq *);
+static struct thread * tdq_choose(struct tdq *);
 static void tdq_setup(struct tdq *);
-static void tdq_load_add(struct tdq *, struct td_sched *);
-static void tdq_load_rem(struct tdq *, struct td_sched *);
-static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
-static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
+static void tdq_load_add(struct tdq *, struct thread *);
+static void tdq_load_rem(struct tdq *, struct thread *);
+static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
+static __inline void tdq_runq_rem(struct tdq *, struct thread *);
 void tdq_print(int cpu);
 static void runq_print(struct runq *rq);
 static void tdq_add(struct tdq *, struct thread *, int);
 #ifdef SMP
 static void tdq_move(struct tdq *, struct tdq *);
 static int tdq_idled(struct tdq *);
-static void tdq_notify(struct td_sched *);
+static void tdq_notify(struct thread *);
 static struct thread *tdq_steal(struct tdq *);
 static struct thread *runq_steal(struct runq *);
-static int sched_pickcpu(struct td_sched *, int);
+static int sched_pickcpu(struct thread *, int);
 static void sched_balance(void);
 static void sched_balance_groups(void);
 static void sched_balance_group(struct tdq_group *);
 static void sched_balance_pair(struct tdq *, struct tdq *);
-static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
+static inline struct tdq *sched_setcpu(struct thread *, int, int);
 static inline struct mtx *thread_block_switch(struct thread *);
 static inline void thread_unblock_switch(struct thread *, struct mtx *);
 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
@@ -384,12 +383,13 @@
  * queue position for timeshare threads.
  */
 static __inline void
-tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
+tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
 {
+	struct td_sched *ts = TD_TO_TS(td);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	THREAD_LOCK_ASSERT(TS_TO_TD(ts), MA_OWNED);
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
 #ifdef SMP
-	if (THREAD_CAN_MIGRATE(TS_TO_TD(ts))) {
+	if (THREAD_CAN_MIGRATE(td)) {
 		tdq->tdq_transferable++;
 		tdq->tdq_group->tdg_transferable++;
 		ts->ts_flags |= TSF_XFERABLE;
@@ -398,7 +398,7 @@
 	if (ts->ts_runq == &tdq->tdq_timeshare) {
 		u_char pri;
 
-		pri = TS_TO_TD(ts)->td_priority;
+		pri = td->td_priority;
 		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
 			("Invalid priority %d on timeshare runq", pri));
 		/*
@@ -429,11 +429,12 @@
  * transferable count does not reflect them.
  */
 static __inline void
-tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
+tdq_runq_rem(struct tdq *tdq, struct thread *td)
 {
+	struct td_sched *ts = TD_TO_TS(td);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	KASSERT(ts->ts_runq != NULL,
-	    ("tdq_runq_remove: thread %p null ts_runq", TS_TO_TD(ts)));
+	    ("tdq_runq_remove: thread %p null ts_runq", td));
 #ifdef SMP
 	if (ts->ts_flags & TSF_XFERABLE) {
 		tdq->tdq_transferable--;
@@ -443,16 +444,16 @@
 #endif
 	if (ts->ts_runq == &tdq->tdq_timeshare) {
 		if (tdq->tdq_idx != tdq->tdq_ridx)
-			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
+			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
 		else
-			runq_remove_idx(ts->ts_runq, ts, NULL);
+			runq_remove_idx(ts->ts_runq, td, NULL);
 		/*
 		 * For timeshare threads we update the priority here so
 		 * the priority reflects the time we've been sleeping.
 		 */
 		ts->ts_ltick = ticks;
-		sched_pctcpu_update(ts);
-		sched_priority(TS_TO_TD(ts));
+		sched_pctcpu_update(td);
+		sched_priority(td);
 	} else
-		runq_remove(ts->ts_runq, ts);
+		runq_remove(ts->ts_runq, td);
 }
@@ -462,17 +463,17 @@
  * for this thread to the referenced thread queue.
  */
 static void
-tdq_load_add(struct tdq *tdq, struct td_sched *ts)
+tdq_load_add(struct tdq *tdq, struct thread *td)
 {
 	int class;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	THREAD_LOCK_ASSERT(TS_TO_TD(ts), MA_OWNED);
-	class = PRI_BASE(TS_TO_TD(ts)->td_pri_class);
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	class = PRI_BASE(td->td_pri_class);
 	tdq->tdq_load++;
 	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
 	if (class != PRI_ITHD &&
-	    (TS_TO_TD(ts)->td_proc->p_flag & P_NOLOAD) == 0)
+	    (td->td_proc->p_flag & P_NOLOAD) == 0)
 #ifdef SMP
 		tdq->tdq_group->tdg_load++;
 #else
@@ -485,15 +486,15 @@
  * exiting.
  */
 static void
-tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
+tdq_load_rem(struct tdq *tdq, struct thread *td)
 {
 	int class;
 
-	THREAD_LOCK_ASSERT(TS_TO_TD(ts), MA_OWNED);
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	class = PRI_BASE(TS_TO_TD(ts)->td_pri_class);
+	class = PRI_BASE(td->td_pri_class);
 	if (class != PRI_ITHD &&
-	    (TS_TO_TD(ts)->td_proc->p_flag & P_NOLOAD) == 0)
+	    (td->td_proc->p_flag & P_NOLOAD) == 0)
 #ifdef SMP
 		tdq->tdq_group->tdg_load--;
 #else
@@ -503,7 +504,7 @@
 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
 	tdq->tdq_load--;
 	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
-	ts->ts_runq = NULL;
+	TD_TO_TS(td)->ts_runq = NULL;
 }
 
 #ifdef SMP
@@ -692,7 +693,6 @@
 static void
 tdq_move(struct tdq *from, struct tdq *to)
 {
-	struct td_sched *ts;
 	struct thread *td;
 	struct tdq *tdq;
 	int cpu;
@@ -702,23 +703,22 @@
 
 	tdq = from;
 	cpu = TDQ_ID(to);
-	ts = tdq_steal(tdq);
-	if (ts == NULL) {
+	td = tdq_steal(tdq);
+	if (td == NULL) {
 		struct tdq_group *tdg;
 
 		tdg = tdq->tdq_group;
 		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
 			if (tdq == from || tdq->tdq_transferable == 0)
 				continue;
-			ts = tdq_steal(tdq);
+			td = tdq_steal(tdq);
 			break;
 		}
-		if (ts == NULL)
+		if (td == NULL)
 			return;
 	}
 	if (tdq == to)
 		return;
-	td = TS_TO_TD(ts);
 	/*
 	 * Although the run queue is locked the thread may be blocked.  Lock
 	 * it to clear this and acquire the run-queue lock.
@@ -727,7 +727,7 @@
 	/* Drop recursive lock on from acquired via thread_lock(). */
 	TDQ_UNLOCK(from);
 	sched_rem(td);
-	ts->ts_cpu = cpu;
+	TD_TO_TS(td)->ts_cpu = cpu;
 	td->td_lock = TDQ_LOCKPTR(to);
 	tdq_add(to, td, SRQ_YIELDING);
 }
@@ -811,7 +811,7 @@
  * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
  */
 static void
-tdq_notify(struct td_sched *ts)
+tdq_notify(struct thread *td)
 {
 	struct thread *ctd;
 	struct pcpu *pcpu;
@@ -819,8 +819,8 @@
 	int pri;
 	int cpu;
 
-	cpu = ts->ts_cpu;
-	pri = TS_TO_TD(ts)->td_priority;
+	cpu = TD_TO_TS(td)->ts_cpu;
+	pri = td->td_priority;
 	pcpu = pcpu_find(cpu);
 	ctd = pcpu->pc_curthread;
 	cpri = ctd->td_priority;
@@ -948,7 +948,6 @@
  * current lock and returns with the assigned queue locked.
  */
 static inline struct tdq *
-sched_setcpu(struct td_sched *td, int cpu, int flags)
+sched_setcpu(struct thread *td, int cpu, int flags)
 {
-	struct thread *td;
 	struct tdq *tdq;
@@ -1063,8 +1063,9 @@
  * a determination based on load or priority of available processors.
  */
 static int
-sched_pickcpu(struct td_sched *ts, int flags)
+sched_pickcpu(struct thread *td, int flags)
 {
+	struct td_sched *ts;
 	struct tdq *tdq;
 	int self;
 	int pri;
@@ -1081,7 +1082,8 @@
 		    curthread->td_priority);
 		return (self);
 	}
-	pri = TS_TO_TD(ts)->td_priority;
+	pri = td->td_priority;
+	ts = TD_TO_TS(td);
 	cpu = ts->ts_cpu;
 	/*
 	 * Regardless of affinity, if the last cpu is idle send it there.
@@ -1137,29 +1139,29 @@
 /*
  * Pick the highest priority task we have and return it.
  */
-static struct td_sched *
+static struct thread *
 tdq_choose(struct tdq *tdq)
 {
-	struct td_sched *ts;
+	struct thread *td;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	ts = runq_choose(&tdq->tdq_realtime);
-	if (ts != NULL)
-		return (ts);
-	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
-	if (ts != NULL) {
-		KASSERT(TS_TO_TD(ts)->td_priority >= PRI_MIN_TIMESHARE,
+	td = runq_choose(&tdq->tdq_realtime);
+	if (td != NULL)
+		return (td);
+	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
+	if (td != NULL) {
+		KASSERT(td->td_priority >= PRI_MIN_TIMESHARE,
 		    ("tdq_choose: Invalid priority on timeshare queue %d",
-		    TS_TO_TD(ts)->td_priority));
-		return (ts);
+		    td->td_priority));
+		return (td);
 	}
 
-	ts = runq_choose(&tdq->tdq_idle);
-	if (ts != NULL) {
-		KASSERT(TS_TO_TD(ts)->td_priority >= PRI_MIN_IDLE,
+	td = runq_choose(&tdq->tdq_idle);
+	if (td != NULL) {
+		KASSERT(td->td_priority >= PRI_MIN_IDLE,
 		    ("tdq_choose: Invalid priority on idle queue %d",
-		    TS_TO_TD(ts)->td_priority));
-		return (ts);
+		    td->td_priority));
+		return (td);
 	}
 
 	return (NULL);
@@ -1573,8 +1574,9 @@
  * mechanism since it happens with less regular and frequent events.
  */
 static void
-sched_pctcpu_update(struct td_sched *ts)
+sched_pctcpu_update(struct thread *td)
 {
+	struct td_sched *ts = TD_TO_TS(td);
 
 	if (ts->ts_ticks == 0)
 		return;
@@ -1757,7 +1759,7 @@
 	TDQ_UNLOCK(tdq);
 	thread_lock(td);
 	spinlock_exit();
-	sched_setcpu(TD_TO_TS(td), TDQ_ID(tdq), SRQ_YIELDING);
+	sched_setcpu(td, TDQ_ID(tdq), SRQ_YIELDING);
 #else
 	td->td_lock = TDQ_LOCKPTR(tdq);
 #endif
@@ -1985,7 +1987,7 @@
 		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
 		ts->ts_slptime += hzticks;
 		sched_interact_update(td);
-		sched_pctcpu_update(ts);
+		sched_pctcpu_update(td);
 		sched_priority(td);
 	}
 	/* Reset the slice value after we sleep. */
@@ -2175,7 +2177,7 @@
 	 * second.
 	 */
 	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
-		sched_pctcpu_update(ts);
+		sched_pctcpu_update(curthread);
 }
 
 /*
@@ -2221,7 +2223,7 @@
-	ts = tdq_choose(tdq);
-	if (ts) {
-		tdq_runq_rem(tdq, ts);
-		return (TS_TO_TD(ts));
+	td = tdq_choose(tdq);
+	if (td) {
+		tdq_runq_rem(tdq, td);
+		return (td);
 	}
 #ifdef SMP
 	/*
@@ -2365,7 +2367,7 @@
 		cpu = ts->ts_cpu;
 	else
-		cpu = sched_pickcpu(ts, flags);
+		cpu = sched_pickcpu(td, flags);
-	tdq = sched_setcpu(ts, cpu, flags);
+	tdq = sched_setcpu(td, cpu, flags);
 	tdq_add(tdq, td, flags);
 	if (cpu != cpuid) {
-		tdq_notify(ts);
+		tdq_notify(td);
@@ -2434,7 +2436,7 @@
 	if (ts->ts_ticks) {
 		int rtick;
 
-		sched_pctcpu_update(ts);
+		sched_pctcpu_update(td);
 		/* How many rtick per second ? */
 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;

