PERFORCE change 52575 for review

Julian Elischer julian at FreeBSD.org
Sun May 9 19:13:30 PDT 2004


http://perforce.freebsd.org/chv.cgi?CH=52575

Change 52575 by julian at julian_ref on 2004/05/09 19:12:45

	checkin for safety

Affected files ...

.. //depot/projects/nsched/sys/i386/i386/machdep.c#5 edit
.. //depot/projects/nsched/sys/kern/kern_exit.c#4 edit
.. //depot/projects/nsched/sys/kern/kern_kse.c#4 edit
.. //depot/projects/nsched/sys/kern/kern_synch.c#3 edit
.. //depot/projects/nsched/sys/kern/kern_thr.c#4 edit
.. //depot/projects/nsched/sys/kern/kern_thread.c#11 edit
.. //depot/projects/nsched/sys/kern/sched_4bsd.c#6 edit
.. //depot/projects/nsched/sys/sys/proc.h#6 edit
.. //depot/projects/nsched/sys/sys/sched.h#4 edit

Differences ...

==== //depot/projects/nsched/sys/i386/i386/machdep.c#5 (text+ko) ====

@@ -1950,15 +1950,18 @@
 	int gsel_tss, metadata_missing, off, x;
 	struct pcpu *pc;
 
+	/* 
+	 * Set up things that proc0 would have associated with it already 
+	 * if it were taken from the process allocation cache.
+	 * This includes a ksegrp, a thread, and a stack and uarea to go
+	 * with the thread.  The pcb is delineated, ready for use.
+	 * Note that the stack for proc0 has no guard page.
+	 */
 	proc0.p_uarea = proc0uarea;
 	thread0.td_kstack = proc0kstack;
 	thread0.td_pcb = (struct pcb *)
 	   (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
 	atdevbase = ISA_HOLE_START + KERNBASE;
-
-	/*
-	 * Link a few more bits of the proc0 together.
-	 */
 	proc_linkup(&proc0, &ksegrp0, &thread0);
 
 	metadata_missing = 0;

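For reference, the pcb placement above puts the pcb in the last
sizeof(struct pcb) bytes of the kernel stack: td_kstack is the base of
a KSTACK_PAGES * PAGE_SIZE region, and casting one-past-the-top to
(struct pcb *) and subtracting one carves out the slot.  A standalone
userland sketch of the same arithmetic (sizes and names here are
placeholders, not the kernel's):

	#include <stdio.h>

	#define MY_PAGE_SIZE	4096		/* placeholder page size */
	#define MY_KSTACK_PAGES	2		/* placeholder stack pages */

	struct my_pcb { char pad[512]; };	/* stand-in for struct pcb */

	int
	main(void)
	{
		static char kstack[MY_KSTACK_PAGES * MY_PAGE_SIZE];
		/* A pcb-sized slot at the very top of the stack. */
		struct my_pcb *pcb =
		    (struct my_pcb *)(kstack + sizeof(kstack)) - 1;

		printf("stack %p..%p, pcb %p\n", (void *)kstack,
		    (void *)(kstack + sizeof(kstack)), (void *)pcb);
		return (0);
	}
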
==== //depot/projects/nsched/sys/kern/kern_exit.c#4 (text+ko) ====

@@ -155,6 +155,12 @@
 		 */
 		if (thread_single(SINGLE_EXIT))
 			panic ("Exit: Single threading fouled up");
+			/* 
+			 * For this to happen, the thread_suspend_check(0)
+			 * above must have failed to kill a sibling
+			 * even though we should have gone single-file
+			 * because of the PROC lock.
+			 */
 		/*
 		 * All other activity in this process is now stopped.
 		 * Remove excess KSEs and KSEGRPS. XXXKSE (when we have them)
@@ -162,18 +168,8 @@
 		 * Turn off threading support.
 		 */
 		p->p_flag &= ~P_SA;
-		thread_single_end();	/* Don't need this any more. */
+		thread_single_end();	/* end single-threading mode */
 	}
-	/*
-	 * With this state set:
-	 * Any thread entering the kernel from userspace will thread_exit()
-	 * in trap().  Any thread attempting to sleep will return immediatly
-	 * with EINTR or EWOULDBLOCK, which will hopefully force them
-	 * to back out to userland, freeing resources as they go, and
-	 * anything attempting to return to userland will thread_exit()
-	 * from userret().  thread_exit() will do a wakeup on p->p_numthreads
-	 * if it transitions to 1.
-	 */
 
 	p->p_flag |= P_WEXIT;
 	PROC_UNLOCK(p);
@@ -391,16 +387,6 @@
 	lim_free(plim);
 
 	/*
-	 * Release this thread's reference to the ucred.  The actual proc
-	 * reference will stay around until the proc is harvested by
-	 * wait().  At this point the ucred is immutable (no other threads
-	 * from this proc are around that can change it) so we leave the
-	 * per-thread ucred pointer intact in case it is needed although
-	 * in theory nothing should be using it at this point.
-	 */
-	crfree(td->td_ucred);
-
-	/*
 	 * Remove proc from allproc queue and pidhash chain.
 	 * Place onto zombproc.  Unlink from parent's child list.
 	 */
@@ -503,6 +489,15 @@
 	 */
 	cpu_exit(td);
 
+	/*
+	 * Release this thread's reference to the ucred.  The actual proc
+	 * reference will stay around until the proc is harvested by
+	 * wait().  XXX maybe this should be done there too.
+	 */
+	crfree(td->td_ucred);
+	td->td_ucred = NULL;
+
+
 	PROC_LOCK(p);
 	PROC_LOCK(p->p_pptr);
 	sx_xunlock(&proctree_lock);
@@ -533,7 +528,7 @@
 	cpu_sched_exit(td); /* XXXKSE check if this should be in thread_exit */
 	/*
 	 * Allow the scheduler to adjust the priority of the
-	 * parent when a kseg is exiting.
+	 * parent when a process is exiting.
 	 */
 	if (p->p_pid != 1) 
 		sched_exit(p->p_pptr, td);

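The ucred hunk above also nulls td_ucred after the crfree(), so any
stray use after this point faults loudly instead of silently reading
freed memory.  A standalone sketch of that drop-and-poison reference
pattern (illustrative names, not the kernel's crhold()/crfree()):

	#include <assert.h>
	#include <stdlib.h>

	struct my_cred { int refcnt; };		/* stand-in for ucred */

	static void
	my_cred_free(struct my_cred *cr)
	{
		if (--cr->refcnt == 0)
			free(cr);
	}

	static void
	my_thread_drop_cred(struct my_cred **crp)
	{
		my_cred_free(*crp);
		*crp = NULL;	/* poison the stale pointer */
	}

	int
	main(void)
	{
		struct my_cred *cr = malloc(sizeof(*cr));

		assert(cr != NULL);
		cr->refcnt = 1;
		my_thread_drop_cred(&cr);
		assert(cr == NULL);	/* later users see NULL, not garbage */
		return (0);
	}
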
==== //depot/projects/nsched/sys/kern/kern_kse.c#4 (text+ko) ====

@@ -444,7 +444,7 @@
 	PROC_LOCK(p);
 	if (!(p->p_flag & P_SA)) {
 		first = 1;
-		p->p_flag |= P_SA;
+		p->p_flag |= P_SA|P_HADTHREADS;
 	}
 	PROC_UNLOCK(p);
 	if (!sa && !uap->newgroup && !first)

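P_HADTHREADS is deliberately sticky: it is set alongside P_SA the
first time the process goes threaded, and unlike P_SA it is never
cleared, so teardown code can distinguish "was ever threaded" from
"is threaded right now".  A standalone sketch of that flag discipline
(flag values here are placeholders):

	#include <stdio.h>

	#define MY_P_SA		0x01	/* threaded now (clearable) */
	#define MY_P_HADTHREADS	0x02	/* ever threaded (sticky) */

	int
	main(void)
	{
		int p_flag = 0;

		p_flag |= MY_P_SA | MY_P_HADTHREADS;	/* first kse_create() */
		p_flag &= ~MY_P_SA;			/* exit1() ends threading */

		/* Exit cleanup still knows threads once existed. */
		printf("had threads: %s\n",
		    (p_flag & MY_P_HADTHREADS) ? "yes" : "no");
		return (0);
	}
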
==== //depot/projects/nsched/sys/kern/kern_synch.c#3 (text+ko) ====

@@ -361,7 +361,7 @@
 	    p->p_comm);
 	if (td->td_proc->p_flag & P_SA)
 		thread_switchout(td);
-	sched_switch(td);
+	sched_switch(td, flags);
 
 	CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,
 	    p->p_comm);

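With this change the switch reason flows mi_switch() ->
sched_switch() -> choosethread(), so the scheduler can tell a
voluntary switch (SW_VOL, as the kern_thr.c and kern_thread.c hunks
below pass) from an involuntary one.  A standalone sketch of that
plumbing (stub functions and flag values are illustrative; only
SW_VOL appears in this change):

	#include <stdio.h>

	#define MY_SW_VOL	0x01	/* thread gave up the CPU itself */
	#define MY_SW_INVOL	0x02	/* thread was preempted */

	static void
	my_choosethread(int flags)
	{
		/* A scheduler might favor the same group on MY_SW_VOL. */
		printf("choosing after %s switch\n",
		    (flags & MY_SW_VOL) ? "voluntary" : "involuntary");
	}

	static void
	my_sched_switch(int flags)
	{
		my_choosethread(flags);	/* hand the reason through */
	}

	int
	main(void)
	{
		my_sched_switch(MY_SW_VOL);	/* e.g. sleep path */
		my_sched_switch(MY_SW_INVOL);	/* e.g. preemption */
		return (0);
	}
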
==== //depot/projects/nsched/sys/kern/kern_thr.c#4 (text+ko) ====

@@ -101,7 +101,7 @@
 	sched_exit_thread(p->p_pptr, td);
 	thread_stash(td);
 
-	cpu_throw(td, choosethread());
+	cpu_throw(td, choosethread(SW_VOL));
 }
 
 #define	RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
@@ -141,6 +141,7 @@
 	td0->td_proc = td->td_proc;
 	PROC_LOCK(td->td_proc);
 	td0->td_sigmask = td->td_sigmask;
+	td->td_proc->p_flag |= P_HADTHREADS;
 	PROC_UNLOCK(td->td_proc);
 	td0->td_ucred = crhold(td->td_ucred);
 

==== //depot/projects/nsched/sys/kern/kern_thread.c#11 (text+ko) ====

@@ -395,7 +395,11 @@
 }
 
 /*
- * Reap zombie kse resource.
+ * Reap zombie thread resources.
+ * These include threads & ksegrps that were still live and in use
+ * as they were exiting, and possibly other structures associated
+ * with them by the threading and scheduling modules that could not
+ * be freed until the exiting thread had completely stopped running.
  */
 void
 thread_reap(void)
@@ -406,6 +410,8 @@
 	/*
 	 * Don't even bother to lock if none at this instant,
 	 * we really don't care about the next instant..
+	 * Note: never call the other GC routines unless we have a
+	 * thread or ksegrp to clean up as well.
 	 */
 	if ((!TAILQ_EMPTY(&zombie_threads))
 	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
@@ -522,11 +528,24 @@
 
 /*
  * Discard the current thread and exit from its context.
+ * Always called with the scheduler lock held.
  *
  * Because we can't free a thread while we're operating under its context,
  * push the current thread into our CPU's deadthread holder. This means
  * we needn't worry about someone else grabbing our context before we
- * do a cpu_throw().
+ * do a cpu_throw().  This may not be needed now as we are under schedlock.
+ * Maybe we can just do a thread_stash() as thr_exit1 does.
+ */
+/*  XXX
+ * libthr expects its thread exit to return for the last
+ * thread, meaning that the program is back in non-threaded
+ * mode.  Because we call cpu_throw() unconditionally here,
+ * libthr has its own version of this (thr_exit1()) that
+ * skips that step when the exiting thread is the last one.
+ * This function is also called from thread_suspend_check().
+ * Of course in the end, they end up coming here through exit1()
+ * anyhow.  After fixing 'thr' to play by the rules we should be
+ * able to merge these two functions together.
  */
 void
 thread_exit(void)
@@ -556,48 +575,59 @@
 	/*
 	 * The last thread is left attached to the process
 	 * So that the whole bundle gets recycled. Skip
-	 * all this stuff.
+	 * all this stuff if we never had threads.
 	 */
-	if (p->p_numthreads > 1) {
-		thread_unlink(td);
-		if (p->p_maxthrwaits)
-			wakeup(&p->p_numthreads);
-		/*
-		 * The test below is NOT true if we are the
-		 * sole exiting thread. P_STOPPED_SINGLE is unset
-		 * in exit1() after it is the only survivor.
-		 */
-		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
-			if (p->p_numthreads == p->p_suspcount) {
-				thread_unsuspend_one(p->p_singlethread);
+	if (p->p_flag & P_HADTHREADS) {
+		if (p->p_numthreads > 1) {
+			thread_unlink(td);
+			if (p->p_maxthrwaits)
+				wakeup(&p->p_numthreads);
+			/*
+			 * The test below is NOT true if we are the
+			 * sole exiting thread. P_STOPPED_SINGLE is unset
+			 * in exit1() after it is the only survivor.
+			 */
+			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
+				if (p->p_numthreads == p->p_suspcount) {
+					thread_unsuspend_one(p->p_singlethread);
+				}
 			}
-		}
 
-		/*
-		 * Because each upcall structure has an owner thread,
-		 * owner thread exits only when process is in exiting
-		 * state, so upcall to userland is no longer needed,
-		 * deleting upcall structure is safe here.
-		 * So when all threads in a group is exited, all upcalls
-		 * in the group should be automatically freed.
-		 */
-		if (td->td_upcall)
-			upcall_remove(td);
+			/*
+			 * Because each upcall structure has an owner thread,
+			 * owner thread exits only when process is in exiting
+			 * state, so upcall to userland is no longer needed,
+			 * deleting upcall structure is safe here.
+			 * So when all threads in a group is exited, all upcalls
+			 * in the group should be automatically freed.
+			 *  XXXKSE This is a KSE thing and should be exported
+			 * there somehow.
+			 */
+			if (td->td_upcall)
+				upcall_remove(td);
 
-		sched_exit_thread(td->td_proc->p_pptr, td);
-		PROC_UNLOCK(p);
-		td->td_state	= TDS_INACTIVE;
-#if 0
-		td->td_proc	= NULL;
-#endif
-		td->td_ksegrp	= NULL;
-		PCPU_SET(deadthread, td);
-	} else {
-		PROC_UNLOCK(p);
+			if (kg->kg_numthreads == 0) {
+				/* This kseg is kaput */
+				sched_set_concurrancy(kg, 0);
+				ksegrp_unlink(kg);
+			}
+			
+			sched_exit_thread(td->td_proc->p_pptr, td);
+			td->td_state	= TDS_INACTIVE;
+	#if 0
+			td->td_proc	= NULL;
+	#endif
+			td->td_ksegrp	= NULL;
+			PCPU_SET(deadthread, td);
+		} else {
+			if (p->p_numthreads == 1) {
+				sched_set_concurrancy(kg, 1);
+			}
+		}
 	}
-	/* XXX Shouldn't cpu_throw() here. */
+	PROC_UNLOCK(p);
 	mtx_assert(&sched_lock, MA_OWNED);
-	cpu_throw(td, choosethread());
+	cpu_throw(td, choosethread(SW_VOL));
 	panic("I'm a teapot!");
 	/* NOTREACHED */
 }

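The rewritten tail of thread_exit() above now has three cases, gated
on P_HADTHREADS: a non-final thread is unlinked (and its ksegrp torn
down once the group is empty); the final thread of a once-threaded
process has its group's concurrency dropped back to 1; and a process
that never had threads skips the teardown entirely.  A standalone
sketch of that decision tree (names illustrative):

	#include <stdio.h>

	static void
	my_exit_tail(int had_threads, int numthreads, int kg_numthreads)
	{
		if (!had_threads) {
			printf("never threaded: skip teardown\n");
			return;
		}
		if (numthreads > 1) {
			printf("unlink exiting thread\n");
			if (kg_numthreads == 0)
				printf("group empty: concurrency 0, "
				    "unlink ksegrp\n");
		} else {
			printf("last thread: concurrency back to 1\n");
		}
	}

	int
	main(void)
	{
		my_exit_tail(0, 1, 0);	/* plain, never-threaded process */
		my_exit_tail(1, 3, 0);	/* exiting thread emptied its group */
		my_exit_tail(1, 1, 1);	/* sole survivor of threaded proc */
		return (0);
	}
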
==== //depot/projects/nsched/sys/kern/sched_4bsd.c#6 (text+ko) ====

@@ -148,6 +148,7 @@
 	int		skg_runq_kses;	/* (j) Num KSEs on runq. */
 	int		skg_idle_kses;	/* (j) Num KSEs on iq. */
 	int		skg_kses;	/* (j) Num KSEs in group. */
+	int		skg_concurrancy; /* (j) desired concurrency */
 }; 
 #define	kg_kseq			kg_sched->skg_kseq
 #define	kg_iq			kg_sched->skg_iq
@@ -751,7 +752,7 @@
 }
 
 void
-sched_switch(struct thread *td)
+sched_switch(struct thread *td, int flags)
 {
 	struct thread *newtd;
 	struct kse *ke;
@@ -785,7 +786,7 @@
 		 */
 		kse_reassign(ke);
 	}
-	newtd = choosethread();
+	newtd = choosethread(flags);
 	if (td != newtd)
 		cpu_switch(td, newtd);
 	sched_lock.mtx_lock = (uintptr_t)td;
@@ -1297,12 +1298,28 @@
 		childtd->td_last_kse	= NULL;
 }
 
+/*
+ * Whenever we have idle KSEs and more KSEs than the desired concurrency,
+ * free as many as we can.
+ */
+#define REDUCE_KSES(skg) 					\
+do {								\
+	while ((skg->skg_concurrancy < skg->skg_kses) &&	\
+    	(skg->skg_idle_kses > 0)) {				\
+		kse_unlink(TAILQ_FIRST(&skg->skg_iq));		\
+	}							\
+} while (0)
+	
 void
 sched_set_concurrancy(struct ksegrp *kg, int concurrancy)
 {
 	struct kse *newke;
+	struct kg_sched *skg;
 
-	while (kg->kg_kses < concurrancy) {
+	skg = kg->kg_sched;
+	skg->skg_concurrancy = concurrancy;
+	REDUCE_KSES(skg);
+	while (skg->skg_kses < skg->skg_concurrancy) {
 		newke = kse_alloc();
 		bzero(&newke->ke_startzero, RANGEOF(struct kse,
 		      ke_startzero, ke_endzero));
@@ -1426,7 +1443,7 @@
  * if the switch is voluntary or involuntary.
  */
 struct thread *
-choosethread(void)
+choosethread(int flags)
 {
 	struct kse *ke;
 	struct thread *td;
@@ -1445,7 +1462,16 @@
 #endif
 
 retry:
-	ke = sched_choose();
+	kg = curthread->td_ksegrp;
+#if 0
+	if (flags & SW_VOL) {
+		if (kg->kg_runnable) {
+			td = TAILQ_FIRST(&kg->kg_runq);
+		}
+	}
+	if (ke == NULL)
+#endif
+		ke = sched_choose();
 	if (ke) {
 		td = ke->ke_thread;
 		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
@@ -1528,6 +1554,7 @@
 	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
 	kg->kg_idle_kses++;
 	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
+	REDUCE_KSES(kg->kg_sched); /* if we are closing down, discard it */
 	return;
 }
 

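REDUCE_KSES() is wrapped in do { ... } while (0) so it expands as a
single statement inside an unbraced if/else.  Since the macro
evaluates its argument repeatedly, a static inline function would be
the safer spelling; a sketch of that alternative, same logic as the
macro above (illustrative, not part of this change):

	/* Sketch: REDUCE_KSES as a function rather than a macro. */
	static __inline void
	reduce_kses(struct kg_sched *skg)
	{
		/* Free idle KSEs until we are back within the target. */
		while (skg->skg_concurrancy < skg->skg_kses &&
		    skg->skg_idle_kses > 0)
			kse_unlink(TAILQ_FIRST(&skg->skg_iq));
	}
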
==== //depot/projects/nsched/sys/sys/proc.h#6 (text+ko) ====

@@ -532,9 +532,7 @@
 
 #define	NOCPU	0xff		/* For when we aren't on a CPU. (SMP) */
 
-/* Status values (p_stat). */
-
-/* These flags are kept in p_flag. */
+/* These flags are kept in p_flag.  Change them only under the PROC lock. */
 #define	P_ADVLOCK	0x00001	/* Process may hold a POSIX advisory lock. */
 #define	P_CONTROLT	0x00002	/* Has a controlling terminal. */
 #define	P_KTHREAD	0x00004	/* Kernel thread. (*)*/
@@ -542,6 +540,7 @@
 #define	P_PPWAIT	0x00010	/* Parent is waiting for child to exec/exit. */
 #define	P_PROFIL	0x00020	/* Has started profiling. */
 #define	P_STOPPROF	0x00040	/* Has thread in requesting to stop prof */
+#define	P_HADTHREADS	0x00080	/* Has had threads (no cleanup shortcuts) */
 #define	P_SUGID		0x00100	/* Had set id privileges since last exec. */
 #define	P_SYSTEM	0x00200	/* System proc: no sigs, stats or swapping. */
 #define	P_SINGLE_EXIT	0x00400	/* Threads suspending should exit, not wait. */
@@ -739,7 +738,7 @@
 
 void	adjustrunqueue(struct thread *, int newpri);
 void	ast(struct trapframe *framep);
-struct	thread *choosethread(void);
+struct	thread *choosethread(int flags);
 int	cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
 int	enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp, struct session *sess);
 int	enterthispgrp(struct proc *p, struct pgrp *pgrp);

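The new p_flag comment makes the locking rule explicit: p_flag is
protected by the per-process lock, so every read-modify-write must be
bracketed by PROC_LOCK()/PROC_UNLOCK(), as the kern_kse.c and
kern_thr.c hunks above do:

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;	/* update only under the proc lock */
	PROC_UNLOCK(p);
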
==== //depot/projects/nsched/sys/sys/sched.h#4 (text+ko) ====

@@ -67,7 +67,7 @@
 fixpt_t	sched_pctcpu(struct thread *td);
 void	sched_prio(struct thread *td, u_char prio);
 void	sched_sleep(struct thread *td);
-void	sched_switch(struct thread *td);
+void	sched_switch(struct thread *td, int flags);
 void	sched_userret(struct thread *td);
 void	sched_wakeup(struct thread *td);
 
