PERFORCE change 103631 for review

Roman Divacky rdivacky at FreeBSD.org
Fri Aug 11 10:33:13 UTC 2006


http://perforce.freebsd.org/chv.cgi?CH=103631

Change 103631 by rdivacky at rdivacky_witten on 2006/08/11 10:33:01

	A bunch of bugs fixed, found by a run with INVARIANTS/WITNESS:
	
	o		in linux_proc_init, shuffle the code so that we never
			malloc() with a lock held (see the sketch below)
	o		in linux_proc_exit, unlock before doing the futex wakeup;
			in the copyout error path, free the emuldata instead of
			unlocking the no-longer-held emul_lock
	o		in linux_proc_exit, move the freeing of the shared structure
			to after the thread is removed from the list
	o		the same in linux_proc_exec, plus remove the thread from
			the list there as well
	o		in linux_schedtail, unlock before the copyout
	o		in linux_set_tid_address, test ldebug(set_tid_address)
			instead of ldebug(exit_group) in the debug printf
	o		in linux_exit_group, remove the debugging code which let
			only 10 threads be killed; also use psignal() instead of
			kill(), skipping pids that pfind() no longer finds
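
	For reference, the allocate-before-lock shuffle looks roughly like
	this. A minimal sketch only, with hypothetical names (struct foo,
	M_FOO, foo_lock), not the actual Linuxolator code:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/malloc.h>
	#include <sys/queue.h>

	static MALLOC_DEFINE(M_FOO, "foo", "foo emuldata");

	struct foo {
		int		pid;
		LIST_ENTRY(foo)	link;
	};

	static LIST_HEAD(, foo) foo_head = LIST_HEAD_INITIALIZER(foo_head);
	static struct mtx foo_lock;	/* mtx_init() done at load time */

	static void
	foo_insert(int pid)
	{
		struct foo *f;

		/*
		 * M_WAITOK may sleep, so the allocation has to happen
		 * before any non-sleepable lock is taken; WITNESS warns
		 * about the opposite ordering.
		 */
		f = malloc(sizeof(*f), M_FOO, M_WAITOK | M_ZERO);
		f->pid = pid;

		mtx_lock(&foo_lock);
		LIST_INSERT_HEAD(&foo_head, f, link);
		mtx_unlock(&foo_lock);
	}

	The CLONE_VM case can stay under the lock because it only bumps the
	refcount on an already allocated shared structure.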

Affected files ...

.. //depot/projects/soc2006/rdivacky_linuxolator/i386/linux/linux_machdep.c#38 edit

Differences ...

==== //depot/projects/soc2006/rdivacky_linuxolator/i386/linux/linux_machdep.c#38 (text+ko) ====

@@ -1167,6 +1167,18 @@
 	   	/* non-exec call */
    		MALLOC(em, struct linux_emuldata *, sizeof *em, M_LINUX, M_WAITOK | M_ZERO);
 		em->pid = child;
+		if (flags & CLONE_VM) {
+		   	/* the CLONE_VM case is handled below, under the lock */
+		} else {
+		   	struct linux_emuldata_shared *s;
+
+   			MALLOC(s, struct linux_emuldata_shared *, sizeof *s, M_LINUX, M_WAITOK | M_ZERO);
+			em->shared = s;
+			s->refs = 1;
+			s->group_pid = child;
+
+			LIST_INIT(&s->threads);
+		}
 		EMUL_WLOCK(&emul_lock);
 		SLIST_INSERT_HEAD(&emuldata_head, em, emuldatas);
 	} else {
@@ -1190,7 +1202,6 @@
 	 * the newly created proc
 	 */
 	if (child != 0) {
-   	   	em->shared = NULL;
    	   	if (flags & CLONE_VM) {
    		   	/* lookup the parent */
 		   	p_em = em_find(td->td_proc->p_pid, EMUL_LOCKED);
@@ -1204,14 +1215,7 @@
 				em->shared->refs++;
 			}
 		} else {
-	   		struct linux_emuldata_shared *s;
-
-   			MALLOC(s, struct linux_emuldata_shared *, sizeof *s, M_LINUX, M_WAITOK | M_ZERO);
-			em->shared = s;
-			s->refs = 1;
-			s->group_pid = child;
-
-			LIST_INIT(&s->threads);
+		   	/* handled earlier, to avoid malloc(M_WAITOK) with the rwlock held */
 		}
 	}
 
@@ -1236,6 +1240,7 @@
    	struct linux_emuldata *em;
 	int error;
 	struct thread *td = FIRST_THREAD_IN_PROC(p);
+	int *child_clear_tid;
 
 	if (p->p_sysent != &elf_linux_sysvec)
 	   	return;
@@ -1249,18 +1254,36 @@
 #endif
 		return;
 	}
-	if (em->child_clear_tid != NULL) {
+
+	child_clear_tid = em->child_clear_tid;
+	
+	EMUL_RUNLOCK(&emul_lock);
+	/*
+	 * XXX: there is a race window here, but it should be harmless;
+	 * the same process is unlikely to exit on two CPUs at once.
+	 */
+	EMUL_WLOCK(&emul_lock);
+	LIST_REMOVE(em, threads);
+	SLIST_REMOVE(&emuldata_head, em, linux_emuldata, emuldatas);
+
+	em->shared->refs--;
+	if (em->shared->refs == 0)
+		FREE(em->shared, M_LINUX);
+	EMUL_WUNLOCK(&emul_lock);
+
+	if (child_clear_tid != NULL) {
 	   	struct linux_sys_futex_args cup;
 		int null = 0;
 
-		error = copyout(&null, em->child_clear_tid, sizeof(null));
+		/* XXX: doesn't the futex code still use this address? */
+		error = copyout(&null, child_clear_tid, sizeof(null));
 		if (error) {
-		   	EMUL_RUNLOCK(&emul_lock);
+		   	FREE(em, M_LINUX);
 		   	return;
 		}
 
 		/* futexes stuff */
-		cup.uaddr = em->child_clear_tid;
+		cup.uaddr = child_clear_tid;
 		cup.op = LINUX_FUTEX_WAKE;
 		cup.val = 0x7fffffff; /* Awake everyone */
 		cup.timeout = NULL;
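
Note: as the XXX comment says, dropping the read lock before taking the
write lock opens a window in which another thread can change the list.
Assuming emul_lock maps onto an rwlock(9) (the EMUL_* macros hide the
actual lock type), one way to close the window is to attempt an in-place
upgrade and fall back to drop, re-take and re-validate:

	if (!rw_try_upgrade(&emul_lock)) {
		/* upgrade failed; we still hold the read lock */
		rw_runlock(&emul_lock);
		rw_wlock(&emul_lock);
		/* the entry may have gone away in between: look it up again */
		em = em_find(p->p_pid, EMUL_LOCKED);
		if (em == NULL) {
			rw_wunlock(&emul_lock);
			return;
		}
	}
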
@@ -1271,20 +1294,6 @@
 		   	printf(LMSG("futex stuff in proc_exit failed.\n"));
 	}
 
-	em->shared->refs--;
-	if (em->shared->refs == 0)
-		FREE(em->shared, M_LINUX);
-
-	EMUL_RUNLOCK(&emul_lock);
-	/* XXX: there is a race but I think we can ommit that
-	 * because its not very possible that the same process 
-	 * will exit on different cpus etc.
-	 */
-	EMUL_WLOCK(&emul_lock);
-	LIST_REMOVE(em, threads);
-	SLIST_REMOVE(&emuldata_head, em, linux_emuldata, emuldatas);
-	EMUL_WUNLOCK(&emul_lock);
-
 	/* clean the stuff up */
 	FREE(em, M_LINUX);
 }
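
Note: the ordering above generalizes: unlink the entry and drop the shared
reference while still holding the exclusive lock, then free the private
part once nothing can reach it anymore. A sketch, reusing the hypothetical
foo naming from the description and assuming each entry carries a
refcounted shared part (struct foo_shared *shared):

	struct foo_shared {
		int	refs;
	};

	static void
	foo_destroy(struct foo *f)
	{
		mtx_lock(&foo_lock);
		LIST_REMOVE(f, link);
		/* the last thread out frees the shared state */
		if (--f->shared->refs == 0)
			free(f->shared, M_FOO);
		mtx_unlock(&foo_lock);
		free(f, M_FOO);		/* private part, now unreachable */
	}
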
@@ -1311,10 +1320,6 @@
 #endif
 			return;			
 		}
-
-		em->shared->refs--;
-		if (em->shared->refs == 0)
-		   	FREE(em->shared, M_LINUX);
 		
 		EMUL_RUNLOCK(&emul_lock);
 		/* XXX: there is a race but I think we can ommit that
@@ -1322,7 +1327,12 @@
 		 * will exit on different cpus etc.
 		 */
 		EMUL_WLOCK(&emul_lock);
+		LIST_REMOVE(em, threads);
 		SLIST_REMOVE(&emuldata_head, em, linux_emuldata, emuldatas);
+
+		em->shared->refs--;
+		if (em->shared->refs == 0)
+		   	FREE(em->shared, M_LINUX);
 		EMUL_WUNLOCK(&emul_lock);
 
 		FREE(em, M_LINUX);
@@ -1339,6 +1349,7 @@
 #ifdef	DEBUG
 	struct thread *td = FIRST_THREAD_IN_PROC(p);
 #endif
+	int *child_set_tid;
 
 	if (p->p_sysent != &elf_linux_sysvec)
 	   	return;
@@ -1360,11 +1371,12 @@
 #endif
 		return;
 	}
+	child_set_tid = em->child_set_tid;
+	EMUL_RUNLOCK(&emul_lock);
 
-	if (em->child_set_tid != NULL)
-	   	error = copyout(&p->p_pid, em->child_set_tid, sizeof(p->p_pid));
+	if (child_set_tid != NULL)
+	   	error = copyout(&p->p_pid, child_set_tid, sizeof(p->p_pid));
 
-	EMUL_RUNLOCK(&emul_lock);
 	return;
 }
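
Note: copyout(9) touches user memory, so it may fault and sleep and must
not run with a non-sleepable lock held; hence the snapshot-then-unlock
idiom above. As a fragment (foo_lock and the child_set_tid field are the
hypothetical stand-ins from the earlier sketches, p the current proc):

	int *tidp;

	mtx_lock(&foo_lock);
	tidp = f->child_set_tid;	/* snapshot while locked */
	mtx_unlock(&foo_lock);

	if (tidp != NULL)
		(void)copyout(&p->p_pid, tidp, sizeof(p->p_pid));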
 
@@ -1374,7 +1386,7 @@
    	struct linux_emuldata *em;
 
 #ifdef DEBUG
-	if (ldebug(exit_group))
+	if (ldebug(set_tid_address))
 		printf(ARGS(set_tid_address, "%p"), args->tidptr);
 #endif
 
@@ -1398,10 +1410,8 @@
 int
 linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
 {
-   	struct linux_emuldata *em, *td_em;
+   	struct linux_emuldata *em, *td_em, *tmp_em;
 	struct proc *sp;
-	struct kill_args ka;
-	int i = 0;
 
 #ifdef DEBUG
 	if (ldebug(exit_group))
@@ -1417,23 +1427,18 @@
 		return (0);
 	}
 
-     	LIST_FOREACH(em, &td_em->shared->threads, threads) {
-		if (i++ > 10)
-		   	break;
-
+     	LIST_FOREACH_SAFE(em, &td_em->shared->threads, threads, tmp_em) {
 	   	if (em->pid == td_em->pid)
 		   	continue;
 
 		sp = pfind(em->pid);
+		if (sp == NULL)
+		   	continue;
+		psignal(sp, SIGKILL);
 		PROC_UNLOCK(sp);
 #ifdef DEBUG
 		printf(LMSG("linux_sys_exit_group: kill PID %d\n"), em->pid);
 #endif
-		ka.pid = em->pid;
-		ka.signum = SIGKILL;
-		/* XXX: ehm? */
-		kill(FIRST_THREAD_IN_PROC(sp), &ka);
-
 	}
 
 	EMUL_RUNLOCK(&emul_lock);
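
Note: LIST_FOREACH_SAFE from queue(3) latches the next pointer before the
loop body runs, so the body may unlink or free the current element without
derailing the traversal; plain LIST_FOREACH would follow a dangling
pointer. A minimal sketch with the hypothetical foo list (self_pid stands
for the caller's own pid):

	struct foo *f, *tmp;

	LIST_FOREACH_SAFE(f, &foo_head, link, tmp) {
		if (f->pid == self_pid)
			continue;	/* don't shoot ourselves */
		LIST_REMOVE(f, link);
		free(f, M_FOO);
	}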

