PERFORCE change 78769 for review

John Baldwin <jhb@FreeBSD.org>
Tue Jun 21 17:31:47 GMT 2005


http://perforce.freebsd.org/chv.cgi?CH=78769

Change 78769 by jhb at jhb_slimer on 2005/06/21 17:31:03

	Revert some changes that aren't really needed and are just cluttering
	up this branch.

Affected files ...

.. //depot/projects/smpng/sys/i386/i386/machdep.c#95 edit
.. //depot/projects/smpng/sys/i386/i386/sys_machdep.c#42 edit
.. //depot/projects/smpng/sys/i386/i386/vm_machdep.c#67 edit
.. //depot/projects/smpng/sys/notes#46 edit

Differences ...

==== //depot/projects/smpng/sys/i386/i386/machdep.c#95 (text+ko) ====

@@ -1156,11 +1156,8 @@
 	pcb->pcb_gs = _udatasel;
 	load_gs(_udatasel);
 
-	mtx_lock_spin(&sched_lock);
 	if (td->td_proc->p_md.md_ldt)
 		user_ldt_free(td);
-	else
-		mtx_unlock_spin(&sched_lock);
   
 	bzero((char *)regs, sizeof(struct trapframe));
 	regs->tf_eip = entry;

==== //depot/projects/smpng/sys/i386/i386/sys_machdep.c#42 (text+ko) ====

@@ -384,7 +384,7 @@
 #endif
 
 /*
- * Must be called with sched_lock held but not recursed.
+ * Must be called with either sched_lock free or held but not recursed.
  * If it does not return NULL, it will return with it owned.
  */
 struct proc_ldt *
@@ -392,8 +392,9 @@
 {
 	struct proc_ldt *pldt, *new_ldt;
 
-	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
-	mtx_unlock_spin(&sched_lock);
+	if (mtx_owned(&sched_lock))
+		mtx_unlock_spin(&sched_lock);
+	mtx_assert(&sched_lock, MA_NOTOWNED);
 	MALLOC(new_ldt, struct proc_ldt *, sizeof(struct proc_ldt),
 		M_SUBPROC, M_WAITOK);
 
@@ -424,7 +425,7 @@
 }
 
 /*
- * Must be called with sched_lock held but not recursed.
+ * Must be called either with sched_lock free or held but not recursed.
  * If md_ldt is not NULL, it will return with sched_lock released.
  */
 void
@@ -436,6 +437,8 @@
 	if (pldt == NULL)
 		return;
 
+	if (!mtx_owned(&sched_lock))
+		mtx_lock_spin(&sched_lock);
 	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
 	if (td == PCPU_GET(curthread)) {
 		lldt(_default_ldt);
@@ -695,8 +698,7 @@
 	if (len < NLDT + 1)
 		len = NLDT + 1;
 
-	/* allocate user ldt */
-	mtx_lock_spin(&sched_lock);
+	/* Allocate a user ldt. */
 	pldt = mdp->md_ldt;
 	if (!pldt || len > pldt->ldt_len) {
 		struct proc_ldt *new_ldt;
@@ -705,6 +707,8 @@
 		if (new_ldt == NULL)
 			return (ENOMEM);
 		pldt = mdp->md_ldt;
+
+		/* sched_lock was acquired by user_ldt_alloc. */
 		if (pldt) {
 			if (new_ldt->ldt_len > pldt->ldt_len) {
 				old_ldt_base = pldt->ldt_base;
@@ -720,7 +724,7 @@
 			} else {
 				/*
 				 * If other threads already did the work,
-				 * do nothing
+				 * do nothing.
 				 */
 				mtx_unlock_spin(&sched_lock);
 				kmem_free(kernel_map,
@@ -741,7 +745,6 @@
 		set_user_ldt(mdp);
 		mtx_unlock_spin(&sched_lock);
 #endif
-	} else
-		mtx_unlock_spin(&sched_lock);
+	}
 	return (0);
 }

==== //depot/projects/smpng/sys/i386/i386/vm_machdep.c#67 (text+ko) ====

@@ -155,7 +155,6 @@
 			struct mdproc *mdp1 = &p1->p_md;
 			struct proc_ldt *pldt;
 
-			mtx_lock_spin(&sched_lock);
 			pldt = mdp1->md_ldt;
 			if (pldt && pldt->ldt_refcnt > 1) {
 				pldt = user_ldt_alloc(mdp1, pldt->ldt_len);
@@ -164,8 +163,7 @@
 				mdp1->md_ldt = pldt;
 				set_user_ldt(mdp1);
 				user_ldt_free(td1);
-			} else
-				mtx_unlock_spin(&sched_lock);
+			}
 		}
 		return;
 	}
@@ -300,7 +298,6 @@
 cpu_exit(struct thread *td)
 {
 
-	mtx_lock_spin(&sched_lock);
 	if (td->td_proc->p_md.md_ldt) {
 
 		/* Reset pc->pcb_gs and %gs before invalidating it. */
@@ -308,15 +305,13 @@
 		load_gs(_udatasel);
 
 		user_ldt_free(td);
-	} else
-		mtx_unlock_spin(&sched_lock);
+	}
 }
 
 void
 cpu_thread_exit(struct thread *td)
 {
 
-
 #ifdef DEV_NPX
 	npxexit(td);
 #endif

==== //depot/projects/smpng/sys/notes#46 (text+ko) ====

@@ -3,10 +3,6 @@
 
 - Consolidate linux module version and dependencies into MI location.
   - Untested
-- Redo sched_lock locking for i386 ldt to not use mtx_owned().  This is
-  the only user of mtx_owned() on spin locks and keeps us from optimizing
-  spin locks on UP kernels to only do critical sections.
-  - Untested
 - Fixup asm constraints to use '+' rather than declaring a variable in both
   the input and output sections with '0', etc. in both the at386 and pc98
   bus headers.  Also removes bogus 'cc' clobbers.  gcc already clobbers 'cc'


More information about the p4-projects mailing list