PERFORCE change 159233 for review

Julian Elischer julian at FreeBSD.org
Sun Mar 15 00:43:34 PDT 2009


http://perforce.freebsd.org/chv.cgi?CH=159233

Change 159233 by julian at julian_trafmon1 on 2009/03/15 07:42:35

	IFC at 159229

Affected files ...

.. //depot/projects/vimage/src/sys/dev/pci/pci_pci.c#14 integrate
.. //depot/projects/vimage/src/sys/kern/kern_thread.c#21 integrate
.. //depot/projects/vimage/src/sys/kern/subr_lock.c#13 integrate
.. //depot/projects/vimage/src/sys/sys/lock_profile.h#9 integrate

Differences ...

==== //depot/projects/vimage/src/sys/dev/pci/pci_pci.c#14 (text+ko) ====

@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/dev/pci/pci_pci.c,v 1.57 2009/03/14 14:08:53 imp Exp $");
+__FBSDID("$FreeBSD: src/sys/dev/pci/pci_pci.c,v 1.58 2009/03/15 06:40:57 imp Exp $");
 
 /*
  * PCI:PCI bridge support.
@@ -413,12 +413,14 @@
 			}
 		} else {
 			ok = 1;
+#if 0
 			/*
 			 * If we overlap with the subtractive range, then
 			 * pick the upper range to use.
 			 */
 			if (start < sc->iolimit && end > sc->iobase)
 				start = sc->iolimit + 1;
+#endif
 		}
 		if (end < start) {
 			device_printf(dev, "ioport: end (%lx) < start (%lx)\n",
@@ -478,6 +480,7 @@
 			}
 		} else if (!ok) {
 			ok = 1;	/* subtractive bridge: always ok */
+#if 0
 			if (pcib_is_nonprefetch_open(sc)) {
 				if (start < sc->memlimit && end > sc->membase)
 					start = sc->memlimit + 1;
@@ -486,6 +489,7 @@
 				if (start < sc->pmemlimit && end > sc->pmembase)
 					start = sc->pmemlimit + 1;
 			}
+#endif
 		}
 		if (end < start) {
 			device_printf(dev, "memory: end (%lx) < start (%lx)\n",
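
To see what the now-disabled logic did, here is a minimal standalone
sketch of the same overlap-and-bump test. The window and request
values (iobase/iolimit and the start/end range) are made up for
illustration, not taken from real hardware:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long iobase = 0x1000, iolimit = 0x1fff; /* subtractive window */
		unsigned long start = 0x1800, end = 0x27ff;	 /* resource request */

		/* The same test the #if 0 block used to perform. */
		if (start < iolimit && end > iobase)
			start = iolimit + 1;
		printf("adjusted request: 0x%lx-0x%lx\n", start, end);
		/* Prints 0x2000-0x27ff: the request was bumped past the window. */
		return (0);
	}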

==== //depot/projects/vimage/src/sys/kern/kern_thread.c#21 (text+ko) ====

@@ -29,7 +29,7 @@
 #include "opt_witness.h"
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_thread.c,v 1.282 2008/11/17 20:49:29 pjd Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_thread.c,v 1.283 2009/03/15 06:41:47 jeff Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -306,6 +306,8 @@
 void
 thread_free(struct thread *td)
 {
+
+	lock_profile_thread_exit(td);
 	if (td->td_cpuset)
 		cpuset_rel(td->td_cpuset);
 	td->td_cpuset = NULL;
@@ -439,6 +441,7 @@
 	/* Wait for any remaining threads to exit cpu_throw(). */
 	while (p->p_exitthreads)
 		sched_relinquish(curthread);
+	lock_profile_thread_exit(td);
 	cpuset_rel(td->td_cpuset);
 	td->td_cpuset = NULL;
 	cpu_thread_clean(td);
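
The kern_thread.c hunks are mechanical, but the ordering matters: the
new lock_profile_thread_exit() call has to run before the rest of the
per-thread teardown tears down state the profiler may still reference.
A minimal userland sketch of that ordering, with stub functions
standing in for the kernel routines (thread_teardown() and the *_stub
names are invented for illustration):

	#include <stdio.h>

	struct thread { int dummy; };

	static void
	lock_profile_thread_exit_stub(struct thread *td)
	{
		(void)td;
		printf("1: drain per-thread lock-profile lists\n");
	}

	static void
	cpuset_rel_stub(struct thread *td)
	{
		(void)td;
		printf("2: release cpuset\n");
	}

	static void
	cpu_thread_clean_stub(struct thread *td)
	{
		(void)td;
		printf("3: arch-specific thread cleanup\n");
	}

	/* Teardown order established by the hunks above. */
	static void
	thread_teardown(struct thread *td)
	{
		lock_profile_thread_exit_stub(td); /* before lists go away */
		cpuset_rel_stub(td);
		cpu_thread_clean_stub(td);
	}

	int
	main(void)
	{
		struct thread td = { 0 };

		thread_teardown(&td);
		return (0);
	}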

==== //depot/projects/vimage/src/sys/kern/subr_lock.c#13 (text+ko) ====

@@ -33,7 +33,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/subr_lock.c,v 1.24 2008/07/27 21:45:20 kmacy Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/subr_lock.c,v 1.25 2009/03/15 06:41:47 jeff Exp $");
 
 #include "opt_ddb.h"
 #include "opt_mprof.h"
@@ -46,9 +46,11 @@
 #include <sys/lock.h>
 #include <sys/lock_profile.h>
 #include <sys/malloc.h>
+#include <sys/mutex.h>
 #include <sys/pcpu.h>
 #include <sys/proc.h>
 #include <sys/sbuf.h>
+#include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 
@@ -186,7 +188,8 @@
 
 struct lock_prof_cpu *lp_cpu[MAXCPU];
 
-int lock_prof_enable = 0;
+volatile int lock_prof_enable = 0;
+static volatile int lock_prof_resetting;
 
 /* SWAG: sbuf size = avg stat. line size * number of locks */
 #define LPROF_SBUF_SIZE		256 * 400
@@ -239,25 +242,77 @@
 }
 SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
 
+/*
+ * To be certain that lock profiling has idled on all cpus before we
+ * reset, we schedule the resetting thread on all active cpus.  Since
+ * all operations happen within critical sections we can be sure that
+ * it is safe to zero the profiling structures.
+ */
+static void
+lock_prof_idle(void)
+{
+	struct thread *td;
+	int cpu;
+
+	td = curthread;
+	thread_lock(td);
+	for (cpu = 0; cpu <= mp_maxid; cpu++) {
+		if (CPU_ABSENT(cpu))
+			continue;
+		sched_bind(td, cpu);
+	}
+	sched_unbind(td);
+	thread_unlock(td);
+}
+
+static void
+lock_prof_reset_wait(void)
+{
+
+	/*
+	 * Spin relinquishing our cpu so that lock_prof_idle may
+	 * run on it.
+	 */
+	while (lock_prof_resetting)
+		sched_relinquish(curthread);
+}
+
 static void
 lock_prof_reset(void)
 {
 	struct lock_prof_cpu *lpc;
 	int enabled, i, cpu;
 
+	/*
+	 * We race not only with lock acquire and release but also with
+	 * thread exit.  So that exiting threads do not retain stale head
+	 * pointers, they must see resetting set before enabled is
+	 * cleared; otherwise a thread could observe profiling disabled
+	 * and exit without waiting for reset() below to unlink its entries.
+	 */
+	atomic_store_rel_int(&lock_prof_resetting, 1);
 	enabled = lock_prof_enable;
 	lock_prof_enable = 0;
-	pause("lpreset", hz / 10);
+	lock_prof_idle();
+	/*
+	 * Some objects may have migrated between CPUs.  Clear all links
+	 * before we zero the structures.  Some items may still be linked
+	 * into per-thread lists as well.
+	 */
 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
 		lpc = lp_cpu[cpu];
 		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
 			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
 			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
 		}
+	}
+	for (cpu = 0; cpu <= mp_maxid; cpu++) {
+		lpc = lp_cpu[cpu];
 		bzero(lpc, sizeof(*lpc));
 		lock_prof_init_type(&lpc->lpc_types[0]);
 		lock_prof_init_type(&lpc->lpc_types[1]);
 	}
+	atomic_store_rel_int(&lock_prof_resetting, 0);
 	lock_prof_enable = enabled;
 }
 
@@ -351,7 +406,7 @@
 	    "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
 	enabled = lock_prof_enable;
 	lock_prof_enable = 0;
-	pause("lpreset", hz / 10);
+	lock_prof_idle();
 	t = ticks;
 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
 		if (lp_cpu[cpu] == NULL)
@@ -461,16 +516,13 @@
 		if (l->lpo_obj == lo && l->lpo_file == file &&
 		    l->lpo_line == line)
 			return (l);
-	critical_enter();
 	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
 	l = LIST_FIRST(&type->lpt_lpoalloc);
 	if (l == NULL) {
 		lock_prof_rejected++;
-		critical_exit();
 		return (NULL);
 	}
 	LIST_REMOVE(l, lpo_link);
-	critical_exit();
 	l->lpo_obj = lo;
 	l->lpo_file = file;
 	l->lpo_line = line;
@@ -497,18 +549,49 @@
 	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
 	if (spin && lock_prof_skipspin == 1)
 		return;
+	critical_enter();
+	/* Recheck enabled now that we're in a critical section. */
+	if (lock_prof_enable == 0)
+		goto out;
 	l = lock_profile_object_lookup(lo, spin, file, line);
 	if (l == NULL)
-		return;
+		goto out;
 	l->lpo_cnt++;
 	if (++l->lpo_ref > 1)
-		return;
+		goto out;
 	l->lpo_contest_locking = contested;
 	l->lpo_acqtime = nanoseconds(); 
 	if (waittime && (l->lpo_acqtime > waittime))
 		l->lpo_waittime = l->lpo_acqtime - waittime;
 	else
 		l->lpo_waittime = 0;
+out:
+	critical_exit();
+}
+
+void
+lock_profile_thread_exit(struct thread *td)
+{
+#ifdef INVARIANTS
+	struct lock_profile_object *l;
+
+	MPASS(curthread->td_critnest == 0);
+#endif
+	/*
+	 * If lock profiling was disabled we have to wait for reset to
+	 * clear our pointers before we can exit safely.
+	 */
+	lock_prof_reset_wait();
+#ifdef INVARIANTS
+	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
+		printf("thread still holds lock acquired at %s:%d\n",
+		    l->lpo_file, l->lpo_line);
+	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
+		printf("thread still holds lock acquired at %s:%d\n",
+		    l->lpo_file, l->lpo_line);
+#endif
+	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
+	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
 }
 
 void
@@ -521,11 +604,20 @@
 	struct lpohead *head;
 	int spin;
 
-	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
+	if (lo->lo_flags & LO_NOPROFILE)
 		return;
 	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
 	head = &curthread->td_lprof[spin];
+	if (LIST_FIRST(head) == NULL)
+		return;
 	critical_enter();
+	/* Recheck enabled now that we're in a critical section. */
+	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
+		goto out;
+	/*
+	 * If lock profiling is not enabled we still want to remove the
+	 * lpo from our queue.
+	 */
 	LIST_FOREACH(l, head, lpo_link)
 		if (l->lpo_obj == lo)
 			break;
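
The lock_prof_idle() trick is worth spelling out: binding the current
thread to each active CPU in turn guarantees every CPU has scheduled,
and therefore has left any profiling critical section it entered
before the reset began. A minimal userland sketch of the loop, with
CPU_ABSENT_STUB() and sched_bind_stub() as stand-ins for the kernel
primitives (the CPU map is made up):

	#include <stdio.h>

	#define MP_MAXID	3			/* pretend 4 CPU slots */

	static int cpu_present[MP_MAXID + 1] = { 1, 1, 0, 1 };	/* CPU 2 absent */

	static int
	CPU_ABSENT_STUB(int cpu)
	{
		return (cpu_present[cpu] == 0);
	}

	static void
	sched_bind_stub(int cpu)
	{
		/* The real sched_bind() migrates curthread to 'cpu'. */
		printf("ran on cpu %d\n", cpu);
	}

	int
	main(void)
	{
		int cpu;

		for (cpu = 0; cpu <= MP_MAXID; cpu++) {
			if (CPU_ABSENT_STUB(cpu))
				continue;
			sched_bind_stub(cpu);
		}
		return (0);
	}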
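The comment in lock_prof_reset() describes an ordering contract:
resetting must become visible before enabled is cleared, so any thread
that sees profiling disabled also sees the reset in progress and waits
in lock_prof_reset_wait(). A minimal C11-atomics sketch of that
contract (single-threaded here just to show the protocol; the kernel
uses atomic_store_rel_int() rather than C11 atomics):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int enable = 1;
	static atomic_int resetting = 0;

	static void
	reset_begin(void)
	{
		/* Publish resetting first (release), then disable profiling. */
		atomic_store_explicit(&resetting, 1, memory_order_release);
		atomic_store_explicit(&enable, 0, memory_order_release);
	}

	static void
	reset_end(void)
	{
		atomic_store_explicit(&resetting, 0, memory_order_release);
		atomic_store_explicit(&enable, 1, memory_order_release);
	}

	static void
	thread_exit_path(void)
	{
		/* Like lock_prof_reset_wait(): spin until the reset drains. */
		while (atomic_load_explicit(&resetting,
		    memory_order_acquire) != 0)
			;	/* sched_relinquish() in the kernel */
		printf("safe to tear down per-thread lists\n");
	}

	int
	main(void)
	{
		reset_begin();
		reset_end();
		thread_exit_path();
		return (0);
	}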

==== //depot/projects/vimage/src/sys/sys/lock_profile.h#9 (text+ko) ====

@@ -24,7 +24,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: src/sys/sys/lock_profile.h,v 1.18 2007/12/16 06:07:34 kmacy Exp $
+ * $FreeBSD: src/sys/sys/lock_profile.h,v 1.19 2009/03/15 06:41:47 jeff Exp $
  */
 
 
@@ -43,11 +43,13 @@
 u_int64_t nanoseconds(void);
 #endif
 
-extern int lock_prof_enable;
+extern volatile int lock_prof_enable;
 
 void lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
     uint64_t waittime, const char *file, int line);
 void lock_profile_release_lock(struct lock_object *lo);
+void lock_profile_thread_exit(struct thread *td);
+
 
 static inline void
 lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested,
@@ -61,21 +63,10 @@
 
 #else /* !LOCK_PROFILING */
 
-static inline void
-lock_profile_release_lock(struct lock_object *lo)
-{
-}
-
-static inline void
-lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested, uint64_t *waittime)
-{
-}
-
-static inline void
-lock_profile_obtain_lock_success(struct lock_object *lo, int contested, uint64_t waittime,  
-    const char *file, int line)
-{
-}
+#define	lock_profile_release_lock(lo)
+#define lock_profile_obtain_lock_failed(lo, contested, waittime)
+#define lock_profile_obtain_lock_success(lo, contested, waittime, file, line)
+#define	lock_profile_thread_exit(td)
 
 #endif  /* !LOCK_PROFILING */
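
The header change swaps empty inline functions for empty macros in the
!LOCK_PROFILING case, so the calls compile away entirely. One
consequence worth noting: with the macro form the arguments are never
evaluated, so any side effects in them vanish too. A minimal sketch of
the pattern (LOCK_PROFILING_DEMO and the _demo name are invented for
illustration):

	#include <stdio.h>

	/* Flip this on to require a real implementation instead. */
	/* #define LOCK_PROFILING_DEMO */

	#ifdef LOCK_PROFILING_DEMO
	void	lock_profile_release_lock_demo(void *lo);
	#else
	#define	lock_profile_release_lock_demo(lo)
	#endif

	int
	main(void)
	{
		void *lo = NULL;

		(void)lo;	/* unused when the stub is a macro */
		lock_profile_release_lock_demo(lo);	/* expands to nothing */
		printf("profiling stub compiled away\n");
		return (0);
	}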
 

