PERFORCE change 167763 for review

Marko Zec zec at FreeBSD.org
Tue Aug 25 01:12:44 UTC 2009


http://perforce.freebsd.org/chv.cgi?CH=167763

Change 167763 by zec at zec_tpx32 on 2009/08/25 01:12:13

	Introduce a separate sx lock for protecting lists of vnet
	sysinit and sysuninit handlers.
	
	Previously, sx_vnet, the lock designated for protecting the vnet
	list, was (ab)used to protect the vnet sysinit / sysuninit handler
	lists as well.  Holding sx_vnet exclusively while invoking sysinit
	and / or sysuninit handlers turns out to be problematic, since some
	handlers (justified or not) may wake up and wait for another thread
	to walk over the vnet list, which requires acquiring a shared lock
	on sx_vnet.  This leads to a deadlock: the thread holding the
	exclusive lock on sx_vnet waits for another thread that is itself
	blocked trying to acquire a shared lock on sx_vnet.  Protecting
	the vnet sysinit / sysuninit lists with a separate lock mitigates
	this issue, which was first observed in flowtable_flush() /
	flowtable_cleaner().
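
	To illustrate the problem, here is a minimal userland sketch of
	the deadlock pattern, with pthread primitives standing in for
	sx(9); the names are illustrative only, this is not the kernel
	code:

	#include <pthread.h>
	#include <stdio.h>

	/* Stand-in for sx_vnet; protects the (imaginary) vnet list. */
	static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
	static int flushed = 0;

	/* flowtable_cleaner() analogue: walks the list under a shared lock. */
	static void *
	cleaner(void *arg)
	{
		(void)arg;
		pthread_rwlock_rdlock(&list_lock);	/* blocks: writer holds it */
		/* ... walk the list, flush entries ... */
		pthread_rwlock_unlock(&list_lock);

		pthread_mutex_lock(&mtx);
		flushed = 1;
		pthread_cond_signal(&cv);
		pthread_mutex_unlock(&mtx);
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t t;

		/* vnet_destroy() analogue: exclusive lock held across handlers. */
		pthread_rwlock_wrlock(&list_lock);

		pthread_create(&t, NULL, cleaner, NULL);

		/* flowtable_flush() analogue: wait for the cleaner to finish. */
		pthread_mutex_lock(&mtx);
		while (!flushed)
			pthread_cond_wait(&cv, &mtx);	/* never signalled: deadlock */
		pthread_mutex_unlock(&mtx);

		pthread_rwlock_unlock(&list_lock);
		pthread_join(t, NULL);
		printf("done\n");			/* never reached */
		return (0);
	}

	Running this hangs: main() holds the write lock across the
	handler-style wait, while cleaner() cannot signal until it obtains
	the read lock.  With this change the handlers run under the new
	vnet_sysinit_sxlock (taken shared) rather than under the
	exclusively held vnet list lock, so a handler may safely wait for
	a thread that only needs to walk the vnet list.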

Affected files ...

.. //depot/projects/vimage-commit2/src/sys/net/vnet.c#4 edit

Differences ...

==== //depot/projects/vimage-commit2/src/sys/net/vnet.c#4 (text+ko) ====

@@ -182,9 +182,10 @@
 
 /*
  * Global lists of subsystem constructor and destructors for vnets.  They are
- * registered via VNET_SYSINIT() and VNET_SYSUNINIT().  The lists are
- * protected by the vnet_sxlock global lock.
+ * registered via VNET_SYSINIT() and VNET_SYSUNINIT().  Both lists are
+ * protected by the vnet_sysinit_sxlock global lock.
  */
+struct sx		vnet_sysinit_sxlock;
 static TAILQ_HEAD(vnet_sysinit_head, vnet_sysinit) vnet_constructors =
 	TAILQ_HEAD_INITIALIZER(vnet_constructors);
 static TAILQ_HEAD(vnet_sysuninit_head, vnet_sysinit) vnet_destructors =
@@ -228,12 +229,12 @@
 
 	/* Initialize / attach vnet module instances. */
 	CURVNET_SET_QUIET(vnet);
-
-	sx_xlock(&vnet_sxlock);
+	sx_slock(&vnet_sysinit_sxlock);
 	vnet_sysinit();
+	sx_sunlock(&vnet_sysinit_sxlock);
 	CURVNET_RESTORE();
 
-	rw_wlock(&vnet_rwlock);
+	VNET_LIST_WLOCK();
 	LIST_INSERT_HEAD(&vnet_head, vnet, vnet_le);
 	VNET_LIST_WUNLOCK();
 
@@ -253,7 +254,7 @@
 
 	VNET_LIST_WLOCK();
 	LIST_REMOVE(vnet, vnet_le);
-	rw_wunlock(&vnet_rwlock);
+	VNET_LIST_WUNLOCK();
 
 	CURVNET_SET_QUIET(vnet);
 
@@ -263,9 +264,9 @@
 			if_vmove(ifp, ifp->if_home_vnet);
 	}
 
+	sx_slock(&vnet_sysinit_sxlock);
 	vnet_sysuninit();
-	sx_xunlock(&vnet_sxlock);
-
+	sx_sunlock(&vnet_sysinit_sxlock);
 	CURVNET_RESTORE();
 
 	/*
@@ -287,6 +288,7 @@
 
 	rw_init(&vnet_rwlock, "vnet_rwlock");
 	sx_init(&vnet_sxlock, "vnet_sxlock");
+	sx_init(&vnet_sysinit_sxlock, "vnet_sysinit_sxlock");
 	LIST_INIT(&vnet_head);
 }
 SYSINIT(vnet_init_prelink, SI_SUB_VNET_PRELINK, SI_ORDER_FIRST,
@@ -494,7 +496,7 @@
 	KASSERT(vs->subsystem > SI_SUB_VNET, ("vnet sysinit too early"));
 
 	/* Add the constructor to the global list of vnet constructors. */
-	sx_xlock(&vnet_sxlock);
+	sx_xlock(&vnet_sysinit_sxlock);
 	TAILQ_FOREACH(vs2, &vnet_constructors, link) {
 		if (vs2->subsystem > vs->subsystem)
 			break;
@@ -515,7 +517,7 @@
 		vs->func(vs->arg);
 		CURVNET_RESTORE();
 	}
-	sx_xunlock(&vnet_sxlock);
+	sx_xunlock(&vnet_sysinit_sxlock);
 }
 
 void
@@ -526,9 +528,9 @@
 	vs = arg;
 
 	/* Remove the constructor from the global list of vnet constructors. */
-	sx_xlock(&vnet_sxlock);
+	sx_xlock(&vnet_sysinit_sxlock);
 	TAILQ_REMOVE(&vnet_constructors, vs, link);
-	sx_xunlock(&vnet_sxlock);
+	sx_xunlock(&vnet_sysinit_sxlock);
 }
 
 void
@@ -539,7 +541,7 @@
 	vs = arg;
 
 	/* Add the destructor to the global list of vnet destructors. */
-	sx_xlock(&vnet_sxlock);
+	sx_xlock(&vnet_sysinit_sxlock);
 	TAILQ_FOREACH(vs2, &vnet_destructors, link) {
 		if (vs2->subsystem > vs->subsystem)
 			break;
@@ -550,7 +552,7 @@
 		TAILQ_INSERT_BEFORE(vs2, vs, link);
 	else
 		TAILQ_INSERT_TAIL(&vnet_destructors, vs, link);
-	sx_xunlock(&vnet_sxlock);
+	sx_xunlock(&vnet_sysinit_sxlock);
 }
 
 void
@@ -565,7 +567,7 @@
 	 * Invoke the destructor on all the existing vnets when it is
 	 * deregistered.
 	 */
-	sx_xlock(&vnet_sxlock);
+	sx_xlock(&vnet_sysinit_sxlock);
 	VNET_FOREACH(vnet) {
 		CURVNET_SET_QUIET(vnet);
 		vs->func(vs->arg);
@@ -574,20 +576,20 @@
 
 	/* Remove the destructor from the global list of vnet destructors. */
 	TAILQ_REMOVE(&vnet_destructors, vs, link);
-	sx_xunlock(&vnet_sxlock);
+	sx_xunlock(&vnet_sysinit_sxlock);
 }
 
 /*
  * Invoke all registered vnet constructors on the current vnet.  Used during
  * vnet construction.  The caller is responsible for ensuring the new vnet is
- * the current vnet and that the vnet_sxlock lock is locked.
+ * the current vnet and that the vnet_sysinit_sxlock lock is locked.
  */
 void
 vnet_sysinit(void)
 {
 	struct vnet_sysinit *vs;
 
-	sx_assert(&vnet_sxlock, SA_LOCKED);
+	sx_assert(&vnet_sysinit_sxlock, SA_LOCKED);
 	TAILQ_FOREACH(vs, &vnet_constructors, link) {
 		vs->func(vs->arg);
 	}
@@ -596,14 +598,14 @@
 /*
  * Invoke all registered vnet destructors on the current vnet.  Used during
  * vnet destruction.  The caller is responsible for ensuring the dying vnet
- * is the current vnet and that the vnet_sxlock lock is locked.
+ * is the current vnet and that the vnet_sysinit_sxlock lock is locked.
  */
 void
 vnet_sysuninit(void)
 {
 	struct vnet_sysinit *vs;
 
-	sx_assert(&vnet_sxlock, SA_LOCKED);
+	sx_assert(&vnet_sysinit_sxlock, SA_LOCKED);
 	TAILQ_FOREACH_REVERSE(vs, &vnet_destructors, vnet_sysuninit_head,
 	    link) {
 		vs->func(vs->arg);

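For reference, subsystems land on the constructor / destructor lists
touched above via the VNET_SYSINIT() / VNET_SYSUNINIT() macros, which
resolve to vnet_register_sysinit() / vnet_register_sysuninit().  A
minimal sketch of a hypothetical consumer follows; the foo_* names,
header and subsystem choice are illustrative only:

	#include <sys/param.h>
	#include <sys/kernel.h>
	#include <net/vnet.h>

	static void
	foo_vnet_init(const void *unused __unused)
	{
		/* Per-vnet setup; also invoked for vnets created later. */
	}
	VNET_SYSINIT(foo_vnet_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY,
	    foo_vnet_init, NULL);

	static void
	foo_vnet_uninit(const void *unused __unused)
	{
		/* Per-vnet teardown; invoked for each vnet being destroyed. */
	}
	VNET_SYSUNINIT(foo_vnet_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY,
	    foo_vnet_uninit, NULL);

With this change, such handlers run with vnet_sysinit_sxlock held
(exclusively at registration / deregistration time, shared during vnet
creation and destruction), while the vnet list lock is left free.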
