svn commit: r269356 - stable/10/sys/dev/cxgbe

Navdeep Parhar np at FreeBSD.org
Thu Jul 31 23:04:42 UTC 2014


Author: np
Date: Thu Jul 31 23:04:41 2014
New Revision: 269356
URL: http://svnweb.freebsd.org/changeset/base/269356

Log:
  MFC r268971 and r269032.
  
  r268971:
  Simplify r267600; there's no need to distinguish between allocated and
  inlined mbufs.
  
  r269032:
  cxgbe(4):  Keep track of the clusters that have to be freed by the
  custom free routine (rxb_free) in the driver.  Fail MOD_UNLOAD with
  EBUSY if any such cluster has been handed up to the kernel but hasn't
  been freed yet.  This prevents a panic later when the cluster finally
  needs to be freed but rxb_free is gone from the kernel.
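
  For readers unfamiliar with the mechanism: the commit keeps a pair of
  per-CPU counter(9) counters, one bumped the first time a cluster is
  handed to the stack with the driver's free routine attached, the
  other bumped by whichever release path runs for that cluster.  Their
  difference is the number of clusters that could still call back into
  driver text.  A minimal standalone sketch of the pattern (the names
  track_init/can_unload are illustrative, not from the commit):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/counter.h>
	#include <sys/malloc.h>

	static counter_u64_t refs;	/* clusters handed up with our free routine */
	static counter_u64_t rels;	/* matching releases */

	static void
	track_init(void)
	{

		refs = counter_u64_alloc(M_WAITOK);	/* M_WAITOK: cannot fail */
		rels = counter_u64_alloc(M_WAITOK);
	}

	static int
	can_unload(void)
	{

		/* Safe only once every published reference has been released. */
		return (counter_u64_fetch(refs) == counter_u64_fetch(rels));
	}

  Because counter(9) fetches are not serialized snapshots, the unload
  path below rechecks the count after its wait loop before committing
  to the teardown.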

Modified:
  stable/10/sys/dev/cxgbe/adapter.h
  stable/10/sys/dev/cxgbe/t4_main.c
  stable/10/sys/dev/cxgbe/t4_sge.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/dev/cxgbe/adapter.h
==============================================================================
--- stable/10/sys/dev/cxgbe/adapter.h	Thu Jul 31 22:32:39 2014	(r269355)
+++ stable/10/sys/dev/cxgbe/adapter.h	Thu Jul 31 23:04:41 2014	(r269356)
@@ -254,8 +254,7 @@ struct cluster_metadata {
 
 struct fl_sdesc {
 	caddr_t cl;
-	uint8_t nimbuf;		/* # of inline mbufs with ref on the cluster */
-	uint8_t nembuf;		/* # of allocated mbufs with ref */
+	uint16_t nmbuf;	/* # of driver originated mbufs with ref on cluster */
 	struct cluster_layout cll;
 };
 
@@ -852,6 +851,8 @@ void end_synchronized_op(struct adapter 
 
 /* t4_sge.c */
 void t4_sge_modload(void);
+void t4_sge_modunload(void);
+uint64_t t4_sge_extfree_refs(void);
 void t4_init_sge_cpl_handlers(struct adapter *);
 void t4_tweak_chip_settings(struct adapter *);
 int t4_read_chip_settings(struct adapter *);
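
The two uint8_t counters are merged above because inline and separately
allocated mbufs take the same kind of reference on the cluster (via
m_extaddref(), visible in the t4_sge.c hunks below), so only the total
matters.  That reference is what makes unloading dangerous: the kernel
stores the driver's free routine pointer in each mbuf's external
storage, and the pointer dangles once the module text is unmapped.
For illustration, the general shape of such a free routine on
stable/10, mirroring the driver's rxb_free() (example_rxb_free is a
hypothetical name):

	#include <sys/param.h>
	#include <sys/mbuf.h>
	#include <vm/uma.h>

	static int
	example_rxb_free(struct mbuf *m, void *arg1, void *arg2)
	{
		uma_zone_t zone = arg1;	/* arg1 given to m_extaddref() */
		caddr_t cl = arg2;	/* arg2: the cluster being released */

		uma_zfree(zone, cl);
		return (EXT_FREE_OK);
	}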

Modified: stable/10/sys/dev/cxgbe/t4_main.c
==============================================================================
--- stable/10/sys/dev/cxgbe/t4_main.c	Thu Jul 31 22:32:39 2014	(r269355)
+++ stable/10/sys/dev/cxgbe/t4_main.c	Thu Jul 31 23:04:41 2014	(r269356)
@@ -8090,6 +8090,9 @@ tweak_tunables(void)
 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
 }
 
+static struct sx mlu;	/* mod load unload */
+SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
+
 static int
 mod_event(module_t mod, int cmd, void *arg)
 {
@@ -8098,41 +8101,67 @@ mod_event(module_t mod, int cmd, void *a
 
 	switch (cmd) {
 	case MOD_LOAD:
-		if (atomic_fetchadd_int(&loaded, 1))
-			break;
-		t4_sge_modload();
-		sx_init(&t4_list_lock, "T4/T5 adapters");
-		SLIST_INIT(&t4_list);
+		sx_xlock(&mlu);
+		if (loaded++ == 0) {
+			t4_sge_modload();
+			sx_init(&t4_list_lock, "T4/T5 adapters");
+			SLIST_INIT(&t4_list);
 #ifdef TCP_OFFLOAD
-		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
-		SLIST_INIT(&t4_uld_list);
+			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
+			SLIST_INIT(&t4_uld_list);
 #endif
-		t4_tracer_modload();
-		tweak_tunables();
+			t4_tracer_modload();
+			tweak_tunables();
+		}
+		sx_xunlock(&mlu);
 		break;
 
 	case MOD_UNLOAD:
-		if (atomic_fetchadd_int(&loaded, -1) > 1)
-			break;
-		t4_tracer_modunload();
+		sx_xlock(&mlu);
+		if (--loaded == 0) {
+			int tries;
+
+			sx_slock(&t4_list_lock);
+			if (!SLIST_EMPTY(&t4_list)) {
+				rc = EBUSY;
+				sx_sunlock(&t4_list_lock);
+				goto done_unload;
+			}
+#ifdef TCP_OFFLOAD
+			sx_slock(&t4_uld_list_lock);
+			if (!SLIST_EMPTY(&t4_uld_list)) {
+				rc = EBUSY;
+				sx_sunlock(&t4_uld_list_lock);
+				sx_sunlock(&t4_list_lock);
+				goto done_unload;
+			}
+#endif
+			tries = 0;
+			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
+				uprintf("%ju clusters with custom free routine "
+				    "still in use.\n", t4_sge_extfree_refs());
+				pause("t4unload", 2 * hz);
+			}
 #ifdef TCP_OFFLOAD
-		sx_slock(&t4_uld_list_lock);
-		if (!SLIST_EMPTY(&t4_uld_list)) {
-			rc = EBUSY;
 			sx_sunlock(&t4_uld_list_lock);
-			break;
-		}
-		sx_sunlock(&t4_uld_list_lock);
-		sx_destroy(&t4_uld_list_lock);
 #endif
-		sx_slock(&t4_list_lock);
-		if (!SLIST_EMPTY(&t4_list)) {
-			rc = EBUSY;
 			sx_sunlock(&t4_list_lock);
-			break;
+
+			if (t4_sge_extfree_refs() == 0) {
+				t4_tracer_modunload();
+#ifdef TCP_OFFLOAD
+				sx_destroy(&t4_uld_list_lock);
+#endif
+				sx_destroy(&t4_list_lock);
+				t4_sge_modunload();
+				loaded = 0;
+			} else {
+				rc = EBUSY;
+				loaded++;	/* undo earlier decrement */
+			}
 		}
-		sx_sunlock(&t4_list_lock);
-		sx_destroy(&t4_list_lock);
+done_unload:
+		sx_xunlock(&mlu);
 		break;
 	}
 

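Two details of the new unload path are easy to miss: the wait is
bounded (five tries of pause(..., 2 * hz), roughly ten seconds total),
and on failure `loaded' is incremented again so that a later kldunload
attempt can still succeed once the outstanding clusters drain.
Condensed to a skeleton (wait_for_extfree is an illustrative name;
t4_sge_extfree_refs() is the real helper added below):

	static int
	wait_for_extfree(void)
	{
		int tries = 0;

		while (tries++ < 5 && t4_sge_extfree_refs() != 0)
			pause("t4unload", 2 * hz);	/* ~2 s per iteration */

		return (t4_sge_extfree_refs() == 0 ? 0 : EBUSY);
	}
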
Modified: stable/10/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- stable/10/sys/dev/cxgbe/t4_sge.c	Thu Jul 31 22:32:39 2014	(r269355)
+++ stable/10/sys/dev/cxgbe/t4_sge.c	Thu Jul 31 23:04:41 2014	(r269356)
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/time.h>
 #include <sys/sysctl.h>
 #include <sys/smp.h>
+#include <sys/counter.h>
 #include <net/bpf.h>
 #include <net/ethernet.h>
 #include <net/if.h>
@@ -242,6 +243,9 @@ static int handle_fw_msg(struct sge_iq *
 static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
 static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
 
+static counter_u64_t extfree_refs;
+static counter_u64_t extfree_rels;
+
 /*
  * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
  */
@@ -313,6 +317,30 @@ t4_sge_modload(void)
 		    " using 0 instead.\n", cong_drop);
 		cong_drop = 0;
 	}
+
+	extfree_refs = counter_u64_alloc(M_WAITOK);
+	extfree_rels = counter_u64_alloc(M_WAITOK);
+	counter_u64_zero(extfree_refs);
+	counter_u64_zero(extfree_rels);
+}
+
+void
+t4_sge_modunload(void)
+{
+
+	counter_u64_free(extfree_refs);
+	counter_u64_free(extfree_rels);
+}
+
+uint64_t
+t4_sge_extfree_refs(void)
+{
+	uint64_t refs, rels;
+
+	rels = counter_u64_fetch(extfree_rels);
+	refs = counter_u64_fetch(extfree_refs);
+
+	return (refs - rels);
 }
 
 void
@@ -1450,6 +1478,7 @@ rxb_free(struct mbuf *m, void *arg1, voi
 	caddr_t cl = arg2;
 
 	uma_zfree(zone, cl);
+	counter_u64_add(extfree_rels, 1);
 
 	return (EXT_FREE_OK);
 }
@@ -1498,7 +1527,7 @@ get_scatter_segment(struct adapter *sc, 
 		/* copy data to mbuf */
 		bcopy(payload, mtod(m, caddr_t), len);
 
-	} else if (sd->nimbuf * MSIZE < cll->region1) {
+	} else if (sd->nmbuf * MSIZE < cll->region1) {
 
 		/*
 		 * There's spare room in the cluster for an mbuf.  Create one
@@ -1506,14 +1535,15 @@ get_scatter_segment(struct adapter *sc, 
 		 */
 
 		MPASS(clm != NULL);
-		m = (struct mbuf *)(sd->cl + sd->nimbuf * MSIZE);
+		m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE);
 		/* No bzero required */
 		if (m_init(m, NULL, 0, M_NOWAIT, MT_DATA, flags | M_NOFREE))
 			return (NULL);
 		fl->mbuf_inlined++;
 		m_extaddref(m, payload, padded_len, &clm->refcount, rxb_free,
 		    swz->zone, sd->cl);
-		sd->nimbuf++;
+		if (sd->nmbuf++ == 0)
+			counter_u64_add(extfree_refs, 1);
 
 	} else {
 
@@ -1530,7 +1560,8 @@ get_scatter_segment(struct adapter *sc, 
 		if (clm != NULL) {
 			m_extaddref(m, payload, padded_len, &clm->refcount,
 			    rxb_free, swz->zone, sd->cl);
-			sd->nembuf++;
+			if (sd->nmbuf++ == 0)
+				counter_u64_add(extfree_refs, 1);
 		} else {
 			m_cljset(m, sd->cl, swz->type);
 			sd->cl = NULL;	/* consumed, not a recycle candidate */
@@ -3055,7 +3086,7 @@ refill_fl(struct adapter *sc, struct sge
 
 		if (sd->cl != NULL) {
 
-			if (sd->nimbuf + sd->nembuf == 0) {
+			if (sd->nmbuf == 0) {
 				/*
 				 * Fast recycle without involving any atomics on
 				 * the cluster's metadata (if the cluster has
@@ -3082,6 +3113,7 @@ refill_fl(struct adapter *sc, struct sge
 
 			if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
 				fl->cl_recycled++;
+				counter_u64_add(extfree_rels, 1);
 				goto recycled;
 			}
 			sd->cl = NULL;	/* gave up my reference */
@@ -3114,8 +3146,7 @@ recycled:
 #endif
 			clm->refcount = 1;
 		}
-		sd->nimbuf = 0;
-		sd->nembuf = 0;
+		sd->nmbuf = 0;
 recycled_fast:
 		fl->pending++;
 		fl->needed--;
@@ -3184,9 +3215,11 @@ free_fl_sdesc(struct adapter *sc, struct
 
 		cll = &sd->cll;
 		clm = cl_metadata(sc, fl, cll, sd->cl);
-		if (sd->nimbuf + sd->nembuf == 0 ||
-		    (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1)) {
+		if (sd->nmbuf == 0)
+			uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl);
+		else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) {
 			uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl);
+			counter_u64_add(extfree_rels, 1);
 		}
 		sd->cl = NULL;
 	}
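
Taken together, the t4_sge.c changes maintain a simple balance: every
0 -> 1 transition of sd->nmbuf adds one to extfree_refs, and exactly
one extfree_rels is added by whichever release path fires first:
rxb_free() when the stack drops the last reference, the last-reference
recycle in refill_fl(), or the queue teardown in free_fl_sdesc().  The
fast-recycle path (sd->nmbuf == 0) touches neither counter because no
reference was ever published.  Expressed as an invariant (illustrative
only; no such assertion exists in the commit):

	static void
	assert_extfree_balance(void)
	{

		KASSERT(counter_u64_fetch(extfree_refs) >=
		    counter_u64_fetch(extfree_rels),
		    ("%s: more releases than references", __func__));
	}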

