svn commit: r250326 - head/sys/dev/ath

Adrian Chadd adrian at FreeBSD.org
Tue May 7 07:52:19 UTC 2013


Author: adrian
Date: Tue May  7 07:52:18 2013
New Revision: 250326
URL: http://svnweb.freebsd.org/changeset/base/250326

Log:
  Re-work how transmit buffer limits are enforced - partly to fix the PR,
  but partly to just tidy up things.
  
  The problem here - there are too many TX buffers in the queue! By the
  time one needs to transmit an EAPOL frame (for this PR, it's the response
  to the group rekey notification from the AP) there are no ath_buf entries
  free and the EAPOL frame doesn't go out.
  
  Now, the problem!
  
  * Enforcing the TX buffer limitation _before_ we dequeue the frame?
    Bad idea. Because..
  * .. it means I can't check whether the mbuf has M_EAPOL set.
  
  The solution(s):
  
  * De-queue the frame first
  * Don't bother doing the TX buffer minimum free check until after
    we know whether it's an EAPOL frame or not.
  * If it's an EAPOL frame, allocate the buffer from the mgmt pool
    rather than the default pool.
  
  Whilst I'm here:
  
  * Add a tweak to limit how many buffers a single node can acquire.
  * Don't enforce that for EAPOL frames.
  * .. set that to default to 1/4 of the available buffers, or 64,
    whichever is larger.
  
  This doesn't fix issues due to a sleeping node or a very poorly performing
  node; but it doesn't make them worse.
  
  Tested:
  
  * AR5416 STA, TX'ing 100+ mbit UDP to an AP, but only 50mbit being received
    (thus the TX queue fills up.)
  * .. with CCMP / WPA2 encryption configured
  * .. and the group rekey time set to 10 seconds, just to elicit the
    behaviour very quickly.
  
  PR:		kern/138379

Modified:
  head/sys/dev/ath/if_ath.c
  head/sys/dev/ath/if_ath_sysctl.c
  head/sys/dev/ath/if_athioctl.h
  head/sys/dev/ath/if_athvar.h

Modified: head/sys/dev/ath/if_ath.c
==============================================================================
--- head/sys/dev/ath/if_ath.c	Tue May  7 07:44:07 2013	(r250325)
+++ head/sys/dev/ath/if_ath.c	Tue May  7 07:52:18 2013	(r250326)
@@ -694,6 +694,13 @@ ath_attach(u_int16_t devid, struct ath_s
 	 */
 	sc->sc_txq_mcastq_maxdepth = ath_txbuf;
 
+	/*
+	 * Default the maximum queue depth for a given node
+	 * to 1/4'th the TX buffers, or 64, whichever
+	 * is larger.
+	 */
+	sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);
+
 	/* Enable CABQ by default */
 	sc->sc_cabq_enable = 1;
 
@@ -2661,33 +2668,98 @@ ath_start(struct ifnet *ifp)
 	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start: called");
 
 	for (;;) {
-		ATH_TXBUF_LOCK(sc);
-		if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
-			/* XXX increment counter? */
-			ATH_TXBUF_UNLOCK(sc);
+		/*
+		 * Grab the frame that we're going to try and transmit.
+		 */
+		IFQ_DEQUEUE(&ifp->if_snd, m);
+		if (m == NULL)
+			break;
+		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
+
+		/*
+		 * Enforce how deep a node queue can get.
+		 *
+		 * XXX it would be nicer if we kept an mbuf queue per
+		 * node and only whacked them into ath_bufs when we
+		 * are ready to schedule some traffic from them.
+		 * .. that may come later.
+		 *
+		 * XXX we should also track the per-node hardware queue
+		 * depth so it is easy to limit the _SUM_ of the swq and
+		 * hwq frames.  Since we only schedule two HWQ frames
+		 * at a time, this should be OK for now.
+		 */
+		if ((!(m->m_flags & M_EAPOL)) &&
+		    (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) {
+			sc->sc_stats.ast_tx_nodeq_overflow++;
+			if (ni != NULL)
+				ieee80211_free_node(ni);
+			m_freem(m);
+			m = NULL;
+			continue;
+		}
+
+		/*
+		 * Check how many TX buffers are available.
+		 *
+		 * If this is for non-EAPOL traffic, just leave some
+		 * space free in order for buffer cloning and raw
+		 * frame transmission to occur.
+		 *
+		 * If it's for EAPOL traffic, ignore this for now.
+		 * Management traffic will be sent via the raw transmit
+		 * method which bypasses this check.
+		 *
+		 * This is needed to ensure that EAPOL frames during
+		 * (re) keying have a chance to go out.
+		 *
+		 * See kern/138379 for more information.
+		 */
+		if ((!(m->m_flags & M_EAPOL)) &&
+		    (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) {
+			sc->sc_stats.ast_tx_nobuf++;
 			IF_LOCK(&ifp->if_snd);
+			_IF_PREPEND(&ifp->if_snd, m);
 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 			IF_UNLOCK(&ifp->if_snd);
+			m = NULL;
 			break;
 		}
-		ATH_TXBUF_UNLOCK(sc);
-		
+
 		/*
 		 * Grab a TX buffer and associated resources.
+		 *
+		 * If it's an EAPOL frame, allocate a MGMT ath_buf.
+		 * That way even with temporary buffer exhaustion due to
+		 * the data path doesn't leave us without the ability
+		 * to transmit management frames.
+		 *
+		 * Otherwise allocate a normal buffer.
 		 */
-		bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
-		if (bf == NULL)
-			break;
+		if (m->m_flags & M_EAPOL)
+			bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
+		else
+			bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
 
-		IFQ_DEQUEUE(&ifp->if_snd, m);
-		if (m == NULL) {
-			ATH_TXBUF_LOCK(sc);
-			ath_returnbuf_head(sc, bf);
-			ATH_TXBUF_UNLOCK(sc);
+		if (bf == NULL) {
+			/*
+			 * If we failed to allocate a buffer, prepend it
+			 * and continue.
+			 *
+			 * We shouldn't fail normally, due to the check
+			 * above.
+			 */
+			sc->sc_stats.ast_tx_nobuf++;
+			IF_LOCK(&ifp->if_snd);
+			_IF_PREPEND(&ifp->if_snd, m);
+			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+			IF_UNLOCK(&ifp->if_snd);
+			m = NULL;
 			break;
 		}
-		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
+
 		npkts ++;
+
 		/*
 		 * Check for fragmentation.  If this frame
 		 * has been broken up verify we have enough

Modified: head/sys/dev/ath/if_ath_sysctl.c
==============================================================================
--- head/sys/dev/ath/if_ath_sysctl.c	Tue May  7 07:44:07 2013	(r250325)
+++ head/sys/dev/ath/if_ath_sysctl.c	Tue May  7 07:52:18 2013	(r250326)
@@ -748,11 +748,14 @@ ath_sysctlattach(struct ath_softc *sc)
 		"txq_data_minfree", CTLFLAG_RW, &sc->sc_txq_data_minfree,
 		0, "Minimum free buffers before adding a data frame"
 		" to the TX queue");
-
 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 		"txq_mcastq_maxdepth", CTLFLAG_RW,
 		&sc->sc_txq_mcastq_maxdepth, 0,
 		"Maximum buffer depth for multicast/broadcast frames");
+	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+		"txq_node_maxdepth", CTLFLAG_RW,
+		&sc->sc_txq_node_maxdepth, 0,
+		"Maximum buffer depth for a single node");
 
 #if 0
 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,

Modified: head/sys/dev/ath/if_athioctl.h
==============================================================================
--- head/sys/dev/ath/if_athioctl.h	Tue May  7 07:44:07 2013	(r250325)
+++ head/sys/dev/ath/if_athioctl.h	Tue May  7 07:52:18 2013	(r250326)
@@ -163,8 +163,9 @@ struct ath_stats {
 	u_int32_t	ast_tx_mcastq_overflow;	/* multicast queue overflow */
 	u_int32_t	ast_rx_keymiss;
 	u_int32_t	ast_tx_swfiltered;
+	u_int32_t	ast_tx_nodeq_overflow;	/* node sw queue overflow */
 
-	u_int32_t	ast_pad[15];
+	u_int32_t	ast_pad[14];
 };
 
 #define	SIOCGATHSTATS	_IOWR('i', 137, struct ifreq)

Modified: head/sys/dev/ath/if_athvar.h
==============================================================================
--- head/sys/dev/ath/if_athvar.h	Tue May  7 07:44:07 2013	(r250325)
+++ head/sys/dev/ath/if_athvar.h	Tue May  7 07:52:18 2013	(r250326)
@@ -799,6 +799,7 @@ struct ath_softc {
 	 *
 	 * + mcastq_maxdepth is the maximum depth allowed of the cabq.
 	 */
+	int			sc_txq_node_maxdepth;
 	int			sc_txq_data_minfree;
 	int			sc_txq_mcastq_maxdepth;
 


More information about the svn-src-all mailing list