svn commit: r225645 - user/adrian/if_ath_tx/sys/dev/ath

Adrian Chadd adrian at FreeBSD.org
Sun Sep 18 07:21:00 UTC 2011


Author: adrian
Date: Sun Sep 18 07:20:59 2011
New Revision: 225645
URL: http://svn.freebsd.org/changeset/base/225645

Log:
  Begin an overhaul of the TXQ locking to avoid recursive locking.
  
  The Linux and Atheros reference drivers keep the hardware TXQ locked
  for the duration of both TXQ completion processing and TXQ drain.
  This unfortunately means the TXQ lock is held during completion
  callbacks and during ieee80211_free_node(). When the last node
  reference is removed, net80211 frees the node, which calls back into
  the driver; there, ath_tx_node_flush() flushes the node's TX state
  and removes it from various lists.
  
  Unfortunately, access to these lists is protected by the TXQ lock,
  and trying to grab it whilst it's already held causes a panic.
  
  The solution is to go back to how FreeBSD did the locking - hold
  the lock whilst doing TXQ manipulation (add/remove/state changes),
  drop it, and reacquire it whilst fiddling with the per-TID state.
  The lock isn't to be held across TX buffer completion.
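  
  As an illustration only, here is a minimal sketch of that lock scoping
  in a hardware completion loop; the function name is made up and the
  descriptor-status handling is elided, so treat it as a pattern sketch
  rather than the driver's actual code:
  
  /*
   * Hold the TXQ lock only around hardware queue manipulation; drop it
   * before the completion handler runs, since the handler may drop the
   * last node reference and re-enter the driver.
   */
  static void
  example_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
  {
  	struct ath_buf *bf;
  
  	for (;;) {
  		ATH_TXQ_LOCK(txq);
  		bf = TAILQ_FIRST(&txq->axq_q);
  		if (bf == NULL) {
  			ATH_TXQ_UNLOCK(txq);
  			break;
  		}
  		/* ... check descriptor status with the lock held ... */
  		ATH_TXQ_REMOVE(txq, bf, bf_list);
  		ATH_TXQ_UNLOCK(txq);
  
  		/* Completion runs unlocked; it may free the node. */
  		if (bf->bf_comp != NULL)
  			bf->bf_comp(sc, bf, 0);
  		else
  			ath_tx_default_comp(sc, bf, 0);
  	}
  
  	/* Retake the lock just to kick the software scheduler. */
  	ATH_TXQ_LOCK(txq);
  	ath_txq_sched(sc, txq);
  	ATH_TXQ_UNLOCK(txq);
  }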
  
  This commit partially implements this:
  
  * the non-aggregation pathway doesn't hold the lock across the completion
    handler
  * the aggregation pathway for single packets doesn't hold the lock
    across the completion handler
  * the aggregation pathway for aggregate frames now delays calling the
    completion handler until the TID state (BAW, etc) has been updated;
    it's done at the end of the function (see the sketch below).
  
  The error and cleanup pathways will be fixed in a subsequent commit.
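  
  As a rough illustration of that deferred-completion pattern, the
  following sketch shows the shape of the aggregate path; the function
  name and parameters are illustrative, the BAW update is elided, and
  the retry path is omitted:
  
  static void
  example_aggr_comp(struct ath_softc *sc, struct ath_txq *txq,
      ath_bufhead *frames)
  {
  	ath_bufhead bf_cq;		/* deferred-completion list */
  	struct ath_buf *bf;
  
  	TAILQ_INIT(&bf_cq);
  
  	/* Update per-TID state (BAW, etc) under the TXQ lock. */
  	ATH_TXQ_LOCK(txq);
  	while ((bf = TAILQ_FIRST(frames)) != NULL) {
  		TAILQ_REMOVE(frames, bf, bf_list);
  		/* ... update the BAW for this subframe ... */
  		TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
  	}
  	ATH_TXQ_UNLOCK(txq);
  
  	/* Now complete the frames outside the lock. */
  	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
  		TAILQ_REMOVE(&bf_cq, bf, bf_list);
  		ath_tx_default_comp(sc, bf, 0);
  	}
  }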

Modified:
  user/adrian/if_ath_tx/sys/dev/ath/if_ath.c
  user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c

Modified: user/adrian/if_ath_tx/sys/dev/ath/if_ath.c
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_ath.c	Sat Sep 17 22:53:05 2011	(r225644)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_ath.c	Sun Sep 18 07:20:59 2011	(r225645)
@@ -4350,11 +4350,12 @@ ath_tx_processq(struct ath_softc *sc, st
 		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
 		txq->axq_link);
 	nacked = 0;
-	ATH_TXQ_LOCK(txq);
 	for (;;) {
+		ATH_TXQ_LOCK(txq);
 		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
 		bf = TAILQ_FIRST(&txq->axq_q);
 		if (bf == NULL) {
+			ATH_TXQ_UNLOCK(txq);
 			break;
 		}
 		ds = bf->bf_lastds;	/* XXX must be setup correctly! */
@@ -4366,6 +4367,7 @@ ath_tx_processq(struct ath_softc *sc, st
 			    status == HAL_OK);
 #endif
 		if (status == HAL_EINPROGRESS) {
+			ATH_TXQ_UNLOCK(txq);
 			break;
 		}
 		ATH_TXQ_REMOVE(txq, bf, bf_list);
@@ -4402,6 +4404,7 @@ ath_tx_processq(struct ath_softc *sc, st
 			ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
 				ts->ts_rssi);
 		}
+		ATH_TXQ_UNLOCK(txq);
 
 		/* If unicast frame, update general statistics */
 		if (ni != NULL) {
@@ -4458,6 +4461,7 @@ ath_tx_processq(struct ath_softc *sc, st
 #endif
 
 	/* Kick the TXQ scheduler */
+	ATH_TXQ_LOCK(txq);
 	ath_txq_sched(sc, txq);
 	ATH_TXQ_UNLOCK(txq);
 
@@ -4633,11 +4637,12 @@ ath_tx_draintxq(struct ath_softc *sc, st
 		bf->bf_flags &= ~ATH_BUF_BUSY;
 	ATH_TXBUF_UNLOCK(sc);
 
-	ATH_TXQ_LOCK(txq);
 	for (ix = 0;; ix++) {
+		ATH_TXQ_LOCK(txq);
 		bf = TAILQ_FIRST(&txq->axq_q);
 		if (bf == NULL) {
 			txq->axq_link = NULL;
+			ATH_TXQ_UNLOCK(txq);
 			break;
 		}
 		ATH_TXQ_REMOVE(txq, bf, bf_list);
@@ -4664,6 +4669,7 @@ ath_tx_draintxq(struct ath_softc *sc, st
 		 * Clear ATH_BUF_BUSY; the completion handler
 		 * will free the buffer.
 		 */
+		ATH_TXQ_UNLOCK(txq);
 		bf->bf_flags &= ~ATH_BUF_BUSY;
 		if (bf->bf_comp)
 			bf->bf_comp(sc, bf, 1);
@@ -4675,6 +4681,7 @@ ath_tx_draintxq(struct ath_softc *sc, st
 	 * Drain software queued frames which are on
 	 * active TIDs.
 	 */
+	ATH_TXQ_LOCK(txq);
 	ath_tx_txq_drain(sc, txq);
 	ATH_TXQ_UNLOCK(txq);
 }

Modified: user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c	Sat Sep 17 22:53:05 2011	(r225644)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c	Sun Sep 18 07:20:59 2011	(r225645)
@@ -2318,10 +2318,6 @@ ath_tx_tid_drain(struct ath_softc *sc, s
  * This occurs when a completion handler frees the last buffer
  * for a node, and the node is thus freed. This causes the node
  * to be cleaned up, which ends up calling ath_tx_node_flush.
- *
- * XXX Because the TXQ may be locked right now (when it's called
- * XXX from a completion handler which frees the last node)
- * XXX do a dirty recursive hack avoidance.
  */
 void
 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
@@ -2331,20 +2327,14 @@ ath_tx_node_flush(struct ath_softc *sc, 
 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
 		struct ath_tid *atid = &an->an_tid[tid];
 		struct ath_txq *txq = sc->sc_ac2q[atid->ac];
-		int islocked = 0;
 
 		/* Remove this tid from the list of active tids */
-		/* XXX eww, this needs to be fixed */
-		if (ATH_TXQ_IS_LOCKED(txq))
-			islocked = 1;
-		else
-			ATH_TXQ_LOCK(txq);
+		ATH_TXQ_LOCK(txq);
 		ath_tx_tid_unsched(sc, an, atid);
 
 		/* Free packets */
 		ath_tx_tid_drain(sc, an, atid);
-		if (! islocked)
-			ATH_TXQ_UNLOCK(txq);
+		ATH_TXQ_UNLOCK(txq);
 	}
 }
 
@@ -2407,7 +2397,8 @@ ath_tx_normal_comp(struct ath_softc *sc,
 	struct ath_tid *atid = &an->an_tid[tid];
 	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
 
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);
+	/* The TID state is protected behind the TXQ lock */
+	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
 
 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
 	    __func__, bf, fail, atid->hwq_depth - 1);
@@ -2416,6 +2407,7 @@ ath_tx_normal_comp(struct ath_softc *sc,
 	if (atid->hwq_depth < 0)
 		device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
 		    __func__, atid->hwq_depth);
+	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
 
 	/*
 	 * punt to rate control if we're not being cleaned up
@@ -2898,6 +2890,7 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
 	struct ath_tx_status ts;
 	struct ieee80211_tx_ampdu *tap;
 	ath_bufhead bf_q;
+	ath_bufhead bf_cq;
 	int seq_st, tx_ok;
 	int hasba, isaggr;
 	uint32_t ba[2];
@@ -2912,7 +2905,8 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
 	    __func__, atid->hwq_depth);
 
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);
+	/* The TID state is kept behind the TXQ lock */
+	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
 
 	atid->hwq_depth--;
 	if (atid->hwq_depth < 0)
@@ -2924,6 +2918,7 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
 	 */
 	if (0 && atid->cleanup_inprogress) {
 		ath_tx_comp_cleanup_aggr(sc, bf_first);
+		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
 		return;
 	}
 
@@ -2943,10 +2938,12 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
 	 */
 	if (ts.ts_status & HAL_TXERR_XRETRY) {
 		ath_tx_comp_aggr_error(sc, bf_first, atid);
+		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
 		return;
 	}
 
 	TAILQ_INIT(&bf_q);
+	TAILQ_INIT(&bf_cq);
 	tap = ath_tx_get_tx_tid(an, tid);
 
 	/*
@@ -2993,6 +2990,20 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
 	/* bf_first is going to be invalid once this list is walked */
 	bf_first = NULL;
 
+	/*
+	 * Walk the list of completed frames and determine
+	 * which need to be completed and which need to be
+	 * retransmitted.
+	 *
+	 * For completed frames, the completion functions need
+	 * to be called at the end of this function as the last
+	 * node reference may free the node.
+	 *
+	 * Finally, since the TXQ lock can't be held during the
+	 * completion callback (to avoid lock recursion),
+	 * the completion calls have to be done outside of the
+	 * lock.
+	 */
 	while (bf) {
 		nframes++;
 		ba_index = ATH_BA_INDEX(seq_st, SEQNO(bf->bf_state.bfs_seqno));
@@ -3012,7 +3023,7 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
 				device_printf(sc->sc_dev,
 				    "%s: wasn't added: seqno %d\n",
 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
-			ath_tx_default_comp(sc, bf, 0);
+			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
 		} else {
 			drops += ath_tx_retry_subframe(sc, bf, &bf_q);
 			nbad++;
@@ -3020,6 +3031,9 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
 		bf = bf_next;
 	}
 
+	/* Now that the BAW updates have been done, unlock */
+	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+
 	if (nframes != nf)
 		device_printf(sc->sc_dev,
 		    "%s: num frames seen=%d; bf nframes=%d\n",
@@ -3054,15 +3068,23 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
 #endif
 
 	/* Prepend all frames to the beginning of the queue */
+	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
 		TAILQ_REMOVE(&bf_q, bf, bf_list);
 		ATH_TXQ_INSERT_HEAD(atid, bf, bf_list);
 	}
 
 	ath_tx_tid_sched(sc, an, atid);
+	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
 
 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
-	    "%s: finished; txa_start now %d\n", __func__, tap->txa_start);
+	    "%s: txa_start now %d\n", __func__, tap->txa_start);
+
+	/* Do deferred completion */
+	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
+		TAILQ_REMOVE(&bf_cq, bf, bf_list);
+		ath_tx_default_comp(sc, bf, 0);
+	}
 }
 
 /*
@@ -3081,7 +3103,7 @@ ath_tx_aggr_comp_unaggr(struct ath_softc
 	struct ath_tid *atid = &an->an_tid[tid];
 	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
 
-	ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);
+	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
 
 	if (tid == IEEE80211_NONQOS_TID)
 		device_printf(sc->sc_dev, "%s: TID=16!\n", __func__);
@@ -3112,6 +3134,7 @@ ath_tx_aggr_comp_unaggr(struct ath_softc
 	 */
 	if (atid->cleanup_inprogress) {
 		ath_tx_comp_cleanup_unaggr(sc, bf);
+		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
 		return;
 	}
 
@@ -3121,6 +3144,7 @@ ath_tx_aggr_comp_unaggr(struct ath_softc
 	 */
 	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
 		ath_tx_aggr_retry_unaggr(sc, bf);
+		ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
 		return;
 	}
 
@@ -3136,6 +3160,8 @@ ath_tx_aggr_comp_unaggr(struct ath_softc
 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
 	}
 
+	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
+
 	ath_tx_default_comp(sc, bf, fail);
 	/* bf is freed at this point */
 }

