svn commit: r225055 - user/adrian/if_ath_tx/sys/dev/ath

Adrian Chadd adrian at FreeBSD.org
Sun Aug 21 03:48:08 UTC 2011


Author: adrian
Date: Sun Aug 21 03:48:07 2011
New Revision: 225055
URL: http://svn.freebsd.org/changeset/base/225055

Log:
  * Correctly lock the TXQ during ath_txq_sched()
  * Add the aggregate packet counter
  * For now, don't try to queue further work to a TXQ if there are more than
    two packets already queued to it. This is an attempt to give the
    aggregation code more time to form aggregates (a standalone sketch of the
    idea follows this log message).
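
  The gate in the last item is conceptually just a depth check against a small
  threshold before handing another frame to the hardware queue. The sketch
  below illustrates that idea only; the struct, the AGGR_MIN_QDEPTH constant
  and the helper are simplified stand-ins invented for illustration, not the
  ath(4) definitions (the real code tests txq->axq_depth against
  ATH_AGGR_MIN_QDEPTH, as the diff below shows).

/*
 * Minimal, self-contained sketch of the queue-depth gate; all names here
 * are hypothetical stand-ins, not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

#define AGGR_MIN_QDEPTH 2	/* assumed threshold, per the log message */

struct txq_sketch {
	int depth;		/* frames currently on the hardware queue */
};

/*
 * Allow another handoff only while the hardware queue is shallow; once it
 * holds enough frames to stay busy, hold frames back so the software queue
 * can accumulate enough traffic to form larger aggregates.
 */
static bool
txq_may_queue(const struct txq_sketch *txq)
{
	return (txq->depth <= AGGR_MIN_QDEPTH);
}

int
main(void)
{
	struct txq_sketch txq = { .depth = 0 };

	for (int frame = 0; frame < 6; frame++) {
		if (!txq_may_queue(&txq)) {
			printf("frame %d: held back, hw depth %d\n",
			    frame, txq.depth);
			continue;
		}
		txq.depth++;	/* pretend the frame was handed to hardware */
		printf("frame %d: queued, hw depth now %d\n",
		    frame, txq.depth);
	}
	return (0);
}

  Running it shows the first few frames going out immediately and the rest
  being held back once the "hardware" depth passes the threshold.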

Modified:
  user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c

Modified: user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c	Sun Aug 21 03:12:07 2011	(r225054)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c	Sun Aug 21 03:48:07 2011	(r225055)
@@ -2928,6 +2928,7 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
 			    bf->bf_state.bfs_al);
 			bf->bf_state.bfs_aggr = 1;
 			sc->sc_stats.tx_aggr.aggr_pkts[bf->bf_state.bfs_nframes]++;
+			sc->sc_stats.tx_aggr.aggr_aggr_pkt++;
 
 			/* Set rate 1, 2, 3 to 0 for aggregate frames */
 			bf->bf_state.bfs_rc[1].rix =
@@ -2961,6 +2962,10 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
 		/* Punt to txq */
 		ATH_TXQ_LOCK(txq);
 		ath_tx_handoff(sc, txq, bf);
+		if (txq->axq_depth > ATH_AGGR_MIN_QDEPTH) {
+			ATH_TXQ_UNLOCK(txq);
+			break;
+		}
 		ATH_TXQ_UNLOCK(txq);
 
 		/*
@@ -3058,11 +3063,26 @@ ath_txq_sched(struct ath_softc *sc, stru
 	struct ath_tid *atid, *next;
 
 	/*
+	 * Don't schedule if the hardware queue is busy.
+	 * This (hopefully) gives some more time to aggregate
+	 * some packets in the aggregation queue.
+	 * XXX TODO: only do this based on AMPDU buffers, not
+	 * XXX normal ones.
+	 */
+	ATH_TXQ_LOCK(txq);
+	if (txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
+		ATH_TXQ_UNLOCK(txq);
+		return;
+	}
+
+	/*
 	 * For now, let's not worry about QoS, fair-scheduling
 	 * or the like. That's a later problem. Just throw
 	 * packets at the hardware.
 	 */
+	/* XXX txq is already locked */
 	TAILQ_FOREACH_SAFE(atid, &txq->axq_tidq, axq_qelem, next) {
+		ATH_TXQ_UNLOCK(txq);
 		/*
 		 * Suspend paused queues here; they'll be resumed
 		 * once the addba completes or times out.
@@ -3077,7 +3097,7 @@ ath_txq_sched(struct ath_softc *sc, stru
 			ATH_TXQ_UNLOCK(atid);
 			ATH_TXQ_LOCK(txq);
 			ath_tx_tid_unsched(sc, atid->an, atid->tid);
-			ATH_TXQ_UNLOCK(txq);
+			//ATH_TXQ_UNLOCK(txq);
 			continue;
 		}
 		ATH_TXQ_UNLOCK(atid);
@@ -3090,8 +3110,13 @@ ath_txq_sched(struct ath_softc *sc, stru
 		ATH_TXQ_LOCK(txq);
 		if (atid->axq_depth == 0)
 			ath_tx_tid_unsched(sc, atid->an, atid->tid);
-		ATH_TXQ_UNLOCK(txq);
+		if (txq->axq_depth > ATH_AGGR_MIN_QDEPTH) {
+			//ATH_TXQ_UNLOCK(txq);
+			break;
+		}
+		//ATH_TXQ_UNLOCK(txq);
 	}
+	ATH_TXQ_UNLOCK(txq);
 }
 
 /*

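For readers following the locking change in ath_txq_sched(), here is a
standalone sketch of the pattern this revision moves to: take the queue lock
up front, bail out early if the hardware queue is already deep enough, drop
the lock around the per-TID work, and hold it across the end-of-iteration
bookkeeping so a single unlock after the loop covers both the early break and
the normal exit. pthread mutexes and the integer TID list are stand-ins for
ATH_TXQ_LOCK/ATH_TXQ_UNLOCK and the TID queue; this is an illustration of the
lock flow, not the driver code.

#include <pthread.h>
#include <stdio.h>

#define AGGR_MIN_QDEPTH 2	/* assumed threshold, as in the sketch above */

static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;
static int axq_depth;		/* frames on the "hardware" queue */

static void
service_tid(int tid)
{
	/* Per-TID work runs unlocked; it may add frames to the hw queue. */
	pthread_mutex_lock(&txq_lock);
	axq_depth++;
	pthread_mutex_unlock(&txq_lock);
	printf("serviced tid %d\n", tid);
}

static void
txq_sched_sketch(const int *tids, int ntids)
{
	pthread_mutex_lock(&txq_lock);
	if (axq_depth >= AGGR_MIN_QDEPTH) {
		/* Hardware queue already busy; let aggregates build up. */
		pthread_mutex_unlock(&txq_lock);
		return;
	}

	for (int i = 0; i < ntids; i++) {
		/* Drop the lock around the (potentially long) TID work. */
		pthread_mutex_unlock(&txq_lock);
		service_tid(tids[i]);
		pthread_mutex_lock(&txq_lock);

		/* Bookkeeping and the early-out both run locked... */
		if (axq_depth > AGGR_MIN_QDEPTH)
			break;
	}
	/* ...so one unlock after the loop covers every exit path. */
	pthread_mutex_unlock(&txq_lock);
}

int
main(void)
{
	int tids[] = { 0, 1, 5, 6 };

	txq_sched_sketch(tids, 4);
	printf("final depth %d\n", axq_depth);
	return (0);
}

Keeping the lock held at the bottom of each iteration is what lets the diff
replace the per-iteration unlocks with the single ATH_TXQ_UNLOCK() after the
loop.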
