svn commit: r225224 - user/adrian/if_ath_tx/sys/dev/ath
Adrian Chadd
adrian at FreeBSD.org
Sun Aug 28 03:05:16 UTC 2011
Author: adrian
Date: Sun Aug 28 03:05:16 2011
New Revision: 225224
URL: http://svn.freebsd.org/changeset/base/225224
Log:
Migrate back to the locking structure the atheros reference driver and
linux ath9k uses - hide the per-TID state behind the associated hardware
TXQ.
This is less fine-grained than what I was doing before, but it results in:
* The code being cleaner (i.e., there's not a mess of locks everywhere);
* The code being closer to what the reference/linux drivers do, making it
easier to port code over;
* A bunch of state changes become much more predictable - e.g., the BAW
window updates occur in one bunch rather than interleaved with other
packet TX.
This isn't yet complete, as there are a few things to fix:
* recursive mutexes during ht node cleanup
* remove the current txq drain which uses ieee80211_iterate_nodes() which
grabs the IEEE80211_NODE_LOCK() and replace it with a similar setup
to what Linux/Reference driver does.
* Figure out how to properly halt the software TXQ (via ath_raw_xmit()
and via ath_start()) during the reset process so frames aren't queued
to the software queue whilst things are drained.
* Investigate whether a STA which is going away can have traffic queued
to its ieee80211_node, which is in the process of being freed or has
already been freed.
Modified:
user/adrian/if_ath_tx/sys/dev/ath/if_ath.c
user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c
user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx_ht.c
user/adrian/if_ath_tx/sys/dev/ath/if_athvar.h
Modified: user/adrian/if_ath_tx/sys/dev/ath/if_ath.c
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_ath.c Sun Aug 28 00:14:40 2011 (r225223)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_ath.c Sun Aug 28 03:05:16 2011 (r225224)
@@ -4322,12 +4322,11 @@ ath_tx_processq(struct ath_softc *sc, st
(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
txq->axq_link);
nacked = 0;
+ ATH_TXQ_LOCK(txq);
for (;;) {
txq->axq_intrcnt = 0; /* reset periodic desc intr count */
- ATH_TXQ_LOCK(txq);
bf = TAILQ_FIRST(&txq->axq_q);
if (bf == NULL) {
- ATH_TXQ_UNLOCK(txq);
break;
}
ds0 = &bf->bf_desc[0];
@@ -4340,7 +4339,6 @@ ath_tx_processq(struct ath_softc *sc, st
status == HAL_OK);
#endif
if (status == HAL_EINPROGRESS) {
- ATH_TXQ_UNLOCK(txq);
break;
}
ATH_TXQ_REMOVE(txq, bf, bf_list);
@@ -4363,7 +4361,6 @@ ath_tx_processq(struct ath_softc *sc, st
txq->axq_link = NULL;
if (bf->bf_state.bfs_aggr)
txq->axq_aggr_depth--;
- ATH_TXQ_UNLOCK(txq);
ni = bf->bf_node;
/*
@@ -4435,6 +4432,7 @@ ath_tx_processq(struct ath_softc *sc, st
/* Kick the TXQ scheduler */
ath_txq_sched(sc, txq);
+ ATH_TXQ_UNLOCK(txq);
return nacked;
}
@@ -4555,7 +4553,9 @@ ath_tx_sched_proc(void *arg, int npendin
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
txq = &sc->sc_txq[i];
if (ATH_TXQ_SETUP(sc, i)) {
+ ATH_TXQ_LOCK(txq);
ath_txq_sched(sc, txq);
+ ATH_TXQ_UNLOCK(txq);
}
}
}
@@ -4640,18 +4640,16 @@ ath_tx_draintxq(struct ath_softc *sc, st
bf->bf_flags &= ~ATH_BUF_BUSY;
ATH_TXBUF_UNLOCK(sc);
+ ATH_TXQ_LOCK(txq);
for (ix = 0;; ix++) {
- ATH_TXQ_LOCK(txq);
bf = TAILQ_FIRST(&txq->axq_q);
if (bf == NULL) {
txq->axq_link = NULL;
- ATH_TXQ_UNLOCK(txq);
break;
}
ATH_TXQ_REMOVE(txq, bf, bf_list);
if (bf->bf_state.bfs_aggr)
txq->axq_aggr_depth--;
- ATH_TXQ_UNLOCK(txq);
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RESET) {
struct ieee80211com *ic = sc->sc_ifp->if_l2com;
@@ -4679,6 +4677,7 @@ ath_tx_draintxq(struct ath_softc *sc, st
else
ath_tx_default_comp(sc, bf, 1);
}
+ ATH_TXQ_UNLOCK(txq);
}
static void
Modified: user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c Sun Aug 28 00:14:40 2011 (r225223)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx.c Sun Aug 28 03:05:16 2011 (r225224)
@@ -1235,8 +1235,7 @@ ath_tx_start(struct ath_softc *sc, struc
/* Don't do it whilst pending; the net80211 layer still assigns them */
/* XXX do we need locking here? */
if (is_ampdu_tx) {
- struct ath_node *an = ATH_NODE(ni);
- ATH_TXQ_LOCK(&an->an_tid[tid]);
+ ATH_TXQ_LOCK(txq);
/*
* Always call; this function will
* handle making sure that null data frames
@@ -1248,7 +1247,7 @@ ath_tx_start(struct ath_softc *sc, struc
subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
bf->bf_state.bfs_dobaw = 1;
}
- ATH_TXQ_UNLOCK(&an->an_tid[tid]);
+ ATH_TXQ_UNLOCK(txq);
}
/*
@@ -1743,8 +1742,9 @@ ath_tx_addto_baw(struct ath_softc *sc, s
{
int index, cindex;
struct ieee80211_tx_ampdu *tap;
+ struct ath_txq *txq = sc->sc_ac2q[tid->ac];
- ATH_TXQ_LOCK_ASSERT(tid);
+ ATH_TXQ_LOCK_ASSERT(txq);
if (bf->bf_state.bfs_isretried)
return;
@@ -1792,8 +1792,9 @@ ath_tx_update_baw(struct ath_softc *sc,
{
int index, cindex;
struct ieee80211_tx_ampdu *tap;
+ struct ath_txq *txq = sc->sc_ac2q[tid->ac];
- ATH_TXQ_LOCK_ASSERT(tid);
+ ATH_TXQ_LOCK_ASSERT(txq);
tap = ath_tx_get_tx_tid(an, tid->tid);
index = ATH_BA_INDEX(tap->txa_start, seqno);
@@ -1959,11 +1960,8 @@ ath_tx_swq(struct ath_softc *sc, struct
bf->bf_state.bfs_addedbaw = 0;
/* Queue frame to the tail of the software queue */
- ATH_TXQ_LOCK(atid);
- ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
- ATH_TXQ_UNLOCK(atid);
-
ATH_TXQ_LOCK(txq);
+ ATH_TXQ_INSERT_TAIL(atid, bf, bf_list);
ath_tx_tid_sched(sc, an, tid);
ATH_TXQ_UNLOCK(txq);
}
@@ -2012,9 +2010,6 @@ ath_tx_tid_init(struct ath_softc *sc, st
atid->ac = WME_AC_BE;
else
atid->ac = TID_TO_WME_AC(i);
- snprintf(atid->axq_name, 48, "%p %s %d",
- an, device_get_nameunit(sc->sc_dev), i);
- mtx_init(&atid->axq_lock, atid->axq_name, NULL, MTX_DEF);
}
}
@@ -2028,11 +2023,11 @@ ath_tx_tid_init(struct ath_softc *sc, st
static void
ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
{
- ATH_TXQ_LOCK(tid);
+ ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
tid->paused++;
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n",
__func__, tid->paused);
- ATH_TXQ_UNLOCK(tid);
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);
}
/*
@@ -2046,7 +2041,7 @@ ath_tx_tid_resume(struct ath_softc *sc,
{
struct ath_txq *txq = sc->sc_ac2q[tid->ac];
- ATH_TXQ_LOCK(tid);
+ ATH_TXQ_LOCK(txq);
tid->paused--;
@@ -2054,12 +2049,10 @@ ath_tx_tid_resume(struct ath_softc *sc,
__func__, tid->paused);
if (tid->paused || tid->axq_depth == 0) {
- ATH_TXQ_UNLOCK(tid);
+ ATH_TXQ_UNLOCK(txq);
return;
}
- ATH_TXQ_UNLOCK(tid);
- ATH_TXQ_LOCK(txq);
ath_tx_tid_sched(sc, tid->an, tid->tid);
ATH_TXQ_UNLOCK(txq);
@@ -2105,12 +2098,12 @@ ath_tx_tid_drain(struct ath_softc *sc, s
tap = ath_tx_get_tx_tid(an, tid->tid);
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+
/* Walk the queue, free frames */
for (;;) {
- ATH_TXQ_LOCK(tid);
bf = TAILQ_FIRST(&tid->axq_q);
if (bf == NULL) {
- ATH_TXQ_UNLOCK(tid);
break;
}
@@ -2139,7 +2132,6 @@ ath_tx_tid_drain(struct ath_softc *sc, s
__func__, SEQNO(bf->bf_state.bfs_seqno));
}
ATH_TXQ_REMOVE(tid, bf, bf_list);
- ATH_TXQ_UNLOCK(tid);
ath_tx_default_comp(sc, bf, 0);
}
@@ -2154,18 +2146,10 @@ ath_tx_tid_drain(struct ath_softc *sc, s
* The cleaner solution is to do the sequence number allocation
* when the packet is first transmitted - and thus the "retries"
* check above would be enough to update the BAW/seqno.
- *
- * XXX There may exist a situation where active traffic on a node
- * XXX is occuring in another thread whilst an interface is being
- * XXX reset. In this instance, traffic is going to be added (eg to the
- * XXX tail of the queue, or head if it's a retry) whilst this routine
- * XXX attempts to drain. This is going to make things be very out
- * XXX of whack.
*/
/* But don't do it for non-QoS TIDs */
if (tap) {
- ATH_TXQ_LOCK(tid);
#if 0
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: node %p: TID %d: sliding BAW left edge to %d\n",
@@ -2173,14 +2157,12 @@ ath_tx_tid_drain(struct ath_softc *sc, s
#endif
ni->ni_txseqs[tid->tid] = tap->txa_start;
tid->baw_tail = tid->baw_head;
- ATH_TXQ_UNLOCK(tid);
}
}
/*
* Flush all software queued packets for the given node.
*
- * Work around recursive TXQ locking.
* This occurs when a completion handler frees the last buffer
* for a node, and the node is thus freed. This causes the node
* to be cleaned up, which ends up calling ath_tx_node_flush.
@@ -2197,10 +2179,10 @@ ath_tx_node_flush(struct ath_softc *sc,
/* Remove this tid from the list of active tids */
ATH_TXQ_LOCK(txq);
ath_tx_tid_unsched(sc, an, tid);
- ATH_TXQ_UNLOCK(txq);
/* Free packets */
ath_tx_tid_drain(sc, an, atid);
+ ATH_TXQ_UNLOCK(txq);
}
}
@@ -2226,7 +2208,6 @@ ath_tx_tid_cleanup(struct ath_softc *sc,
atid = &an->an_tid[tid];
/* Mark hw-queued packets as having no parent now */
ath_tx_tid_txq_unmark(sc, an, tid);
- mtx_destroy(&atid->axq_lock);
}
}
@@ -2242,15 +2223,15 @@ ath_tx_normal_comp(struct ath_softc *sc,
struct ath_tid *atid = &an->an_tid[tid];
struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);
+
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
__func__, bf, fail, atid->hwq_depth - 1);
- ATH_TXQ_LOCK(atid);
atid->hwq_depth--;
if (atid->hwq_depth < 0)
device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
__func__, atid->hwq_depth);
- ATH_TXQ_UNLOCK(atid);
/*
* punt to rate control if we're not being cleaned up
@@ -2276,6 +2257,8 @@ ath_tx_comp_cleanup_unaggr(struct ath_so
int tid = bf->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);
+
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
__func__, tid, atid->incomp);
@@ -2313,13 +2296,14 @@ ath_tx_cleanup(struct ath_softc *sc, str
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: TID %d: called\n", __func__, tid);
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);
+
/*
* Update the frames in the software TX queue:
*
* + Discard retry frames in the queue
* + Fix the completion function to be non-aggregate
*/
- ATH_TXQ_LOCK(atid);
bf = TAILQ_FIRST(&atid->axq_q);
while (bf) {
if (bf->bf_state.bfs_isretried) {
@@ -2347,7 +2331,6 @@ ath_tx_cleanup(struct ath_softc *sc, str
bf->bf_comp = ath_tx_normal_comp;
bf = TAILQ_NEXT(bf, bf_list);
}
- ATH_TXQ_UNLOCK(atid);
/* The caller is required to pause the TID */
#if 0
@@ -2363,7 +2346,6 @@ ath_tx_cleanup(struct ath_softc *sc, str
*/
tap = ath_tx_get_tx_tid(an, tid);
/* Need the lock - fiddling with BAW */
- ATH_TXQ_LOCK(atid);
while (atid->baw_head != atid->baw_tail) {
if (atid->tx_buf[atid->baw_head]) {
atid->incomp++;
@@ -2373,7 +2355,6 @@ ath_tx_cleanup(struct ath_softc *sc, str
INCR(atid->baw_head, ATH_TID_MAX_BUFS);
INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
}
- ATH_TXQ_UNLOCK(atid);
/*
* If cleanup is required, defer TID scheduling
@@ -2488,10 +2469,8 @@ ath_tx_aggr_retry_unaggr(struct ath_soft
/* Update BAW anyway */
if (bf->bf_state.bfs_dobaw) {
- ATH_TXQ_LOCK(atid);
ath_tx_update_baw(sc, an, atid,
SEQNO(bf->bf_state.bfs_seqno));
- ATH_TXQ_UNLOCK(atid);
if (! bf->bf_state.bfs_addedbaw)
device_printf(sc->sc_dev,
"%s: wasn't added: seqno %d\n",
@@ -2539,13 +2518,8 @@ ath_tx_aggr_retry_unaggr(struct ath_soft
* Insert this at the head of the queue, so it's
* retried before any current/subsequent frames.
*/
- ATH_TXQ_LOCK(atid);
ATH_TXQ_INSERT_HEAD(atid, bf, bf_list);
- ATH_TXQ_UNLOCK(atid);
-
- ATH_TXQ_LOCK(bf->bf_state.bfs_txq);
ath_tx_tid_sched(sc, an, atid->tid);
- ATH_TXQ_UNLOCK(bf->bf_state.bfs_txq);
}
/*
@@ -2564,6 +2538,8 @@ ath_tx_retry_subframe(struct ath_softc *
int tid = bf->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);
+
ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
@@ -2591,14 +2567,12 @@ ath_tx_retry_subframe(struct ath_softc *
DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
"%s: max retries: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
- ATH_TXQ_LOCK(atid);
ath_tx_update_baw(sc, an, atid, SEQNO(bf->bf_state.bfs_seqno));
if (! bf->bf_state.bfs_addedbaw)
device_printf(sc->sc_dev,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
bf->bf_state.bfs_dobaw = 0;
- ATH_TXQ_UNLOCK(atid);
/* XXX subframe completion status? is that valid here? */
ath_tx_default_comp(sc, bf, 0);
return 1;
@@ -2626,6 +2600,8 @@ ath_tx_comp_aggr_error(struct ath_softc
struct ieee80211_tx_ampdu *tap;
struct ath_txq *txq = sc->sc_ac2q[tid->ac];
+ ATH_TXQ_LOCK_ASSERT(txq);
+
tap = ath_tx_get_tx_tid(an, tid->tid);
TAILQ_INIT(&bf_q);
@@ -2669,16 +2645,12 @@ ath_tx_comp_aggr_error(struct ath_softc
#endif
/* Prepend all frames to the beginning of the queue */
- ATH_TXQ_LOCK(tid);
while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
TAILQ_REMOVE(&bf_q, bf, bf_list);
ATH_TXQ_INSERT_HEAD(tid, bf, bf_list);
}
- ATH_TXQ_UNLOCK(tid);
- ATH_TXQ_LOCK(txq);
ath_tx_tid_sched(sc, an, tid->tid);
- ATH_TXQ_UNLOCK(txq);
}
/*
@@ -2748,12 +2720,12 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
__func__, atid->hwq_depth);
- ATH_TXQ_LOCK(atid);
+ ATH_TXQ_LOCK_ASSERT(txq);
+
atid->hwq_depth--;
if (atid->hwq_depth < 0)
device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
__func__, atid->hwq_depth);
- ATH_TXQ_UNLOCK(atid);
/*
* Punt cleanup to the relevant function, not our problem now
@@ -2837,10 +2809,8 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
ATH_BA_ISSET(ba, ba_index));
if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
- ATH_TXQ_LOCK(atid);
ath_tx_update_baw(sc, an, atid,
SEQNO(bf->bf_state.bfs_seqno));
- ATH_TXQ_UNLOCK(atid);
bf->bf_state.bfs_dobaw = 0;
if (! bf->bf_state.bfs_addedbaw)
device_printf(sc->sc_dev,
@@ -2888,16 +2858,12 @@ ath_tx_aggr_comp_aggr(struct ath_softc *
#endif
/* Prepend all frames to the beginning of the queue */
- ATH_TXQ_LOCK(atid);
while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
TAILQ_REMOVE(&bf_q, bf, bf_list);
ATH_TXQ_INSERT_HEAD(atid, bf, bf_list);
}
- ATH_TXQ_UNLOCK(atid);
- ATH_TXQ_LOCK(txq);
ath_tx_tid_sched(sc, an, atid->tid);
- ATH_TXQ_UNLOCK(txq);
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: finished; txa_start now %d\n", __func__, tap->txa_start);
@@ -2918,6 +2884,9 @@ ath_tx_aggr_comp_unaggr(struct ath_softc
int tid = bf->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
+ struct ath_txq *txq = sc->sc_ac2q[atid->ac];
+
+ ATH_TXQ_LOCK_ASSERT(txq);
if (tid == IEEE80211_NONQOS_TID)
device_printf(sc->sc_dev, "%s: TID=16!\n", __func__);
@@ -2925,12 +2894,10 @@ ath_tx_aggr_comp_unaggr(struct ath_softc
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: tid=%d, hwq_depth=%d\n",
__func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth);
- ATH_TXQ_LOCK(atid);
atid->hwq_depth--;
if (atid->hwq_depth < 0)
device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
__func__, atid->hwq_depth);
- ATH_TXQ_UNLOCK(atid);
/*
* Update rate control status here, before we possibly
@@ -2966,14 +2933,12 @@ ath_tx_aggr_comp_unaggr(struct ath_softc
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
__func__, tid, SEQNO(bf->bf_state.bfs_seqno));
if (bf->bf_state.bfs_dobaw) {
- ATH_TXQ_LOCK(atid);
ath_tx_update_baw(sc, an, atid, SEQNO(bf->bf_state.bfs_seqno));
bf->bf_state.bfs_dobaw = 0;
if (! bf->bf_state.bfs_addedbaw)
device_printf(sc->sc_dev,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
- ATH_TXQ_UNLOCK(atid);
}
ath_tx_default_comp(sc, bf, fail);
@@ -2998,14 +2963,15 @@ void
ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, int tid)
{
struct ath_buf *bf;
- struct ath_txq *txq;
struct ath_tid *atid = &an->an_tid[tid];
+ struct ath_txq *txq = sc->sc_ac2q[atid->ac];
struct ieee80211_tx_ampdu *tap;
struct ieee80211_node *ni = &an->an_node;
ATH_AGGR_STATUS status;
ath_bufhead bf_q;
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid);
+ ATH_TXQ_LOCK_ASSERT(txq);
tap = ath_tx_get_tx_tid(an, tid);
@@ -3016,8 +2982,6 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
for (;;) {
status = ATH_AGGR_DONE;
- ATH_TXQ_LOCK(atid);
-
/*
* If the upper layer has paused the TID, don't
* queue any further packets.
@@ -3031,7 +2995,6 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
bf = TAILQ_FIRST(&atid->axq_q);
if (bf == NULL) {
- ATH_TXQ_UNLOCK(atid);
break;
}
@@ -3043,7 +3006,6 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: non-baw packet\n",
__func__);
ATH_TXQ_REMOVE(atid, bf, bf_list);
- ATH_TXQ_UNLOCK(atid);
bf->bf_state.bfs_aggr = 0;
ath_tx_setds(sc, bf);
ath_tx_chaindesclist(sc, bf);
@@ -3055,7 +3017,6 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
/* Queue the packet; continue */
goto queuepkt;
}
- ATH_TXQ_UNLOCK(atid);
/* Don't lock the TID - ath_tx_form_aggr will lock as needed */
TAILQ_INIT(&bf_q);
@@ -3128,7 +3089,7 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
ath_tx_set_ratectrl(sc, ni, bf);
}
queuepkt:
- txq = bf->bf_state.bfs_txq;
+ //txq = bf->bf_state.bfs_txq;
/* Set completion handler, multi-frame aggregate or not */
bf->bf_comp = ath_tx_aggr_comp;
@@ -3137,15 +3098,11 @@ ath_tx_tid_hw_queue_aggr(struct ath_soft
device_printf(sc->sc_dev, "%s: TID=16?\n", __func__);
/* Punt to txq */
- ATH_TXQ_LOCK(txq);
ath_tx_handoff(sc, txq, bf);
- ATH_TXQ_UNLOCK(txq);
/* Track outstanding buffer count to hardware */
/* aggregates are "one" buffer */
- ATH_TXQ_LOCK(atid);
atid->hwq_depth++;
- ATH_TXQ_UNLOCK(atid);
/*
* Break out if ath_tx_form_aggr() indicated
@@ -3167,13 +3124,15 @@ void
ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, int tid)
{
struct ath_buf *bf;
- struct ath_txq *txq;
struct ath_tid *atid = &an->an_tid[tid];
+ struct ath_txq *txq = sc->sc_ac2q[atid->ac];
struct ieee80211_node *ni = &an->an_node;
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
__func__, an, tid);
+ ATH_TXQ_LOCK_ASSERT(txq);
+
/* Check - is AMPDU pending or running? then print out something */
if (ath_tx_ampdu_pending(sc, an, tid))
device_printf(sc->sc_dev, "%s: tid=%d, ampdu pending?\n",
@@ -3183,7 +3142,6 @@ ath_tx_tid_hw_queue_norm(struct ath_soft
__func__, tid);
for (;;) {
- ATH_TXQ_LOCK(atid);
/*
* If the upper layers have paused the TID, don't
@@ -3194,14 +3152,12 @@ ath_tx_tid_hw_queue_norm(struct ath_soft
bf = TAILQ_FIRST(&atid->axq_q);
if (bf == NULL) {
- ATH_TXQ_UNLOCK(atid);
break;
}
ATH_TXQ_REMOVE(atid, bf, bf_list);
- ATH_TXQ_UNLOCK(atid);
- txq = bf->bf_state.bfs_txq;
+ KASSERT(txq == bf->bf_state.bfs_txq, ("txqs not equal!\n"));
/* Sanity check! */
if (tid != bf->bf_state.bfs_tid) {
@@ -3219,14 +3175,10 @@ ath_tx_tid_hw_queue_norm(struct ath_soft
/* Track outstanding buffer count to hardware */
/* aggregates are "one" buffer */
- ATH_TXQ_LOCK(atid);
atid->hwq_depth++;
- ATH_TXQ_UNLOCK(atid);
/* Punt to hardware or software txq */
- ATH_TXQ_LOCK(txq);
ath_tx_handoff(sc, txq, bf);
- ATH_TXQ_UNLOCK(txq);
}
}
@@ -3236,27 +3188,20 @@ ath_tx_tid_hw_queue_norm(struct ath_soft
* This function walks the list of TIDs (ie, ath_node TIDs
* with queued traffic) and attempts to schedule traffic
* from them.
- *
- * XXX I haven't locked this code yet, but I need to!
- * XXX walking the tidq requires the TXQ lock, checking
- * XXX for how busy the queues are require the relevant
- * XXX lock!
*/
void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_tid *atid, *next;
+ ATH_TXQ_LOCK_ASSERT(txq);
+
/*
* Don't schedule if the hardware queue is busy.
* This (hopefully) gives some more time to aggregate
* some packets in the aggregation queue.
- * XXX TODO: only do this based on AMPDU buffers, not
- * XXX normal ones.
*/
- ATH_TXQ_LOCK(txq);
if (txq->axq_aggr_depth >= sc->sc_hwq_limit) {
- ATH_TXQ_UNLOCK(txq);
return;
}
@@ -3265,9 +3210,7 @@ ath_txq_sched(struct ath_softc *sc, stru
* or the like. That's a later problem. Just throw
* packets at the hardware.
*/
- /* XXX txq is already locked */
TAILQ_FOREACH_SAFE(atid, &txq->axq_tidq, axq_qelem, next) {
- ATH_TXQ_UNLOCK(txq);
/*
* Suspend paused queues here; they'll be resumed
* once the addba completes or times out.
@@ -3277,33 +3220,24 @@ ath_txq_sched(struct ath_softc *sc, stru
*/
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
__func__, atid->tid, atid->paused);
- ATH_TXQ_LOCK(atid);
if (atid->paused) {
- ATH_TXQ_UNLOCK(atid);
- ATH_TXQ_LOCK(txq);
ath_tx_tid_unsched(sc, atid->an, atid->tid);
- //ATH_TXQ_UNLOCK(txq);
continue;
}
- ATH_TXQ_UNLOCK(atid);
if (ath_tx_ampdu_running(sc, atid->an, atid->tid))
ath_tx_tid_hw_queue_aggr(sc, atid->an, atid->tid);
else
ath_tx_tid_hw_queue_norm(sc, atid->an, atid->tid);
/* Empty? Remove */
- ATH_TXQ_LOCK(txq);
if (atid->axq_depth == 0)
ath_tx_tid_unsched(sc, atid->an, atid->tid);
/* Give the software queue time to aggregate more packets */
if (txq->axq_aggr_depth >= sc->sc_hwq_limit) {
- //ATH_TXQ_UNLOCK(txq);
break;
}
- //ATH_TXQ_UNLOCK(txq);
}
- ATH_TXQ_UNLOCK(txq);
}
/*
@@ -3500,12 +3434,14 @@ ath_addba_stop(struct ieee80211_node *ni
/* There's no need to hold the TXQ lock here */
sc->sc_addba_stop(ni, tap);
- ath_tx_cleanup(sc, an, tid);
+ ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
/*
* ath_tx_cleanup will resume the TID if possible, otherwise
* it'll set the cleanup flag, and it'll be unpaused once
* things have been cleaned up.
*/
+ ath_tx_cleanup(sc, an, tid);
+ ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
}
/*
Modified: user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx_ht.c
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx_ht.c Sun Aug 28 00:14:40 2011 (r225223)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_ath_tx_ht.c Sun Aug 28 03:05:16 2011 (r225224)
@@ -655,6 +655,8 @@ ath_tx_form_aggr(struct ath_softc *sc, s
int prev_frames = 0; /* XXX for AR5416 burst, not done here */
int prev_al = 0; /* XXX also for AR5416 burst */
+ ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]);
+
tap = ath_tx_get_tx_tid(an, tid->tid);
if (tap == NULL) {
status = ATH_AGGR_ERROR;
@@ -664,12 +666,10 @@ ath_tx_form_aggr(struct ath_softc *sc, s
h_baw = tap->txa_wnd / 2;
for (;;) {
- ATH_TXQ_LOCK(tid);
bf = TAILQ_FIRST(&tid->axq_q);
if (bf_first == NULL)
bf_first = bf;
if (bf == NULL) {
- ATH_TXQ_UNLOCK(tid);
status = ATH_AGGR_DONE;
break;
} else {
@@ -697,7 +697,6 @@ ath_tx_form_aggr(struct ath_softc *sc, s
* TX the frame individually.
*/
if (! bf->bf_state.bfs_dobaw) {
- ATH_TXQ_UNLOCK(tid);
status = ATH_AGGR_NONAGGR;
break;
}
@@ -715,7 +714,6 @@ ath_tx_form_aggr(struct ath_softc *sc, s
*/
if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
SEQNO(bf->bf_state.bfs_seqno))) {
- ATH_TXQ_UNLOCK(tid);
status = ATH_AGGR_BAW_CLOSED;
break;
}
@@ -734,7 +732,6 @@ ath_tx_form_aggr(struct ath_softc *sc, s
al_delta = ATH_AGGR_DELIM_SZ + bf->bf_state.bfs_pktlen;
if (nframes &&
(aggr_limit < (al + bpad + al_delta + prev_al))) {
- ATH_TXQ_UNLOCK(tid);
status = ATH_AGGR_LIMITED;
break;
}
@@ -744,7 +741,6 @@ ath_tx_form_aggr(struct ath_softc *sc, s
*/
if ((nframes + prev_frames) >= MIN((h_baw),
IEEE80211_AMPDU_SUBFRAME_DEFAULT)) {
- ATH_TXQ_UNLOCK(tid);
status = ATH_AGGR_LIMITED;
break;
}
@@ -757,7 +753,6 @@ ath_tx_form_aggr(struct ath_softc *sc, s
/* The TID lock is required for the BAW update */
ath_tx_addto_baw(sc, an, tid, bf);
bf->bf_state.bfs_addedbaw = 1;
- ATH_TXQ_UNLOCK(tid);
/*
* Add the now owned buffer (which isn't
Modified: user/adrian/if_ath_tx/sys/dev/ath/if_athvar.h
==============================================================================
--- user/adrian/if_ath_tx/sys/dev/ath/if_athvar.h Sun Aug 28 00:14:40 2011 (r225223)
+++ user/adrian/if_ath_tx/sys/dev/ath/if_athvar.h Sun Aug 28 03:05:16 2011 (r225224)
@@ -92,7 +92,6 @@ struct ath_buf;
*/
struct ath_tid {
TAILQ_HEAD(,ath_buf) axq_q; /* pending buffers */
- struct mtx axq_lock; /* lock on q and link */
u_int axq_depth; /* SW queue depth */
char axq_name[48]; /* lock name */
struct ath_node *an; /* pointer to parent */
More information about the svn-src-user
mailing list