Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c  463
1 file changed, 199 insertions(+), 264 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 859aa4ab0769..f2ade2402ce2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -61,6 +61,8 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
int nbad, int txok, bool update_rc);
+static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+ int seqno);
enum {
MCS_HT20,
@@ -120,26 +122,14 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
list_add_tail(&ac->list, &txq->axq_acq);
}
-static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
-{
- struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
-
- spin_lock_bh(&txq->axq_lock);
- tid->paused++;
- spin_unlock_bh(&txq->axq_lock);
-}
-
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
- BUG_ON(tid->paused <= 0);
- spin_lock_bh(&txq->axq_lock);
-
- tid->paused--;
+ WARN_ON(!tid->paused);
- if (tid->paused > 0)
- goto unlock;
+ spin_lock_bh(&txq->axq_lock);
+ tid->paused = false;
if (list_empty(&tid->buf_q))
goto unlock;
@@ -155,23 +145,23 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
struct ath_buf *bf;
struct list_head bf_head;
+ struct ath_tx_status ts;
+
INIT_LIST_HEAD(&bf_head);
- BUG_ON(tid->paused <= 0);
+ memset(&ts, 0, sizeof(ts));
spin_lock_bh(&txq->axq_lock);
- tid->paused--;
-
- if (tid->paused > 0) {
- spin_unlock_bh(&txq->axq_lock);
- return;
- }
-
while (!list_empty(&tid->buf_q)) {
bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
- BUG_ON(bf_isretried(bf));
list_move_tail(&bf->list, &bf_head);
- ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
+
+ if (bf_isretried(bf)) {
+ ath_tx_update_baw(sc, tid, bf->bf_seqno);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
+ } else {
+ ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
+ }
}
spin_unlock_bh(&txq->axq_lock);
@@ -185,9 +175,9 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
index = ATH_BA_INDEX(tid->seq_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
- tid->tx_buf[cindex] = NULL;
+ __clear_bit(cindex, tid->tx_buf);
- while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
+ while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
INCR(tid->seq_start, IEEE80211_SEQ_MAX);
INCR(tid->baw_head, ATH_TID_MAX_BUFS);
}
@@ -203,9 +193,7 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
-
- BUG_ON(tid->tx_buf[cindex] != NULL);
- tid->tx_buf[cindex] = bf;
+ __set_bit(cindex, tid->tx_buf);
if (index >= ((tid->baw_tail - tid->baw_head) &
(ATH_TID_MAX_BUFS - 1))) {
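The two hunks above convert the TID's block-ack window from an array of ath_buf pointers to a bitmap: ath_tx_addto_baw() sets the bit for a subframe's slot, and ath_tx_update_baw() clears it and then slides seq_start/baw_head forward past every leading slot that has completed. A minimal userspace sketch of that sliding-window logic, with a plain bool array standing in for the kernel bitmap and made-up demo values:

/*
 * Sketch only: bool array instead of the kernel bitmap helpers
 * (__set_bit/__clear_bit/test_bit); constants mirror the driver.
 */
#include <stdbool.h>
#include <stdio.h>

#define ATH_TID_MAX_BUFS 128			/* window slots, power of two */
#define IEEE80211_SEQ_MAX 4096
#define INCR(v, max) ((v) = ((v) + 1) % (max))

static bool tx_buf[ATH_TID_MAX_BUFS];		/* tid->tx_buf stand-in */
static int baw_head, baw_tail;
static int seq_start;				/* lowest outstanding seqno */

/* ath_tx_addto_baw(): mark seqno's slot as in-flight */
static void baw_add(int seqno)
{
	int index  = (seqno - seq_start) & (IEEE80211_SEQ_MAX - 1);
	int cindex = (baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tx_buf[cindex] = true;			/* __set_bit() in the driver */
	if (index >= ((baw_tail - baw_head) & (ATH_TID_MAX_BUFS - 1))) {
		baw_tail = cindex;
		INCR(baw_tail, ATH_TID_MAX_BUFS);
	}
}

/* ath_tx_update_baw(): complete seqno, slide past finished leading slots */
static void baw_update(int seqno)
{
	int index  = (seqno - seq_start) & (IEEE80211_SEQ_MAX - 1);
	int cindex = (baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tx_buf[cindex] = false;			/* __clear_bit() in the driver */
	while (baw_head != baw_tail && !tx_buf[baw_head]) {
		INCR(seq_start, IEEE80211_SEQ_MAX);
		INCR(baw_head, ATH_TID_MAX_BUFS);
	}
}

int main(void)
{
	baw_add(0); baw_add(1); baw_add(2);
	baw_update(1);		/* completes out of order: window stays */
	printf("seq_start=%d\n", seq_start);	/* 0 */
	baw_update(0);		/* head done: slides past seqno 0 and 1 */
	printf("seq_start=%d\n", seq_start);	/* 2 */
	return 0;
}

The pointer previously stored in tx_buf[] was only ever tested against NULL in these two paths, which is why a single bit per slot suffices.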
@@ -306,7 +294,6 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
tbf->bf_buf_addr = bf->bf_buf_addr;
memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
tbf->bf_state = bf->bf_state;
- tbf->bf_dmacontext = bf->bf_dmacontext;
return tbf;
}
@@ -328,6 +315,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
bool rc_update = true;
+ struct ieee80211_tx_rate rates[4];
+ int nframes;
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -335,18 +324,44 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
tx_info = IEEE80211_SKB_CB(skb);
hw = bf->aphy->hw;
+ memcpy(rates, tx_info->control.rates, sizeof(rates));
+ nframes = bf->bf_nframes;
+
rcu_read_lock();
- /* XXX: use ieee80211_find_sta! */
- sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
+ sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
if (!sta) {
rcu_read_unlock();
+
+ INIT_LIST_HEAD(&bf_head);
+ while (bf) {
+ bf_next = bf->bf_next;
+
+ bf->bf_state.bf_type |= BUF_XRETRY;
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
+ !bf->bf_stale || bf_next != NULL)
+ list_move_tail(&bf->list, &bf_head);
+
+ ath_tx_rc_status(bf, ts, 1, 0, false);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
+ 0, 0);
+
+ bf = bf_next;
+ }
return;
}
an = (struct ath_node *)sta->drv_priv;
tid = ATH_AN_2_TID(an, bf->bf_tidno);
+ /*
+ * The hardware occasionally sends a tx status for the wrong TID.
+ * In this case, the BA status cannot be considered valid and all
+ * subframes need to be retransmitted
+ */
+ if (bf->bf_tidno != ts->tid)
+ txok = false;
+
isaggr = bf_isaggr(bf);
memset(ba, 0, WME_BA_BMP_SIZE >> 3);
@@ -375,6 +390,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
txfail = txpending = 0;
bf_next = bf->bf_next;
+ skb = bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+
if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
/* transmit completion, subframe is
* acked by block ack */
@@ -418,7 +436,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
list_move_tail(&bf->list, &bf_head);
}
- if (!txpending) {
+ if (!txpending || (tid->state & AGGR_CLEANUP)) {
/*
* complete the acked-ones/xretried ones; update
* block-ack window
@@ -428,6 +446,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
+ memcpy(tx_info->control.rates, rates, sizeof(rates));
+ bf->bf_nframes = nframes;
ath_tx_rc_status(bf, ts, nbad, txok, true);
rc_update = false;
} else {
@@ -487,18 +507,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf = bf_next;
}
- if (tid->state & AGGR_CLEANUP) {
- if (tid->baw_head == tid->baw_tail) {
- tid->state &= ~AGGR_ADDBA_COMPLETE;
- tid->state &= ~AGGR_CLEANUP;
-
- /* send buffered frames as singles */
- ath_tx_flush_tid(sc, tid);
- }
- rcu_read_unlock();
- return;
- }
-
/* prepend un-acked frames to the beginning of the pending frame queue */
if (!list_empty(&bf_pending)) {
spin_lock_bh(&txq->axq_lock);
@@ -507,6 +515,15 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
spin_unlock_bh(&txq->axq_lock);
}
+ if (tid->state & AGGR_CLEANUP) {
+ ath_tx_flush_tid(sc, tid);
+
+ if (tid->baw_head == tid->baw_tail) {
+ tid->state &= ~AGGR_ADDBA_COMPLETE;
+ tid->state &= ~AGGR_CLEANUP;
+ }
+ }
+
rcu_read_unlock();
if (needreset)
@@ -656,6 +673,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
u16 aggr_limit = 0, al = 0, bpad = 0,
al_delta, h_baw = tid->baw_size / 2;
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+ struct ieee80211_tx_info *tx_info;
bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
@@ -682,6 +700,11 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
break;
}
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
+ break;
+
/* do not exceed subframe limit */
if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
status = ATH_AGGR_LIMITED;
@@ -771,17 +794,23 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
status != ATH_AGGR_BAW_CLOSED);
}
-void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn)
{
struct ath_atx_tid *txtid;
struct ath_node *an;
an = (struct ath_node *)sta->drv_priv;
txtid = ATH_AN_2_TID(an, tid);
+
+ if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
+ return -EAGAIN;
+
txtid->state |= AGGR_ADDBA_PROGRESS;
- ath_tx_pause_tid(sc, txtid);
+ txtid->paused = true;
*ssn = txtid->seq_start;
+
+ return 0;
}
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
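ath_tx_aggr_start() now refuses to start a session on a TID that is still in cleanup or already has one, returning -EAGAIN instead of blindly pausing it. A hedged sketch of how the caller, the ampdu_action handler in ath9k/main.c (outside this diff), would consume the return value so that mac80211 only proceeds with the ADDBA handshake on success:

/*
 * Illustrative fragment, assuming the mac80211 ampdu_action callback
 * of this era; the exact surrounding code lives in main.c, not here.
 */
case IEEE80211_AMPDU_TX_START:
	ret = ath_tx_aggr_start(sc, sta, tid, ssn);
	if (!ret)
		/* TID claimed: let mac80211 send the ADDBA request */
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	break;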
@@ -789,12 +818,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
- struct ath_tx_status ts;
- struct ath_buf *bf;
- struct list_head bf_head;
-
- memset(&ts, 0, sizeof(ts));
- INIT_LIST_HEAD(&bf_head);
if (txtid->state & AGGR_CLEANUP)
return;
@@ -804,32 +827,22 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
return;
}
- ath_tx_pause_tid(sc, txtid);
-
- /* drop all software retried frames and mark this TID */
spin_lock_bh(&txq->axq_lock);
- while (!list_empty(&txtid->buf_q)) {
- bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
- if (!bf_isretried(bf)) {
- /*
- * NB: it's based on the assumption that
- * software retried frame will always stay
- * at the head of software queue.
- */
- break;
- }
- list_move_tail(&bf->list, &bf_head);
- ath_tx_update_baw(sc, txtid, bf->bf_seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
- }
- spin_unlock_bh(&txq->axq_lock);
+ txtid->paused = true;
- if (txtid->baw_head != txtid->baw_tail) {
+ /*
+ * If frames are still being transmitted for this TID, they will be
+ * cleaned up during tx completion. To prevent race conditions, this
+ * TID can only be reused after all in-progress subframes have been
+ * completed.
+ */
+ if (txtid->baw_head != txtid->baw_tail)
txtid->state |= AGGR_CLEANUP;
- } else {
+ else
txtid->state &= ~AGGR_ADDBA_COMPLETE;
- ath_tx_flush_tid(sc, txtid);
- }
+ spin_unlock_bh(&txq->axq_lock);
+
+ ath_tx_flush_tid(sc, txtid);
}
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
@@ -849,20 +862,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
}
}
-bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
-{
- struct ath_atx_tid *txtid;
-
- if (!(sc->sc_flags & SC_OP_TXAGGR))
- return false;
-
- txtid = ATH_AN_2_TID(an, tidno);
-
- if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
- return true;
- return false;
-}
-
/********************/
/* Queue Management */
/********************/
@@ -941,6 +940,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
if (!ATH_TXQ_SETUP(sc, qnum)) {
struct ath_txq *txq = &sc->tx.txq[qnum];
+ txq->axq_class = subtype;
txq->axq_qnum = qnum;
txq->axq_link = NULL;
INIT_LIST_HEAD(&txq->axq_q);
@@ -958,58 +958,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
return &sc->tx.txq[qnum];
}
-int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
-{
- int qnum;
-
- switch (qtype) {
- case ATH9K_TX_QUEUE_DATA:
- if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "HAL AC %u out of range, max %zu!\n",
- haltype, ARRAY_SIZE(sc->tx.hwq_map));
- return -1;
- }
- qnum = sc->tx.hwq_map[haltype];
- break;
- case ATH9K_TX_QUEUE_BEACON:
- qnum = sc->beacon.beaconq;
- break;
- case ATH9K_TX_QUEUE_CAB:
- qnum = sc->beacon.cabq->axq_qnum;
- break;
- default:
- qnum = -1;
- }
- return qnum;
-}
-
-struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
-{
- struct ath_txq *txq = NULL;
- u16 skb_queue = skb_get_queue_mapping(skb);
- int qnum;
-
- qnum = ath_get_hal_qnum(skb_queue, sc);
- txq = &sc->tx.txq[qnum];
-
- spin_lock_bh(&txq->axq_lock);
-
- if (txq->axq_depth >= (ATH_TXBUF - 20)) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
- "TX queue: %d is full, depth: %d\n",
- qnum, txq->axq_depth);
- ath_mac80211_stop_queue(sc, skb_queue);
- txq->stopped = 1;
- spin_unlock_bh(&txq->axq_lock);
- return NULL;
- }
-
- spin_unlock_bh(&txq->axq_lock);
-
- return txq;
-}
-
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *qinfo)
{
@@ -1141,15 +1089,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
txq->axq_tx_inprogress = false;
spin_unlock_bh(&txq->axq_lock);
- /* flush any pending frames if aggregation is enabled */
- if (sc->sc_flags & SC_OP_TXAGGR) {
- if (!retry_tx) {
- spin_lock_bh(&txq->axq_lock);
- ath_txq_drain_pending_buffers(sc, txq);
- spin_unlock_bh(&txq->axq_lock);
- }
- }
-
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
spin_lock_bh(&txq->axq_lock);
while (!list_empty(&txq->txq_fifo_pending)) {
@@ -1170,6 +1109,15 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
}
spin_unlock_bh(&txq->axq_lock);
}
+
+ /* flush any pending frames if aggregation is enabled */
+ if (sc->sc_flags & SC_OP_TXAGGR) {
+ if (!retry_tx) {
+ spin_lock_bh(&txq->axq_lock);
+ ath_txq_drain_pending_buffers(sc, txq);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+ }
}
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1201,7 +1149,7 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
"Failed to stop TX DMA. Resetting hardware!\n");
spin_lock_bh(&sc->sc_resetlock);
- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
+ r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
if (r)
ath_print(common, ATH_DBG_FATAL,
"Unable to reset hardware; reset status %d\n",
@@ -1445,22 +1393,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
return htype;
}
-static int get_hw_crypto_keytype(struct sk_buff *skb)
-{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-
- if (tx_info->control.hw_key) {
- if (tx_info->control.hw_key->alg == ALG_WEP)
- return ATH9K_KEY_TYPE_WEP;
- else if (tx_info->control.hw_key->alg == ALG_TKIP)
- return ATH9K_KEY_TYPE_TKIP;
- else if (tx_info->control.hw_key->alg == ALG_CCMP)
- return ATH9K_KEY_TYPE_AES;
- }
-
- return ATH9K_KEY_TYPE_CLEAR;
-}
-
static void assign_aggr_tid_seqno(struct sk_buff *skb,
struct ath_buf *bf)
{
@@ -1688,15 +1620,18 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_frmlen -= padsize;
}
- if (conf_is_ht(&hw->conf)) {
+ if (!txctl->paprd && conf_is_ht(&hw->conf)) {
bf->bf_state.bf_type |= BUF_HT;
if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
use_ldpc = true;
}
+ bf->bf_state.bfs_paprd = txctl->paprd;
+ if (txctl->paprd)
+ bf->bf_state.bfs_paprd_timestamp = jiffies;
bf->bf_flags = setup_tx_flags(skb, use_ldpc);
- bf->bf_keytype = get_hw_crypto_keytype(skb);
+ bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
bf->bf_frmlen += tx_info->control.hw_key->icv_len;
bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
@@ -1710,24 +1645,16 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_mpdu = skb;
- bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
+ bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
bf->bf_mpdu = NULL;
+ bf->bf_buf_addr = 0;
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
"dma_mapping_error() on TX\n");
return -ENOMEM;
}
- bf->bf_buf_addr = bf->bf_dmacontext;
-
- /* tag if this is a nullfunc frame to enable PS when AP acks it */
- if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
- bf->bf_isnullfunc = true;
- sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
- } else
- bf->bf_isnullfunc = false;
-
bf->bf_tx_aborted = false;
return 0;
@@ -1768,6 +1695,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
bf->bf_buf_addr,
txctl->txq->axq_qnum);
+ if (bf->bf_state.bfs_paprd)
+ ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
+
spin_lock_bh(&txctl->txq->axq_lock);
if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
@@ -1809,8 +1739,9 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_txq *txq = txctl->txq;
struct ath_buf *bf;
- int r;
+ int q, r;
bf = ath_tx_get_buffer(sc);
if (!bf) {
@@ -1820,8 +1751,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
r = ath_tx_setup_buffer(hw, bf, skb, txctl);
if (unlikely(r)) {
- struct ath_txq *txq = txctl->txq;
-
ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
/* upon ath_tx_processq() this TX queue will be resumed, we
@@ -1829,7 +1758,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
* we will at least have to run TX completion on one buffer
* on the queue */
spin_lock_bh(&txq->axq_lock);
- if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
+ if (!txq->stopped && txq->axq_depth > 1) {
ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
txq->stopped = 1;
}
@@ -1840,6 +1769,17 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
return r;
}
+ q = skb_get_queue_mapping(skb);
+ if (q >= 4)
+ q = 0;
+
+ spin_lock_bh(&txq->axq_lock);
+ if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
+ ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+ txq->stopped = 1;
+ }
+ spin_unlock_bh(&txq->axq_lock);
+
ath_tx_start_dma(sc, bf, txctl);
return 0;
@@ -1909,7 +1849,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
- int padpos, padsize;
+ int q, padpos, padsize;
ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
@@ -1948,8 +1888,16 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
ath9k_tx_status(hw, skb);
- else
+ else {
+ q = skb_get_queue_mapping(skb);
+ if (q >= 4)
+ q = 0;
+
+ if (--sc->tx.pending_frames[q] < 0)
+ sc->tx.pending_frames[q] = 0;
+
ieee80211_tx_status(hw, skb);
+ }
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1970,9 +1918,24 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
tx_flags |= ATH_TX_XRETRY;
}
- dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
- ath_tx_complete(sc, skb, bf->aphy, tx_flags);
- ath_debug_stat_tx(sc, txq, bf, ts);
+ dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
+ bf->bf_buf_addr = 0;
+
+ if (bf->bf_state.bfs_paprd) {
+ if (time_after(jiffies,
+ bf->bf_state.bfs_paprd_timestamp +
+ msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
+ dev_kfree_skb_any(skb);
+ else
+ complete(&sc->paprd_complete);
+ } else {
+ ath_debug_stat_tx(sc, txq, bf, ts);
+ ath_tx_complete(sc, skb, bf->aphy, tx_flags);
+ }
+ /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
+ * accidentally reference it later.
+ */
+ bf->bf_mpdu = NULL;
/*
* Return the list of ath_buf of this mpdu to free queue
@@ -2028,9 +1991,15 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
if (ts->ts_status & ATH9K_TXERR_FILT)
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+ BUG_ON(nbad > bf->bf_nframes);
+
+ tx_info->status.ampdu_len = bf->bf_nframes;
+ tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
+ }
+
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
if (ieee80211_is_data(hdr->frame_control)) {
@@ -2040,8 +2009,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
(ts->ts_status & ATH9K_TXERR_FIFO))
tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
- tx_info->status.ampdu_len = bf->bf_nframes;
- tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
}
}
@@ -2050,21 +2017,21 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
tx_info->status.rates[i].idx = -1;
}
- tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
+ tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
int qnum;
+ qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
+ if (qnum == -1)
+ return;
+
spin_lock_bh(&txq->axq_lock);
- if (txq->stopped &&
- sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
- qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
- if (qnum != -1) {
- ath_mac80211_start_queue(sc, qnum);
+ if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
+ if (ath_mac80211_start_queue(sc, qnum))
txq->stopped = 0;
- }
}
spin_unlock_bh(&txq->axq_lock);
}
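Taken together, the ath_tx_start(), ath_tx_complete() and ath_wake_mac80211_queue() hunks replace the old hardware-descriptor depth check with a per-queue software counter: enqueue increments pending_frames[q] and stops the mac80211 queue past a threshold, completion decrements it, and the wake path restarts the queue once the count drops back below ATH_MAX_QDEPTH. A compact userspace model of that hysteresis (single queue, locking elided, threshold value assumed):

#include <stdbool.h>
#include <stdio.h>

#define ATH_MAX_QDEPTH 128		/* assumed; the driver has its own */

static int pending_frames;
static bool stopped;

static void tx_start(void)		/* ath_tx_start() enqueue path */
{
	if (++pending_frames > ATH_MAX_QDEPTH && !stopped) {
		stopped = true;		/* ath_mac80211_stop_queue() */
		printf("stopped at %d pending\n", pending_frames);
	}
}

static void tx_complete(void)		/* completion + wake path */
{
	if (--pending_frames < 0)
		pending_frames = 0;
	if (stopped && pending_frames < ATH_MAX_QDEPTH) {
		stopped = false;	/* ath_mac80211_start_queue() */
		printf("woken at %d pending\n", pending_frames);
	}
}

int main(void)
{
	for (int i = 0; i <= ATH_MAX_QDEPTH; i++)
		tx_start();		/* one frame past the limit stops */
	tx_complete();			/* at the threshold: still stopped */
	tx_complete();			/* below the threshold: queue wakes */
	return 0;
}

Note the wake condition is strictly below the threshold, so a stopped queue needs to drain past the stop point before restarting; that one-frame band keeps the queue from flapping on every completion.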
@@ -2124,18 +2091,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
}
/*
- * We now know the nullfunc frame has been ACKed so we
- * can disable RX.
- */
- if (bf->bf_isnullfunc &&
- (ts.ts_status & ATH9K_TX_ACKED)) {
- if ((sc->ps_flags & PS_ENABLED))
- ath9k_enable_ps(sc);
- else
- sc->ps_flags |= PS_NULLFUNC_COMPLETED;
- }
-
- /*
* Remove ath_buf's of the same transmit unit from txq,
* however leave the last descriptor back as the holding
* descriptor for hw.
@@ -2161,10 +2116,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* This frame is sent out as a single frame.
* Use hardware retry status for this frame.
*/
- bf->bf_retries = ts.ts_longretry;
if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, &ts, 0, txok, true);
+ ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
}
if (bf_isampdu(bf))
@@ -2209,7 +2163,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
"tx hung, resetting the chip\n");
ath9k_ps_wakeup(sc);
- ath_reset(sc, false);
+ ath_reset(sc, true);
ath9k_ps_restore(sc);
}
@@ -2280,10 +2234,9 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
txok = !(txs.ts_status & ATH9K_TXERR_MASK);
if (!bf_isampdu(bf)) {
- bf->bf_retries = txs.ts_longretry;
if (txs.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, &txs, 0, txok, true);
+ ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
}
if (bf_isampdu(bf))
@@ -2424,62 +2377,44 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
for (acno = 0, ac = &an->ac[acno];
acno < WME_NUM_AC; acno++, ac++) {
ac->sched = false;
+ ac->qnum = sc->tx.hwq_map[acno];
INIT_LIST_HEAD(&ac->tid_q);
-
- switch (acno) {
- case WME_AC_BE:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
- break;
- case WME_AC_BK:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
- break;
- case WME_AC_VI:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
- break;
- case WME_AC_VO:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
- break;
- }
}
}
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
- int i;
- struct ath_atx_ac *ac, *ac_tmp;
- struct ath_atx_tid *tid, *tid_tmp;
+ struct ath_atx_ac *ac;
+ struct ath_atx_tid *tid;
struct ath_txq *txq;
+ int i, tidno;
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (ATH_TXQ_SETUP(sc, i)) {
- txq = &sc->tx.txq[i];
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < WME_NUM_TID; tidno++, tid++) {
+ i = tid->ac->qnum;
- spin_lock_bh(&txq->axq_lock);
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
- list_for_each_entry_safe(ac,
- ac_tmp, &txq->axq_acq, list) {
- tid = list_first_entry(&ac->tid_q,
- struct ath_atx_tid, list);
- if (tid && tid->an != an)
- continue;
- list_del(&ac->list);
- ac->sched = false;
-
- list_for_each_entry_safe(tid,
- tid_tmp, &ac->tid_q, list) {
- list_del(&tid->list);
- tid->sched = false;
- ath_tid_drain(sc, txq, tid);
- tid->state &= ~AGGR_ADDBA_COMPLETE;
- tid->state &= ~AGGR_CLEANUP;
- }
- }
+ txq = &sc->tx.txq[i];
+ ac = tid->ac;
- spin_unlock_bh(&txq->axq_lock);
+ spin_lock_bh(&txq->axq_lock);
+
+ if (tid->sched) {
+ list_del(&tid->list);
+ tid->sched = false;
}
+
+ if (ac->sched) {
+ list_del(&ac->list);
+ tid->ac->sched = false;
+ }
+
+ ath_tid_drain(sc, txq, tid);
+ tid->state &= ~AGGR_ADDBA_COMPLETE;
+ tid->state &= ~AGGR_CLEANUP;
+
+ spin_unlock_bh(&txq->axq_lock);
}
}