+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -61,6 +61,8 @@ static int ath_tx_num_badfrms(struct ath
+ 			      struct ath_tx_status *ts, int txok);
+ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
+ 			     int nbad, int txok, bool update_rc);
++static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
++			      int seqno);
+
+ enum {
+ 	MCS_HT20,
+@@ -144,18 +146,23 @@ static void ath_tx_flush_tid(struct ath_
+ 	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
+ 	struct ath_buf *bf;
+ 	struct list_head bf_head;
+-	INIT_LIST_HEAD(&bf_head);
++	struct ath_tx_status ts;
+
+-	WARN_ON(!tid->paused);
++	INIT_LIST_HEAD(&bf_head);
+
++	memset(&ts, 0, sizeof(ts));
+ 	spin_lock_bh(&txq->axq_lock);
+-	tid->paused = false;
+
+ 	while (!list_empty(&tid->buf_q)) {
+ 		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
+-		BUG_ON(bf_isretried(bf));
+ 		list_move_tail(&bf->list, &bf_head);
+-		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
++
++		if (bf_isretried(bf)) {
++			ath_tx_update_baw(sc, tid, bf->bf_seqno);
++			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
++		} else {
++			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
++		}
+ 	}
+
+ 	spin_unlock_bh(&txq->axq_lock);
+@@ -430,7 +437,7 @@ static void ath_tx_complete_aggr(struct
+ 			list_move_tail(&bf->list, &bf_head);
+ 		}
+
+-		if (!txpending) {
++		if (!txpending || (tid->state & AGGR_CLEANUP)) {
+ 			/*
+ 			 * complete the acked-ones/xretried ones; update
+ 			 * block-ack window
+@@ -451,6 +458,7 @@ static void ath_tx_complete_aggr(struct
+ 				!txfail, sendbar);
+ 		} else {
+ 			/* retry the un-acked ones */
++
+ 			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
+ 				if (bf->bf_next == NULL && bf_last->bf_stale) {
+ 					struct ath_buf *tbf;
+@@ -509,15 +517,12 @@ static void ath_tx_complete_aggr(struct
+ 	}
+
+ 	if (tid->state & AGGR_CLEANUP) {
++		ath_tx_flush_tid(sc, tid);
++
+ 		if (tid->baw_head == tid->baw_tail) {
+ 			tid->state &= ~AGGR_ADDBA_COMPLETE;
+ 			tid->state &= ~AGGR_CLEANUP;
+-
+-			/* send buffered frames as singles */
+-			ath_tx_flush_tid(sc, tid);
+ 		}
+-		rcu_read_unlock();
+-		return;
+ 	}
+
+ 	rcu_read_unlock();
+@@ -808,12 +813,6 @@ void ath_tx_aggr_stop(struct ath_softc *
+ 	struct ath_node *an = (struct ath_node *)sta->drv_priv;
+ 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
+ 	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
+-	struct ath_tx_status ts;
+-	struct ath_buf *bf;
+-	struct list_head bf_head;
+-
+-	memset(&ts, 0, sizeof(ts));
+-	INIT_LIST_HEAD(&bf_head);
+
+ 	if (txtid->state & AGGR_CLEANUP)
+ 		return;
+@@ -823,31 +822,22 @@ void ath_tx_aggr_stop(struct ath_softc *
+ 		return;
+ 	}
+
+-	/* drop all software retried frames and mark this TID */
+ 	spin_lock_bh(&txq->axq_lock);
+ 	txtid->paused = true;
+-	while (!list_empty(&txtid->buf_q)) {
+-		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
+-		if (!bf_isretried(bf)) {
+-			/*
+-			 * NB: it's based on the assumption that
+-			 * software retried frame will always stay
+-			 * at the head of software queue.
+-			 */
+-			break;
+-		}
+-		list_move_tail(&bf->list, &bf_head);
+-		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
+-		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
+-	}
+-	spin_unlock_bh(&txq->axq_lock);
+
+-	if (txtid->baw_head != txtid->baw_tail) {
++	/*
++	 * If frames are still being transmitted for this TID, they will be
++	 * cleaned up during tx completion. To prevent race conditions, this
++	 * TID can only be reused after all in-progress subframes have been
++	 * completed.
++	 */
++	if (txtid->baw_head != txtid->baw_tail)
+ 		txtid->state |= AGGR_CLEANUP;
+-	} else {
++	else
+ 		txtid->state &= ~AGGR_ADDBA_COMPLETE;
+-		ath_tx_flush_tid(sc, txtid);
+-	}
++	spin_unlock_bh(&txq->axq_lock);
++
++	ath_tx_flush_tid(sc, txtid);
+ }
+
+ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
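
The comment added to ath_tx_aggr_stop() captures the invariant the whole change relies on: AGGR_CLEANUP may only be dropped, and the TID reused, once the block-ack window has drained, i.e. once baw_head has caught up with baw_tail after every in-flight subframe has been completed through ath_tx_update_baw(). The standalone program below is a minimal sketch of that bookkeeping, assuming a fixed 64-slot window, no sequence-number wrap-around, and hypothetical model_* names; it illustrates the draining condition, it is not the ath9k implementation.

/*
 * Standalone model of the block-ack window bookkeeping described above.
 * Fixed 64-slot window, no sequence-number wrap-around, model_* names
 * are illustrative assumptions; not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_BAW_SIZE 64

struct model_tid {
	bool pending[MODEL_BAW_SIZE];	/* slot still has a subframe in flight */
	int baw_head;			/* oldest slot not yet completed */
	int baw_tail;			/* next free slot */
	bool aggr_cleanup;		/* stands in for AGGR_CLEANUP */
};

/* queue a subframe: claim the next window slot */
static void model_add_subframe(struct model_tid *t)
{
	t->pending[t->baw_tail % MODEL_BAW_SIZE] = true;
	t->baw_tail++;
}

/* complete one subframe, then slide baw_head past every finished slot */
static void model_update_baw(struct model_tid *t, int slot)
{
	t->pending[slot % MODEL_BAW_SIZE] = false;
	while (t->baw_head != t->baw_tail &&
	       !t->pending[t->baw_head % MODEL_BAW_SIZE])
		t->baw_head++;
}

/* the condition the patch waits for before clearing AGGR_CLEANUP */
static bool model_tid_drained(const struct model_tid *t)
{
	return t->baw_head == t->baw_tail;
}

int main(void)
{
	struct model_tid t = { .aggr_cleanup = true };
	int i;

	for (i = 0; i < 4; i++)
		model_add_subframe(&t);

	/* slot 2 completes first: baw_head cannot move, TID is not drained */
	model_update_baw(&t, 2);
	printf("drained after slot 2 only: %d\n", model_tid_drained(&t));

	model_update_baw(&t, 0);
	model_update_baw(&t, 1);
	model_update_baw(&t, 3);
	printf("drained after all slots:   %d\n", model_tid_drained(&t));

	/* only now is it safe to reset aggregation state and reuse the TID */
	if (model_tid_drained(&t))
		t.aggr_cleanup = false;

	return t.aggr_cleanup ? 1 : 0;
}

Running the sketch prints 0 after the out-of-order completion (slot 2 finishes first, so the head cannot advance) and 1 once all slots are done, which corresponds to the point at which ath_tx_complete_aggr() is allowed to clear AGGR_CLEANUP in the hunk above.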