--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -169,13 +169,11 @@ static void ath_tx_flush_tid(struct ath_
 	INIT_LIST_HEAD(&bf_head);
 	memset(&ts, 0, sizeof(ts));
-	spin_lock_bh(&txq->axq_lock);
 	while ((skb = __skb_dequeue(&tid->buf_q))) {
 		fi = get_frame_info(skb);
-		spin_unlock_bh(&txq->axq_lock);
 		if (bf && fi->retries) {
 			list_add_tail(&bf->list, &bf_head);
 			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
@@ -184,7 +182,6 @@ static void ath_tx_flush_tid(struct ath_
 			ath_tx_send_normal(sc, txq, NULL, skb);
-		spin_lock_bh(&txq->axq_lock);
 	if (tid->baw_head == tid->baw_tail) {
@@ -192,8 +189,6 @@ static void ath_tx_flush_tid(struct ath_
 		tid->state &= ~AGGR_CLEANUP;
-	spin_unlock_bh(&txq->axq_lock);
 	ath_send_bar(tid, tid->seq_start);
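
Note on the pattern in the three hunks above: ath_tx_flush_tid used to take
axq_lock itself and drop it around every completion call, leaving windows in
which the TID queue state could change under it. After the patch the function
assumes the caller already holds axq_lock for the whole flush. A minimal
sketch of the two schemes; struct queue, struct item and complete_item() are
hypothetical stand-ins, not ath9k code:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item { struct list_head list; };
	struct queue { spinlock_t lock; struct list_head items; };

	void complete_item(struct item *it);

	/* Old scheme: the helper manages the lock and must drop it
	 * around the completion callback. */
	static void flush_old(struct queue *q)
	{
		struct item *it;

		spin_lock_bh(&q->lock);
		while (!list_empty(&q->items)) {
			it = list_first_entry(&q->items, struct item, list);
			list_del(&it->list);

			spin_unlock_bh(&q->lock);	/* unlocked window */
			complete_item(it);
			spin_lock_bh(&q->lock);
		}
		spin_unlock_bh(&q->lock);
	}

	/* New scheme: the caller owns the lock; no unlocked windows. */
	static void flush_new(struct queue *q)	/* q->lock held by caller */
	{
		struct item *it;

		while (!list_empty(&q->items)) {
			it = list_first_entry(&q->items, struct item, list);
			list_del(&it->list);
			complete_item(it);
		}
	}
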
@@ -254,9 +249,7 @@ static void ath_tid_drain(struct ath_sof
-			spin_unlock(&txq->axq_lock);
 			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
-			spin_lock(&txq->axq_lock);
@@ -265,9 +258,7 @@ static void ath_tid_drain(struct ath_sof
 			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
-		spin_unlock(&txq->axq_lock);
 		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
-		spin_lock(&txq->axq_lock);
 	tid->seq_next = tid->seq_start;
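
Note: the drop/reacquire removed in ath_tid_drain existed because the
completion paths could themselves take txq->axq_lock (see the -2003 hunk
further down, where ath_tx_complete drops exactly that locking), so calling
them with the lock held would deadlock a non-recursive spinlock:

	/* Pre-patch hazard, sketched: */
	spin_lock(&txq->axq_lock);
	ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
		/* -> spin_lock_bh(&txq->axq_lock) again: self-deadlock */

Once the completion paths stop taking the lock themselves, the caller can
simply keep holding it across the loop.
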
@@ -525,9 +516,7 @@ static void ath_tx_complete_aggr(struct
 			 * complete the acked-ones/xretried ones; update
-			spin_lock_bh(&txq->axq_lock);
 			ath_tx_update_baw(sc, tid, seqno);
-			spin_unlock_bh(&txq->axq_lock);
 			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
 				memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -550,9 +539,7 @@ static void ath_tx_complete_aggr(struct
-				spin_lock_bh(&txq->axq_lock);
 				ath_tx_update_baw(sc, tid, seqno);
-				spin_unlock_bh(&txq->axq_lock);
 				ath_tx_complete_buf(sc, bf, txq,
@@ -582,7 +569,6 @@ static void ath_tx_complete_aggr(struct
 			ieee80211_sta_set_buffered(sta, tid->tidno, true);
-		spin_lock_bh(&txq->axq_lock);
 		skb_queue_splice(&bf_pending, &tid->buf_q);
 			ath_tx_queue_tid(txq, tid);
@@ -590,7 +576,6 @@ static void ath_tx_complete_aggr(struct
 			if (ts->ts_status & ATH9K_TXERR_FILT)
 				tid->ac->clear_ps_filter = true;
-		spin_unlock_bh(&txq->axq_lock);
 	if (tid->state & AGGR_CLEANUP)
@@ -1190,9 +1175,9 @@ void ath_tx_aggr_stop(struct ath_softc *
 	txtid->state |= AGGR_CLEANUP;
 	txtid->state &= ~AGGR_ADDBA_COMPLETE;
-	spin_unlock_bh(&txq->axq_lock);
 	ath_tx_flush_tid(sc, txtid);
+	spin_unlock_bh(&txq->axq_lock);
 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
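
Note: the -1190 hunk is the counterpart of the ath_tx_flush_tid changes
above. Instead of dropping axq_lock before the flush (which then re-took it
internally), ath_tx_aggr_stop keeps the lock across the whole operation.
With the lock taken earlier in the function (outside this hunk, as the
removed unlock implies), the tail of the function now reads roughly:

	txtid->state |= AGGR_CLEANUP;
	txtid->state &= ~AGGR_ADDBA_COMPLETE;
	ath_tx_flush_tid(sc, txtid);	/* now expects axq_lock held */
	spin_unlock_bh(&txq->axq_lock);
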
@@ -1434,8 +1419,6 @@ static bool bf_is_ampdu_not_probing(stru
 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
 			       struct list_head *list, bool retry_tx)
-	__releases(txq->axq_lock)
-	__acquires(txq->axq_lock)
 	struct ath_buf *bf, *lastbf;
 	struct list_head bf_head;
@@ -1462,13 +1445,11 @@ static void ath_drain_txq_list(struct at
 		if (bf_is_ampdu_not_probing(bf))
 			txq->axq_ampdu_depth--;
-		spin_unlock_bh(&txq->axq_lock);
 			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
 			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
-		spin_lock_bh(&txq->axq_lock);
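
Note: __releases() and __acquires() are sparse context annotations: they
declare that a function is entered with the lock held, drops it, and
re-takes it before returning. They are deleted here because
ath_drain_txq_list no longer drops axq_lock mid-loop, so its locking context
is balanced and needs no annotation. For reference, the shape they describe,
reusing the hypothetical queue/item types from the sketch above:

	/* Entered with q->lock held; temporarily releases it. */
	static void complete_unlocked(struct queue *q, struct item *it)
		__releases(q->lock)
		__acquires(q->lock)
	{
		spin_unlock_bh(&q->lock);
		complete_item(it);	/* runs without the lock */
		spin_lock_bh(&q->lock);
	}
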
@@ -1847,8 +1828,6 @@ static void ath_tx_start_dma(struct ath_
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	spin_lock_bh(&txctl->txq->axq_lock);
 	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
 		 * Try aggregation if it's a unicast data frame
@@ -1858,7 +1837,7 @@ static void ath_tx_start_dma(struct ath_
 		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
 		bf->bf_state.bfs_paprd = txctl->paprd;
@@ -1867,9 +1846,6 @@ static void ath_tx_start_dma(struct ath_
 		ath_tx_send_normal(sc, txctl->txq, tid, skb);
-	spin_unlock_bh(&txctl->txq->axq_lock);
 /* Upon failure caller should free skb */
@@ -1949,15 +1925,19 @@ int ath_tx_start(struct ieee80211_hw *hw
 	q = skb_get_queue_mapping(skb);
 	spin_lock_bh(&txq->axq_lock);
 	if (txq == sc->tx.txq_map[q] &&
 	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
 		ieee80211_stop_queue(sc->hw, q);
-	spin_unlock_bh(&txq->axq_lock);
 	ath_tx_start_dma(sc, skb, txctl, tid);
+	spin_unlock_bh(&txq->axq_lock);
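
Note: in ath_tx_start the critical section is widened rather than removed:
the frame accounting, the queue-stop decision and the hand-off to
ath_tx_start_dma (which no longer locks internally, per the hunks above) now
happen under a single hold of axq_lock. Condensed from this hunk, with the
elided body of the if-block left out:

	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		/* ... */
	}
	ath_tx_start_dma(sc, skb, txctl, tid);
	spin_unlock_bh(&txq->axq_lock);
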
@@ -2003,7 +1983,6 @@ static void ath_tx_complete(struct ath_s
 	q = skb_get_queue_mapping(skb);
 	if (txq == sc->tx.txq_map[q]) {
-		spin_lock_bh(&txq->axq_lock);
 		if (WARN_ON(--txq->pending_frames < 0))
 			txq->pending_frames = 0;
@@ -2011,7 +1990,6 @@ static void ath_tx_complete(struct ath_s
 			ieee80211_wake_queue(sc->hw, q);
-		spin_unlock_bh(&txq->axq_lock);
 	ieee80211_tx_status(hw, skb);
@@ -2117,8 +2095,6 @@ static void ath_tx_rc_status(struct ath_
 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
 				  struct ath_tx_status *ts, struct ath_buf *bf,
 				  struct list_head *bf_head)
-	__releases(txq->axq_lock)
-	__acquires(txq->axq_lock)
@@ -2128,16 +2104,12 @@ static void ath_tx_process_buffer(struct
 	if (bf_is_ampdu_not_probing(bf))
 		txq->axq_ampdu_depth--;
-	spin_unlock_bh(&txq->axq_lock);
 	if (!bf_isampdu(bf)) {
 		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
 		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
 		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
-	spin_lock_bh(&txq->axq_lock);
 	if (sc->sc_flags & SC_OP_TXAGGR)
 		ath_txq_schedule(sc, txq);
@@ -2281,6 +2253,7 @@ void ath_tx_edma_tasklet(struct ath_soft
 	struct list_head bf_head;
+	spin_lock_bh(&txq->axq_lock);
 	if (work_pending(&sc->hw_reset_work))
@@ -2300,12 +2273,8 @@ void ath_tx_edma_tasklet(struct ath_soft
 		txq = &sc->tx.txq[ts.qid];
-		spin_lock_bh(&txq->axq_lock);
-		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
-			spin_unlock_bh(&txq->axq_lock);
+		if (list_empty(&txq->txq_fifo[txq->txq_tailidx]))
 		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
 				      struct ath_buf, list);
@@ -2329,8 +2298,8 @@ void ath_tx_edma_tasklet(struct ath_soft
 		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
-		spin_unlock_bh(&txq->axq_lock);
+	spin_unlock_bh(&txq->axq_lock);
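
Note: the same hoisting closes out the patch in ath_tx_edma_tasklet: the
per-iteration lock/unlock, including the unlock-then-break dance on an empty
FIFO, is replaced by one lock at the top of the tasklet and one unlock at
the bottom. The generic shape, with hypothetical helpers rather than ath9k
functions:

	spin_lock_bh(&q->lock);
	for (;;) {
		if (nothing_pending(q))
			break;
		process_one(q);	/* every step runs with q->lock held */
	}
	spin_unlock_bh(&q->lock);
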