1 Index: madwifi-trunk-r3280/ath/if_ath.c
2 ===================================================================
3 --- madwifi-trunk-r3280.orig/ath/if_ath.c 2008-01-28 17:29:22.989895792 +0100
4 +++ madwifi-trunk-r3280/ath/if_ath.c 2008-01-28 20:00:09.319637124 +0100
6 struct sk_buff *, int, int, u_int64_t);
7 static void ath_setdefantenna(struct ath_softc *, u_int);
8 static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
9 -static void ath_rx_tasklet(TQUEUE_ARG);
10 +static int ath_rx_poll(struct net_device *dev, int *budget);
11 static int ath_hardstart(struct sk_buff *, struct net_device *);
12 static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
13 #ifdef ATH_SUPERG_COMP
15 ATH_TXBUF_LOCK_INIT(sc);
16 ATH_RXBUF_LOCK_INIT(sc);
18 - ATH_INIT_TQUEUE(&sc->sc_rxtq, ath_rx_tasklet, dev);
19 ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet, dev);
20 ATH_INIT_TQUEUE(&sc->sc_bmisstq, ath_bmiss_tasklet, dev);
21 ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet, dev);
23 dev->set_mac_address = ath_set_mac_address;
24 dev->change_mtu = ath_change_mtu;
25 dev->tx_queue_len = ATH_TXBUF - ATH_TXBUF_MGT_RESERVED;
26 + dev->poll = ath_rx_poll;
28 #ifdef USE_HEADERLEN_RESV
29 dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
32 (status & HAL_INT_GLOBAL) ? " HAL_INT_GLOBAL" : ""
35 + sc->sc_isr = status;
36 status &= sc->sc_imask; /* discard unasked for bits */
37 /* As soon as we know we have a real interrupt we intend to service,
38 * we will check to see if we need an initial hardware TSF reading.
39 @@ -2263,7 +2265,14 @@
41 if (status & (HAL_INT_RX | HAL_INT_RXPHY)) {
42 ath_uapsd_processtriggers(sc, hw_tsf);
43 - ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
44 + sc->sc_isr &= ~HAL_INT_RX;
45 + if (netif_rx_schedule_prep(dev)) {
46 +#ifndef ATH_PRECISE_TSF
47 + sc->sc_imask &= ~HAL_INT_RX;
48 + ath_hal_intrset(ah, sc->sc_imask);
50 + __netif_rx_schedule(dev);
53 if (status & HAL_INT_TX) {
54 #ifdef ATH_SUPERG_DYNTURBO
55 @@ -2289,6 +2298,11 @@
59 + /* disable transmit interrupt */
60 + sc->sc_isr &= ~HAL_INT_TX;
61 + ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
62 + sc->sc_imask &= ~HAL_INT_TX;
64 ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
66 if (status & HAL_INT_BMISS) {
67 @@ -4011,10 +4025,10 @@
69 * XXX Using in_softirq is not right since we might
70 * be called from other soft irq contexts than
75 - tasklet_disable(&sc->sc_rxtq);
76 + netif_poll_disable(dev);
77 netif_stop_queue(dev);
81 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "End\n");
82 netif_wake_queue(dev);
83 if (!in_softirq()) /* NB: see above */
84 - tasklet_enable(&sc->sc_rxtq);
85 + netif_poll_enable(dev);
89 @@ -6329,13 +6343,12 @@
90 sc->sc_rxotherant = 0;
94 -ath_rx_tasklet(TQUEUE_ARG data)
96 +ath_rx_poll(struct net_device *dev, int *budget)
98 #define PA2DESC(_sc, _pa) \
99 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
100 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
101 - struct net_device *dev = (struct net_device *)data;
103 struct ath_softc *sc = dev->priv;
104 struct ieee80211com *ic = &sc->sc_ic;
105 @@ -6347,8 +6360,11 @@
109 + u_int processed = 0, early_stop = 0;
110 + u_int rx_limit = dev->quota;
112 DPRINTF(sc, ATH_DEBUG_RX_PROC, "invoked\n");
115 bf = STAILQ_FIRST(&sc->sc_rxbuf);
116 if (bf == NULL) { /* XXX ??? can this happen */
117 @@ -6372,6 +6388,13 @@
118 /* NB: never process the self-linked entry at the end */
123 + if (rx_limit-- == 0) {
130 EPRINTF(sc, "Dropping; buffer contains NULL skbuff.\n");
131 @@ -6419,6 +6442,7 @@
132 sc->sc_stats.ast_rx_phyerr++;
133 phyerr = rs->rs_phyerr & 0x1f;
134 sc->sc_stats.ast_rx_phy[phyerr]++;
137 if (rs->rs_status & HAL_RXERR_DECRYPT) {
139 @@ -6614,9 +6638,35 @@
140 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
141 ATH_RXBUF_UNLOCK_IRQ(sc);
142 } while (ath_rxbuf_init(sc, bf) == 0);
144 + unsigned long flags;
145 + /* Check if more data is received while we were
146 + * processing the descriptor chain.
148 +#ifndef ATH_PRECISE_TSF
149 + local_irq_save(flags);
150 + if (sc->sc_isr & HAL_INT_RX) {
151 + u_int64_t hw_tsf = ath_hal_gettsf64(ah);
152 + sc->sc_isr &= ~HAL_INT_RX;
153 + local_irq_restore(flags);
154 + ath_uapsd_processtriggers(sc, hw_tsf);
155 + goto process_rx_again;
158 + netif_rx_complete(dev);
160 +#ifndef ATH_PRECISE_TSF
161 + sc->sc_imask |= HAL_INT_RX;
162 + ath_hal_intrset(ah, sc->sc_imask);
163 + local_irq_restore(flags);
167 + *budget -= processed;
169 /* rx signal state monitoring */
170 ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
175 @@ -8267,12 +8317,24 @@
177 struct net_device *dev = (struct net_device *)data;
178 struct ath_softc *sc = dev->priv;
179 + unsigned long flags;
182 if (txqactive(sc->sc_ah, 0))
183 ath_tx_processq(sc, &sc->sc_txq[0]);
184 if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
185 ath_tx_processq(sc, sc->sc_cabq);
187 + local_irq_save(flags);
188 + if (sc->sc_isr & HAL_INT_TX) {
189 + sc->sc_isr &= ~HAL_INT_TX;
190 + local_irq_restore(flags);
191 + goto process_tx_again;
193 + sc->sc_imask |= HAL_INT_TX;
194 + ath_hal_intrset(sc->sc_ah, sc->sc_imask);
195 + local_irq_restore(flags);
197 netif_wake_queue(dev);
200 @@ -8288,7 +8350,9 @@
202 struct net_device *dev = (struct net_device *)data;
203 struct ath_softc *sc = dev->priv;
204 + unsigned long flags;
208 * Process each active queue.
210 @@ -8309,6 +8373,16 @@
211 if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
212 ath_tx_processq(sc, sc->sc_uapsdq);
214 + local_irq_save(flags);
215 + if (sc->sc_isr & HAL_INT_TX) {
216 + sc->sc_isr &= ~HAL_INT_TX;
217 + local_irq_restore(flags);
218 + goto process_tx_again;
220 + sc->sc_imask |= HAL_INT_TX;
221 + ath_hal_intrset(sc->sc_ah, sc->sc_imask);
222 + local_irq_restore(flags);
224 netif_wake_queue(dev);
227 @@ -8324,13 +8398,25 @@
228 struct net_device *dev = (struct net_device *)data;
229 struct ath_softc *sc = dev->priv;
231 + unsigned long flags;
233 /* Process each active queue. This includes sc_cabq, sc_xrtq and
236 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
237 if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
238 ath_tx_processq(sc, &sc->sc_txq[i]);
240 + local_irq_save(flags);
241 + if (sc->sc_isr & HAL_INT_TX) {
242 + sc->sc_isr &= ~HAL_INT_TX;
243 + local_irq_restore(flags);
244 + goto process_tx_again;
246 + sc->sc_imask |= HAL_INT_TX;
247 + ath_hal_intrset(sc->sc_ah, sc->sc_imask);
248 + local_irq_restore(flags);
250 netif_wake_queue(dev);
253 @@ -8405,6 +8491,7 @@
254 ath_draintxq(struct ath_softc *sc)
256 struct ath_hal *ah = sc->sc_ah;
260 /* XXX return value */
261 @@ -10261,9 +10348,9 @@
263 if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
264 /* NB: the rx buffers may need to be reallocated */
265 - tasklet_disable(&sc->sc_rxtq);
266 + netif_poll_disable(dev);
267 error = ath_reset(dev);
268 - tasklet_enable(&sc->sc_rxtq);
269 + netif_poll_enable(dev);
273 Index: madwifi-trunk-r3280/ath/if_athvar.h
274 ===================================================================
275 --- madwifi-trunk-r3280.orig/ath/if_athvar.h 2008-01-28 17:29:22.997896245 +0100
276 +++ madwifi-trunk-r3280/ath/if_athvar.h 2008-01-28 17:45:06.903383316 +0100
279 #include <linux/list.h>
281 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
282 +#define irqs_disabled() 0
286 * Deduce if tasklets are available. If not then
287 * fall back to using the immediate work queue.
289 struct ath_buf *sc_rxbufcur; /* current rx buffer */
290 u_int32_t *sc_rxlink; /* link ptr in last RX desc */
291 spinlock_t sc_rxbuflock;
292 - struct ATH_TQ_STRUCT sc_rxtq; /* rx intr tasklet */
293 struct ATH_TQ_STRUCT sc_rxorntq; /* rxorn intr tasklet */
294 u_int8_t sc_defant; /* current default antenna */
295 u_int8_t sc_rxotherant; /* RXs on non-default antenna */
297 u_int sc_txintrperiod; /* tx interrupt batching */
298 struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
299 struct ath_txq *sc_ac2q[WME_NUM_AC]; /* WME AC -> h/w qnum */
300 + HAL_INT sc_isr; /* unmasked ISR state */
301 struct ATH_TQ_STRUCT sc_txtq; /* tx intr tasklet */
302 u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
303 struct ath_descdma sc_bdma; /* beacon descriptors */
305 #define ATH_TXBUF_LOCK_CHECK(_sc)
308 +#define ATH_DISABLE_INTR local_irq_disable
309 +#define ATH_ENABLE_INTR local_irq_enable
311 #define ATH_RXBUF_LOCK_INIT(_sc) spin_lock_init(&(_sc)->sc_rxbuflock)
312 #define ATH_RXBUF_LOCK_DESTROY(_sc)
313 Index: madwifi-trunk-r3280/net80211/ieee80211_input.c
314 ===================================================================
315 --- madwifi-trunk-r3280.orig/net80211/ieee80211_input.c 2008-01-28 17:29:23.005896702 +0100
316 +++ madwifi-trunk-r3280/net80211/ieee80211_input.c 2008-01-28 19:52:50.586635164 +0100
317 @@ -1197,7 +1197,7 @@
318 /* attach vlan tag */
319 struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
320 if (vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan) == NET_RX_DROP) {
321 - /* If netif_rx dropped the packet because
322 + /* If netif_receive_skb dropped the packet because
323 * device was too busy */
324 if (ni_tmp != NULL) {
325 /* node reference was leaked */
326 @@ -1208,8 +1208,8 @@
327 skb = NULL; /* SKB is no longer ours */
329 struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
330 - if (netif_rx(skb) == NET_RX_DROP) {
331 - /* If netif_rx dropped the packet because
332 + if (netif_receive_skb(skb) == NET_RX_DROP) {
333 + /* If netif_receive_skb dropped the packet because
334 * device was too busy */
335 if (ni_tmp != NULL) {
336 /* node reference was leaked */
337 @@ -2314,8 +2314,8 @@
338 skb1->protocol = __constant_htons(0x0019); /* ETH_P_80211_RAW */
340 ni_tmp = SKB_CB(skb1)->ni;
341 - if (netif_rx(skb1) == NET_RX_DROP) {
342 - /* If netif_rx dropped the packet because
343 + if (netif_receive_skb(skb1) == NET_RX_DROP) {
344 + /* If netif_receive_skb dropped the packet because
345 * device was too busy */
346 if (ni_tmp != NULL) {
347 /* node reference was leaked */
348 Index: madwifi-trunk-r3280/net80211/ieee80211_monitor.c
349 ===================================================================
350 --- madwifi-trunk-r3280.orig/net80211/ieee80211_monitor.c 2008-01-28 17:29:23.013897159 +0100
351 +++ madwifi-trunk-r3280/net80211/ieee80211_monitor.c 2008-01-28 17:29:26.430091834 +0100
354 __constant_htons(0x0019); /* ETH_P_80211_RAW */
356 - if (netif_rx(skb1) == NET_RX_DROP) {
357 - /* If netif_rx dropped the packet because
358 + if (netif_receive_skb(skb1) == NET_RX_DROP) {
359 + /* If netif_receive_skb dropped the packet because
360 * device was too busy, reclaim the ref. in
362 if (SKB_CB(skb1)->ni != NULL)
363 Index: madwifi-trunk-r3280/net80211/ieee80211_skb.c
364 ===================================================================
365 --- madwifi-trunk-r3280.orig/net80211/ieee80211_skb.c 2008-01-28 17:29:23.017897384 +0100
366 +++ madwifi-trunk-r3280/net80211/ieee80211_skb.c 2008-01-28 17:29:26.446092748 +0100
368 #undef dev_queue_xmit
370 #undef kfree_skb_fast
372 +#undef netif_receive_skb
380 -int netif_rx_debug(struct sk_buff *skb, const char* func, int line) {
381 - return netif_rx(untrack_skb(skb, 0, func, line, __func__, __LINE__));
382 +int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line) {
383 + return netif_receive_skb(untrack_skb(skb, 0, func, line, __func__, __LINE__));
386 struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
390 EXPORT_SYMBOL(vlan_hwaccel_receive_skb_debug);
391 -EXPORT_SYMBOL(netif_rx_debug);
392 +EXPORT_SYMBOL(netif_receive_skb_debug);
393 EXPORT_SYMBOL(alloc_skb_debug);
394 EXPORT_SYMBOL(dev_alloc_skb_debug);
395 EXPORT_SYMBOL(skb_clone_debug);
396 Index: madwifi-trunk-r3280/net80211/ieee80211_skb.h
397 ===================================================================
398 --- madwifi-trunk-r3280.orig/net80211/ieee80211_skb.h 2008-01-28 17:29:23.029898072 +0100
399 +++ madwifi-trunk-r3280/net80211/ieee80211_skb.h 2008-01-28 17:29:26.458093432 +0100
401 int vlan_hwaccel_receive_skb_debug(struct sk_buff *skb,
402 struct vlan_group *grp, unsigned short vlan_tag,
403 const char* func, int line);
404 -int netif_rx_debug(struct sk_buff *skb, const char* func, int line);
405 +int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line);
406 struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
407 const char *func, int line);
408 struct sk_buff * dev_alloc_skb_debug(unsigned int length,
410 #undef dev_queue_xmit
412 #undef kfree_skb_fast
414 +#undef netif_receive_skb
419 skb_copy_expand_debug(_skb, _newheadroom, _newtailroom, _gfp_mask, __func__, __LINE__)
420 #define vlan_hwaccel_receive_skb(_skb, _grp, _tag) \
421 vlan_hwaccel_receive_skb_debug(_skb, _grp, _tag, __func__, __LINE__)
422 -#define netif_rx(_skb) \
423 - netif_rx_debug(_skb, __func__, __LINE__)
424 +#define netif_receive_skb(_skb) \
425 + netif_receive_skb_debug(_skb, __func__, __LINE__)
426 #define alloc_skb(_length, _gfp_mask) \
427 alloc_skb_debug(_length, _gfp_mask, __func__, __LINE__)
428 #define dev_alloc_skb(_length) \