package/madwifi/patches/300-napi_polling.patch
--- a/ath/if_ath.c
+++ b/ath/if_ath.c
@@ -184,7 +184,11 @@
 struct sk_buff *, int, int, u_int64_t);
 static void ath_setdefantenna(struct ath_softc *, u_int);
 static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
-static void ath_rx_tasklet(TQUEUE_ARG);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+static int ath_rx_poll(struct napi_struct *napi, int budget);
+#else
+static int ath_rx_poll(struct net_device *dev, int *budget);
+#endif
 static int ath_hardstart(struct sk_buff *, struct net_device *);
 static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
 #ifdef ATH_SUPERG_COMP
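This prototype swap is the heart of the patch: the tasklet handler, which took an opaque TQUEUE_ARG and returned nothing, becomes a NAPI poll callback, and the callback contract differs across the 2.6.24 boundary. As a reference point, here is a minimal sketch of both contracts outside madwifi; every my_* name is hypothetical, only the kernel APIs are real.

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>

struct my_priv {
	struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
	struct napi_struct napi;
#endif
};

/* Hypothetical ring drain: process up to 'limit' frames, return count. */
static int my_rx_ring_drain(struct my_priv *priv, int limit) { return 0; }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
/* 2.6.24+: the stack hands over the napi_struct and a budget;
 * return the number of frames actually processed. */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = my_rx_ring_drain(priv, budget);

	if (done < budget)
		netif_rx_complete(priv->dev, napi);	/* ring drained */
	return done;
}
#else
/* Pre-2.6.24: quota lives on the netdev; return nonzero while work
 * remains so the softirq keeps the device on the poll list. */
static int my_poll(struct net_device *dev, int *budget)
{
	struct my_priv *priv = netdev_priv(dev);
	int limit = min(dev->quota, *budget);
	int done = my_rx_ring_drain(priv, limit);

	*budget -= done;
	dev->quota -= done;
	if (done < limit) {
		netif_rx_complete(dev);
		return 0;
	}
	return 1;
}
#endif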
@@ -376,6 +380,9 @@
 u_int32_t new_clamped_maxtxpower);
 static u_int32_t ath_get_real_maxtxpower(struct ath_softc *sc);
 
+static void ath_poll_disable(struct net_device *dev);
+static void ath_poll_enable(struct net_device *dev);
+
 /* calibrate every 30 secs in steady state but check every second at first. */
 static int ath_calinterval = ATH_SHORT_CALINTERVAL;
 static int ath_countrycode = CTRY_DEFAULT; /* country code */
@@ -547,7 +554,6 @@
 
 atomic_set(&sc->sc_txbuf_counter, 0);
 
- ATH_INIT_TQUEUE(&sc->sc_rxtq, ath_rx_tasklet, dev);
 ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet, dev);
 ATH_INIT_TQUEUE(&sc->sc_bmisstq, ath_bmiss_tasklet, dev);
 ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet, dev);
@@ -821,6 +827,12 @@
 dev->set_mac_address = ath_set_mac_address;
 dev->change_mtu = ath_change_mtu;
 dev->tx_queue_len = ATH_TXBUF - ATH_TXBUF_MGT_RESERVED;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ netif_napi_add(dev, &sc->sc_napi, ath_rx_poll, 64);
+#else
+ dev->poll = ath_rx_poll;
+ dev->weight = 64;
+#endif
 #ifdef USE_HEADERLEN_RESV
 dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
 sizeof(struct llc) +
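netif_napi_add() only registers the poll context; on 2.6.24+ the instance must also be enabled before the ISR may schedule it, which is why the patch pairs this hunk with the ath_poll_enable()/ath_poll_disable() calls added to ath_init()/ath_stop_locked() further down. A hedged lifecycle sketch, continuing the my_priv example above:

#include <linux/version.h>
#include <linux/netdevice.h>

static int my_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
	napi_enable(&priv->napi);	/* permit scheduling from the ISR */
#endif
	/* ... start hardware, unmask interrupts ... */
	return 0;
}

static int my_stop(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* ... mask interrupts, stop hardware ... */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
	napi_disable(&priv->napi);	/* blocks until a running poll exits */
#endif
	return 0;
}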
@@ -2220,6 +2232,7 @@
 (status & HAL_INT_GLOBAL) ? " HAL_INT_GLOBAL" : ""
 );
 
+ sc->sc_isr = status;
 status &= sc->sc_imask; /* discard unasked for bits */
 /* As soon as we know we have a real interrupt we intend to service,
 * we will check to see if we need an initial hardware TSF reading.
@@ -2277,7 +2290,23 @@
 }
 if (status & (HAL_INT_RX | HAL_INT_RXPHY)) {
 ath_uapsd_processtriggers(sc, hw_tsf);
- ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
+ sc->sc_isr &= ~HAL_INT_RX;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ if (netif_rx_schedule_prep(dev, &sc->sc_napi))
+#else
+ if (netif_rx_schedule_prep(dev))
+#endif
+ {
+#ifndef ATH_PRECISE_TSF
+ sc->sc_imask &= ~HAL_INT_RX;
+ ath_hal_intrset(ah, sc->sc_imask);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ __netif_rx_schedule(dev, &sc->sc_napi);
+#else
+ __netif_rx_schedule(dev);
+#endif
+ }
 }
 if (status & HAL_INT_TX) {
 #ifdef ATH_SUPERG_DYNTURBO
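The RX branch above is the canonical NAPI interrupt handshake: netif_rx_schedule_prep() atomically claims the right to schedule (so back-to-back interrupts schedule the poll only once), the RX source is masked to stop the interrupt storm, and __netif_rx_schedule() queues the poll. The same handshake, distilled; my_hw_mask_rx_irq() is a hypothetical stand-in for the ath_hal_intrset() dance:

#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

static void my_hw_mask_rx_irq(struct my_priv *priv) { /* hypothetical */ }

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *priv = netdev_priv(dev);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		my_hw_mask_rx_irq(priv);		/* quiet the source */
		__netif_rx_schedule(dev, &priv->napi);	/* poll runs soon */
	}
#else
	if (netif_rx_schedule_prep(dev)) {
		my_hw_mask_rx_irq(priv);
		__netif_rx_schedule(dev);
	}
#endif
	return IRQ_HANDLED;
}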
@@ -2303,6 +2332,11 @@
 }
 }
 #endif
+ /* disable transmit interrupt */
+ sc->sc_isr &= ~HAL_INT_TX;
+ ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
+ sc->sc_imask &= ~HAL_INT_TX;
+
 ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
 }
 if (status & HAL_INT_BMISS) {
@@ -2515,6 +2549,7 @@
 if (sc->sc_tx99 != NULL)
 sc->sc_tx99->start(sc->sc_tx99);
 #endif
+ ath_poll_enable(dev);
 
 done:
 ATH_UNLOCK(sc);
@@ -2555,6 +2590,9 @@
 if (sc->sc_tx99 != NULL)
 sc->sc_tx99->stop(sc->sc_tx99);
 #endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ ath_poll_disable(dev);
+#endif
 netif_stop_queue(dev); /* XXX re-enabled by ath_newstate */
 dev->flags &= ~IFF_RUNNING; /* NB: avoid recursion */
 ieee80211_stop_running(ic); /* stop all VAPs */
@@ -4013,12 +4051,47 @@
 return ath_keyset(sc, k, mac, vap->iv_bss);
 }
 
+static void ath_poll_disable(struct net_device *dev)
+{
+ struct ath_softc *sc = dev->priv;
+
+ /*
+ * XXX Using in_softirq is not right since we might
+ * be called from other soft irq contexts than
+ * ath_rx_poll
+ */
+ if (!in_softirq()) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ napi_disable(&sc->sc_napi);
+#else
+ netif_poll_disable(dev);
+#endif
+ }
+}
+
+static void ath_poll_enable(struct net_device *dev)
+{
+ struct ath_softc *sc = dev->priv;
+
+ /* NB: see above */
+ if (!in_softirq()) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ napi_enable(&sc->sc_napi);
+#else
+ netif_poll_enable(dev);
+#endif
+ }
+}
+
+
 /*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
+#define IS_UP(_dev) \
+ (((_dev)->flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP))
 static void
 ath_key_update_begin(struct ieee80211vap *vap)
 {
@@ -4032,14 +4105,9 @@
 * When called from the rx tasklet we cannot use
 * tasklet_disable because it will block waiting
 * for us to complete execution.
- *
- * XXX Using in_softirq is not right since we might
- * be called from other soft irq contexts than
- * ath_rx_tasklet.
 */
- if (!in_softirq())
- tasklet_disable(&sc->sc_rxtq);
- netif_stop_queue(dev);
+ if (IS_UP(vap->iv_dev))
+ netif_stop_queue(dev);
 }
 
 static void
@@ -4051,9 +4119,9 @@
 #endif
 
 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "End\n");
- netif_wake_queue(dev);
- if (!in_softirq()) /* NB: see above */
- tasklet_enable(&sc->sc_rxtq);
+
+ if (IS_UP(vap->iv_dev))
+ netif_wake_queue(dev);
 }
 
 /*
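The in_softirq() guard in ath_poll_disable()/ath_poll_enable() above exists because napi_disable() may sleep: it sets NAPI_STATE_DISABLE and then waits to take the SCHED bit, i.e. for any in-flight poll to finish. Called from the poll path itself, that wait would never end, and the driver's own XXX comment concedes that in_softirq() is only an approximation of "am I the poll?". Safe usage from process context looks like this sketch for the 2.6.24+ case, continuing the my_* names:

/* Process-context reconfiguration around a quiesced NAPI instance. */
static void my_hw_reset(struct my_priv *priv) { /* hypothetical reinit */ }

static void my_reset(struct my_priv *priv)
{
	napi_disable(&priv->napi);	/* may sleep; never call from my_poll() */
	my_hw_reset(priv);
	napi_enable(&priv->napi);
}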
@@ -6360,15 +6428,25 @@
 sc->sc_rxotherant = 0;
 }
 
-static void
-ath_rx_tasklet(TQUEUE_ARG data)
+static int
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ath_rx_poll(struct napi_struct *napi, int budget)
+#else
+ath_rx_poll(struct net_device *dev, int *budget)
+#endif
 {
 #define PA2DESC(_sc, _pa) \
 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
- struct net_device *dev = (struct net_device *)data;
- struct ath_buf *bf;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ struct ath_softc *sc = container_of(napi, struct ath_softc, sc_napi);
+ struct net_device *dev = sc->sc_dev;
+ u_int rx_limit = budget;
+#else
 struct ath_softc *sc = dev->priv;
+ u_int rx_limit = min(dev->quota, *budget);
+#endif
+ struct ath_buf *bf;
 struct ieee80211com *ic = &sc->sc_ic;
 struct ath_hal *ah = sc ? sc->sc_ah : NULL;
 struct ath_desc *ds;
@@ -6378,8 +6456,10 @@
 unsigned int len;
 int type;
 u_int phyerr;
+ u_int processed = 0, early_stop = 0;
 
 DPRINTF(sc, ATH_DEBUG_RX_PROC, "invoked\n");
+process_rx_again:
 do {
 bf = STAILQ_FIRST(&sc->sc_rxbuf);
 if (bf == NULL) { /* XXX ??? can this happen */
@@ -6403,6 +6483,15 @@
 /* NB: never process the self-linked entry at the end */
 break;
 }
+
+ if (rx_limit-- < 2) {
+ early_stop = 1;
+ break;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ processed++;
+#endif
+
 skb = bf->bf_skb;
 if (skb == NULL) {
 EPRINTF(sc, "Dropping; buffer contains NULL skbuff.\n");
@@ -6450,6 +6539,7 @@
 sc->sc_stats.ast_rx_phyerr++;
 phyerr = rs->rs_phyerr & 0x1f;
 sc->sc_stats.ast_rx_phy[phyerr]++;
+ goto rx_next;
 }
 if (rs->rs_status & HAL_RXERR_DECRYPT) {
 /*
@@ -6645,9 +6735,43 @@
 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
 ATH_RXBUF_UNLOCK_IRQ(sc);
 } while (ath_rxbuf_init(sc, bf) == 0);
+ if (!early_stop) {
+ unsigned long flags;
+ /* Check if more data is received while we were
+ * processing the descriptor chain.
+ */
+#ifndef ATH_PRECISE_TSF
+ local_irq_save(flags);
+ if (sc->sc_isr & HAL_INT_RX) {
+ u_int64_t hw_tsf = ath_hal_gettsf64(ah);
+ sc->sc_isr &= ~HAL_INT_RX;
+ local_irq_restore(flags);
+ ath_uapsd_processtriggers(sc, hw_tsf);
+ goto process_rx_again;
+ }
+#endif
+#ifndef ATH_PRECISE_TSF
+ sc->sc_imask |= HAL_INT_RX;
+ ath_hal_intrset(ah, sc->sc_imask);
+ local_irq_restore(flags);
+#endif
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ netif_rx_complete(dev, napi);
+#else
+ netif_rx_complete(dev);
+ *budget -= processed;
+ dev->quota -= processed;
+#endif
 
 /* rx signal state monitoring */
 ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ return processed;
+#else
+ return early_stop;
+#endif
 #undef PA2DESC
 }
 
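The completion path of ath_rx_poll() above closes a race: a frame can land between the last descriptor processed and netif_rx_complete(), while the RX interrupt is still masked. Because the ISR latches every raw status word into sc->sc_isr, the poll can re-check that shadow with interrupts disabled and loop back via process_rx_again instead of re-arming, so no frame is stranded with RX masked. The sequence, distilled for the 2.6.24+ case; this refines the earlier my_poll sketch and assumes my_priv has gained an unsigned int isr_shadow mirroring sc_isr, with MY_INT_RX and my_hw_unmask_rx_irq() hypothetical:

#define MY_INT_RX 0x1	/* hypothetical status bit, cf. HAL_INT_RX */

static void my_hw_unmask_rx_irq(struct my_priv *priv) { /* hypothetical */ }

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	unsigned long flags;
	int done = 0;

again:
	done += my_rx_ring_drain(priv, budget - done);
	if (done >= budget)
		return done;			/* budget spent: stay scheduled */

	local_irq_save(flags);
	if (priv->isr_shadow & MY_INT_RX) {	/* frames raced in meanwhile */
		priv->isr_shadow &= ~MY_INT_RX;
		local_irq_restore(flags);
		goto again;
	}
	netif_rx_complete(priv->dev, napi);	/* leave polled mode... */
	my_hw_unmask_rx_irq(priv);		/* ...only then re-arm RX */
	local_irq_restore(flags);
	return done;
}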
@@ -8298,12 +8422,24 @@
 {
 struct net_device *dev = (struct net_device *)data;
 struct ath_softc *sc = dev->priv;
+ unsigned long flags;
 
+process_tx_again:
 if (txqactive(sc->sc_ah, 0))
 ath_tx_processq(sc, &sc->sc_txq[0]);
 if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
 ath_tx_processq(sc, sc->sc_cabq);
 
+ local_irq_save(flags);
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
+ local_irq_restore(flags);
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+ local_irq_restore(flags);
+
 netif_wake_queue(dev);
 
 if (sc->sc_softled)
@@ -8319,7 +8455,9 @@
 {
 struct net_device *dev = (struct net_device *)data;
 struct ath_softc *sc = dev->priv;
+ unsigned long flags;
 
+process_tx_again:
 /*
 * Process each active queue.
 */
@@ -8340,6 +8478,16 @@
 if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
 ath_tx_processq(sc, sc->sc_uapsdq);
 
+ local_irq_save(flags);
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
+ local_irq_restore(flags);
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+ local_irq_restore(flags);
+
 netif_wake_queue(dev);
 
 if (sc->sc_softled)
@@ -8355,13 +8503,25 @@
 struct net_device *dev = (struct net_device *)data;
 struct ath_softc *sc = dev->priv;
 unsigned int i;
+ unsigned long flags;
 
 /* Process each active queue. This includes sc_cabq, sc_xrtq and
 * sc_uapsdq */
+process_tx_again:
 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
 if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
 ath_tx_processq(sc, &sc->sc_txq[i]);
 
+ local_irq_save(flags);
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
+ local_irq_restore(flags);
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+ local_irq_restore(flags);
+
 netif_wake_queue(dev);
 
 if (sc->sc_softled)
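All three TX tasklet variants gain the identical epilogue seen above: the ISR has already masked TX and latched the cause bits into sc->sc_isr, so the tasklet drains completions, re-checks the shadow with interrupts off, loops until nothing slipped in, and only then unmasks TX and wakes the queue. The shared pattern, distilled with the hypothetical my_* names and an assumed irq_mask field on my_priv:

#include <linux/types.h>

#define MY_INT_TX 0x2	/* hypothetical status bit, cf. HAL_INT_TX */

static void my_tx_ring_reap(struct my_priv *priv) { /* hypothetical */ }
static void my_hw_set_irq_mask(struct my_priv *priv, u32 mask) { /* hypothetical */ }

static void my_tx_tasklet(unsigned long data)
{
	struct my_priv *priv = (struct my_priv *)data;
	unsigned long flags;

again:
	my_tx_ring_reap(priv);			/* walk completed descriptors */

	local_irq_save(flags);
	if (priv->isr_shadow & MY_INT_TX) {	/* completions raced in */
		priv->isr_shadow &= ~MY_INT_TX;
		local_irq_restore(flags);
		goto again;
	}
	priv->irq_mask |= MY_INT_TX;		/* quiet: re-enable TX irq */
	my_hw_set_irq_mask(priv, priv->irq_mask);
	local_irq_restore(flags);

	netif_wake_queue(priv->dev);
}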
@@ -10296,9 +10456,9 @@
 dev->mtu = mtu;
 if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
 /* NB: the rx buffers may need to be reallocated */
- tasklet_disable(&sc->sc_rxtq);
+ ath_poll_disable(dev);
 error = ath_reset(dev);
- tasklet_enable(&sc->sc_rxtq);
+ ath_poll_enable(dev);
 }
 ATH_UNLOCK(sc);
 
--- a/ath/if_athvar.h
+++ b/ath/if_athvar.h
@@ -53,6 +53,10 @@
 # include <asm/bitops.h>
 #endif
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled() 0
+#endif
+
 /*
 * Deduce if tasklets are available. If not then
 * fall back to using the immediate work queue.
@@ -616,6 +620,9 @@
 struct ath_softc {
 struct ieee80211com sc_ic; /* NB: must be first */
 struct net_device *sc_dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ struct napi_struct sc_napi;
+#endif
 void __iomem *sc_iobase; /* address of the device */
 struct semaphore sc_lock; /* dev-level lock */
 struct net_device_stats sc_devstats; /* device statistics */
@@ -730,7 +737,6 @@
 struct ath_buf *sc_rxbufcur; /* current rx buffer */
 u_int32_t *sc_rxlink; /* link ptr in last RX desc */
 spinlock_t sc_rxbuflock;
- struct ATH_TQ_STRUCT sc_rxtq; /* rx intr tasklet */
 struct ATH_TQ_STRUCT sc_rxorntq; /* rxorn intr tasklet */
 u_int8_t sc_defant; /* current default antenna */
 u_int8_t sc_rxotherant; /* RXs on non-default antenna */
@@ -745,6 +751,7 @@
 u_int sc_txintrperiod; /* tx interrupt batching */
 struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
 struct ath_txq *sc_ac2q[WME_NUM_AC]; /* WME AC -> h/w qnum */
+ HAL_INT sc_isr; /* unmasked ISR state */
 struct ATH_TQ_STRUCT sc_txtq; /* tx intr tasklet */
 u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
 struct ath_descdma sc_bdma; /* beacon descriptors */
@@ -858,6 +865,8 @@
 #define ATH_TXBUF_LOCK_CHECK(_sc)
 #endif
 
+#define ATH_DISABLE_INTR local_irq_disable
+#define ATH_ENABLE_INTR local_irq_enable
 
 #define ATH_RXBUF_LOCK_INIT(_sc) spin_lock_init(&(_sc)->sc_rxbuflock)
 #define ATH_RXBUF_LOCK_DESTROY(_sc)
--- a/net80211/ieee80211_input.c
+++ b/net80211/ieee80211_input.c
@@ -1198,7 +1198,7 @@
 /* attach vlan tag */
 struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
 if (vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan) == NET_RX_DROP) {
- /* If netif_rx dropped the packet because
+ /* If netif_receive_skb dropped the packet because
 * device was too busy */
 if (ni_tmp != NULL) {
 /* node reference was leaked */
@@ -1209,8 +1209,8 @@
 skb = NULL; /* SKB is no longer ours */
 } else {
 struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
- if (netif_rx(skb) == NET_RX_DROP) {
- /* If netif_rx dropped the packet because
+ if (netif_receive_skb(skb) == NET_RX_DROP) {
+ /* If netif_receive_skb dropped the packet because
 * device was too busy */
 if (ni_tmp != NULL) {
 /* node reference was leaked */
@@ -2322,8 +2322,8 @@
 skb1->protocol = __constant_htons(0x0019); /* ETH_P_80211_RAW */
 
 ni_tmp = SKB_CB(skb1)->ni;
- if (netif_rx(skb1) == NET_RX_DROP) {
- /* If netif_rx dropped the packet because
+ if (netif_receive_skb(skb1) == NET_RX_DROP) {
+ /* If netif_receive_skb dropped the packet because
 * device was too busy */
 if (ni_tmp != NULL) {
 /* node reference was leaked */
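The substitutions here and in the following files are a direct consequence of the NAPI switch: netif_rx() merely queues the skb on the per-CPU backlog for a later NET_RX softirq, while netif_receive_skb() pushes it up the stack immediately and is the proper entry point when the caller is already running in poll/softirq context, as these receive paths now are. The rule of thumb, as a hypothetical helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical dispatch helper illustrating the rule the patch applies. */
static int my_deliver(struct sk_buff *skb, bool in_napi_poll)
{
	if (in_napi_poll)
		return netif_receive_skb(skb);	/* inline, no backlog hop */
	return netif_rx(skb);			/* defer to NET_RX softirq */
}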
--- a/net80211/ieee80211_monitor.c
+++ b/net80211/ieee80211_monitor.c
@@ -584,8 +584,8 @@
 skb1->protocol =
 __constant_htons(0x0019); /* ETH_P_80211_RAW */
 
- if (netif_rx(skb1) == NET_RX_DROP) {
- /* If netif_rx dropped the packet because
+ if (netif_receive_skb(skb1) == NET_RX_DROP) {
+ /* If netif_receive_skb dropped the packet because
 * device was too busy, reclaim the ref. in
 * the skb. */
 if (SKB_CB(skb1)->ni != NULL)
--- a/net80211/ieee80211_skb.c
+++ b/net80211/ieee80211_skb.c
@@ -73,7 +73,7 @@
 #undef dev_queue_xmit
 #undef kfree_skb
 #undef kfree_skb_fast
-#undef netif_rx
+#undef netif_receive_skb
 #undef pskb_copy
 #undef skb_clone
 #undef skb_copy
@@ -638,8 +638,8 @@
 grp, vlan_tag);
 }
 
-int netif_rx_debug(struct sk_buff *skb, const char* func, int line) {
- return netif_rx(untrack_skb(skb, 0, func, line, __func__, __LINE__));
+int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line) {
+ return netif_receive_skb(untrack_skb(skb, 0, func, line, __func__, __LINE__));
 }
 
 struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
@@ -760,7 +760,7 @@
 }
 
 EXPORT_SYMBOL(vlan_hwaccel_receive_skb_debug);
-EXPORT_SYMBOL(netif_rx_debug);
+EXPORT_SYMBOL(netif_receive_skb_debug);
 EXPORT_SYMBOL(alloc_skb_debug);
 EXPORT_SYMBOL(dev_alloc_skb_debug);
 EXPORT_SYMBOL(skb_clone_debug);
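ieee80211_skb.c's skb-tracking build wraps every delivery call in a *_debug shim that records the call site, so the rename has to touch the #undef, the shim, and the export; the header hunks below complete the set by rewriting callers. The wrapper technique itself, reduced to a hedged minimum, with my_untrack() standing in for the real untrack_skb() bookkeeping:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_untrack(struct sk_buff *skb, const char *func, int line)
{
	/* hypothetical: drop the skb from the leak-tracking table */
}

#undef netif_receive_skb	/* call the real symbol inside the shim */

int netif_receive_skb_debug(struct sk_buff *skb, const char *func, int line)
{
	my_untrack(skb, func, line);	/* log/validate the call site */
	return netif_receive_skb(skb);	/* real kernel entry point */
}

/* Tracked callers are rewritten to report their own location. */
#define netif_receive_skb(_skb) \
	netif_receive_skb_debug(_skb, __func__, __LINE__)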
--- a/net80211/ieee80211_skb.h
+++ b/net80211/ieee80211_skb.h
@@ -116,7 +116,7 @@
 int vlan_hwaccel_receive_skb_debug(struct sk_buff *skb,
 struct vlan_group *grp, unsigned short vlan_tag,
 const char* func, int line);
-int netif_rx_debug(struct sk_buff *skb, const char* func, int line);
+int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line);
 struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
 const char *func, int line);
 struct sk_buff * dev_alloc_skb_debug(unsigned int length,
@@ -151,7 +151,7 @@
 #undef dev_queue_xmit
 #undef kfree_skb
 #undef kfree_skb_fast
-#undef netif_rx
+#undef netif_receive_skb
 #undef pskb_copy
 #undef skb_clone
 #undef skb_copy
@@ -168,8 +168,8 @@
 skb_copy_expand_debug(_skb, _newheadroom, _newtailroom, _gfp_mask, __func__, __LINE__)
 #define vlan_hwaccel_receive_skb(_skb, _grp, _tag) \
 vlan_hwaccel_receive_skb_debug(_skb, _grp, _tag, __func__, __LINE__)
-#define netif_rx(_skb) \
- netif_rx_debug(_skb, __func__, __LINE__)
+#define netif_receive_skb(_skb) \
+ netif_receive_skb_debug(_skb, __func__, __LINE__)
 #define alloc_skb(_length, _gfp_mask) \
 alloc_skb_debug(_length, _gfp_mask, __func__, __LINE__)
 #define dev_alloc_skb(_length) \