-diff -urN madwifi-ng-refcount-r2313-20070505.old/ath/if_ath.c madwifi-ng-refcount-r2313-20070505.dev/ath/if_ath.c
---- madwifi-ng-refcount-r2313-20070505.old/ath/if_ath.c 2007-05-13 18:17:56.576968032 +0200
-+++ madwifi-ng-refcount-r2313-20070505.dev/ath/if_ath.c 2007-05-13 18:17:56.594965296 +0200
-@@ -170,7 +170,7 @@
- int, u_int32_t);
+--- a/ath/if_ath.c
++++ b/ath/if_ath.c
+@@ -184,7 +184,11 @@
+ struct sk_buff *, int, int, u_int64_t);
static void ath_setdefantenna(struct ath_softc *, u_int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
-static void ath_rx_tasklet(TQUEUE_ARG);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++static int ath_rx_poll(struct napi_struct *napi, int budget);
++#else
+static int ath_rx_poll(struct net_device *dev, int *budget);
++#endif
static int ath_hardstart(struct sk_buff *, struct net_device *);
static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
#ifdef ATH_SUPERG_COMP
-@@ -420,7 +420,6 @@
- ATH_TXBUF_LOCK_INIT(sc);
- ATH_RXBUF_LOCK_INIT(sc);
+@@ -376,6 +380,9 @@
+ u_int32_t new_clamped_maxtxpower);
+ static u_int32_t ath_get_real_maxtxpower(struct ath_softc *sc);
+
++static void ath_poll_disable(struct net_device *dev);
++static void ath_poll_enable(struct net_device *dev);
++
+ /* calibrate every 30 secs in steady state but check every second at first. */
+ static int ath_calinterval = ATH_SHORT_CALINTERVAL;
+ static int ath_countrycode = CTRY_DEFAULT; /* country code */
+@@ -547,7 +554,6 @@
+
+ atomic_set(&sc->sc_txbuf_counter, 0);
- ATH_INIT_TQUEUE(&sc->sc_rxtq, ath_rx_tasklet, dev);
ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet, dev);
ATH_INIT_TQUEUE(&sc->sc_bmisstq, ath_bmiss_tasklet, dev);
ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet, dev);
-@@ -674,6 +673,8 @@
+@@ -821,6 +827,12 @@
dev->set_mac_address = ath_set_mac_address;
- dev->change_mtu = ath_change_mtu;
- dev->tx_queue_len = ATH_TXBUF - 1; /* 1 for mgmt frame */
+ dev->change_mtu = ath_change_mtu;
+ dev->tx_queue_len = ATH_TXBUF - ATH_TXBUF_MGT_RESERVED;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ netif_napi_add(dev, &sc->sc_napi, ath_rx_poll, 64);
++#else
+ dev->poll = ath_rx_poll;
+ dev->weight = 64;
++#endif
#ifdef USE_HEADERLEN_RESV
dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
sizeof(struct llc) +
-@@ -1645,6 +1646,7 @@
- */
- ath_hal_getisr(ah, &status); /* NB: clears ISR too */
- DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
+@@ -2220,6 +2232,7 @@
+ (status & HAL_INT_GLOBAL) ? " HAL_INT_GLOBAL" : ""
+ );
+
+ sc->sc_isr = status;
status &= sc->sc_imask; /* discard unasked for bits */
- if (status & HAL_INT_FATAL) {
- sc->sc_stats.ast_hardware++;
-@@ -1684,7 +1686,12 @@
- * might take too long to fire */
- ath_hal_process_noisefloor(ah);
- sc->sc_channoise = ath_hal_get_channel_noise(ah, &(sc->sc_curchan));
+ /* As soon as we know we have a real interrupt we intend to service,
+ * we will check to see if we need an initial hardware TSF reading.
+@@ -2277,7 +2290,23 @@
+ }
+ if (status & (HAL_INT_RX | HAL_INT_RXPHY)) {
+ ath_uapsd_processtriggers(sc, hw_tsf);
- ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
+ sc->sc_isr &= ~HAL_INT_RX;
-+ if (netif_rx_schedule_prep(dev)) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ if (netif_rx_schedule_prep(dev, &sc->sc_napi))
++#else
++ if (netif_rx_schedule_prep(dev))
++#endif
++ {
++#ifndef ATH_PRECISE_TSF
+ sc->sc_imask &= ~HAL_INT_RX;
+ ath_hal_intrset(ah, sc->sc_imask);
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ __netif_rx_schedule(dev, &sc->sc_napi);
++#else
+ __netif_rx_schedule(dev);
++#endif
+ }
}
if (status & HAL_INT_TX) {
#ifdef ATH_SUPERG_DYNTURBO
-@@ -1710,6 +1717,11 @@
+@@ -2303,6 +2332,11 @@
}
- }
+ }
#endif
+ /* disable transmit interrupt */
+ sc->sc_isr &= ~HAL_INT_TX;
+ sc->sc_imask &= ~HAL_INT_TX;
+
ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
- sc->sc_tx_start = 0;
- }
-@@ -2221,12 +2233,13 @@
- * Insert the frame on the outbound list and
- * pass it on to the hardware.
- */
-- ATH_TXQ_LOCK(txq);
-+ ATH_TXQ_LOCK_BH(txq);
- if (ni && ni->ni_vap && txq == &ATH_VAP(ni->ni_vap)->av_mcastq) {
- /*
- * The CAB queue is started from the SWBA handler since
- * frames only go out on DTIM and to avoid possible races.
- */
-+ sc->sc_imask &= ~HAL_INT_SWBA;
- ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_SWBA);
- ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
- DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: txq depth = %d\n", __func__, txq->axq_depth);
-@@ -2242,6 +2255,7 @@
- ito64(bf->bf_daddr), bf->bf_desc);
}
- txq->axq_link = &lastds->ds_link;
-+ sc->sc_imask |= HAL_INT_SWBA;
- ath_hal_intrset(ah, sc->sc_imask);
- } else {
- ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
-@@ -2275,7 +2289,7 @@
- sc->sc_tx_start = jiffies;
- }
- }
-- ATH_TXQ_UNLOCK(txq);
-+ ATH_TXQ_UNLOCK_BH(txq);
+ if (status & HAL_INT_BMISS) {
+@@ -2515,6 +2549,7 @@
+ if (sc->sc_tx99 != NULL)
+ sc->sc_tx99->start(sc->sc_tx99);
+ #endif
++ ath_poll_enable(dev);
- sc->sc_devstats.tx_packets++;
- sc->sc_devstats.tx_bytes += framelen;
-@@ -2426,8 +2440,14 @@
- unsigned int pktlen;
- int framecnt;
+ done:
+ ATH_UNLOCK(sc);
+@@ -2555,6 +2590,9 @@
+ if (sc->sc_tx99 != NULL)
+ sc->sc_tx99->stop(sc->sc_tx99);
+ #endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ ath_poll_disable(dev);
++#endif
+ netif_stop_queue(dev); /* XXX re-enabled by ath_newstate */
+ dev->flags &= ~IFF_RUNNING; /* NB: avoid recursion */
+ ieee80211_stop_running(ic); /* stop all VAPs */
+@@ -4013,12 +4051,47 @@
+ return ath_keyset(sc, k, mac, vap->iv_bss);
+ }
++static void ath_poll_disable(struct net_device *dev)
++{
++ struct ath_softc *sc = dev->priv;
++
+ /*
-+ * NB: using _BH style locking even though this function may be called
-+ * at interrupt time (within tasklet or bh). This should be harmless
-+ * and this function calls others (i.e., ath_tx_start()) which do
-+ * the same.
++ * XXX Using in_softirq is not right since we might
++ * be called from other soft irq contexts than
++ * ath_rx_poll
+ */
- for (;;) {
-- ATH_TXQ_LOCK(txq);
-+ ATH_TXQ_LOCK_BH(txq);
-
- bf_ff = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
- if ((!bf_ff) || ath_ff_flushdonetest(txq, bf_ff)) {
-@@ -2441,7 +2461,7 @@
- ATH_NODE(ni)->an_tx_ffbuf[bf_ff->bf_skb->priority] = NULL;
- TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
-
-- ATH_TXQ_UNLOCK(txq);
-+ ATH_TXQ_UNLOCK_BH(txq);
-
- /* encap and xmit */
- bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
-@@ -2462,15 +2482,16 @@
- }
- bf_ff->bf_node = NULL;
-
-- ATH_TXBUF_LOCK_IRQ(sc);
-+ ATH_TXBUF_LOCK_BH(sc);
- STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
-- ATH_TXBUF_UNLOCK_IRQ(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
- }
-+ ATH_TXQ_UNLOCK_BH(txq);
- }
- #endif
-
- #define ATH_HARDSTART_GET_TX_BUF_WITH_LOCK \
-- ATH_TXBUF_LOCK_IRQ(sc); \
-+ ATH_TXBUF_LOCK_BH(sc); \
- bf = STAILQ_FIRST(&sc->sc_txbuf); \
- if (bf != NULL) { \
- STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); \
-@@ -2485,10 +2506,21 @@
- sc->sc_devstopped = 1; \
- ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL); \
- } \
-- ATH_TXBUF_UNLOCK_IRQ(sc); \
++ if (!in_softirq()) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ napi_disable(&sc->sc_napi);
++#else
++ netif_poll_disable(dev);
++#endif
++ }
++}
++
++static void ath_poll_enable(struct net_device *dev)
++{
++ struct ath_softc *sc = dev->priv;
+
-+#define ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF \
-+ ATH_TXBUF_UNLOCK_BH(sc); \
-+ if (bf == NULL) { /* NB: should not happen */ \
-+ DPRINTF(sc,ATH_DEBUG_XMIT,"%s: discard, no xmit buf\n", __func__); \
-+ sc->sc_stats.ast_tx_nobuf++; \
-+ goto hardstart_fail; \
++ /* NB: see above */
++ if (!in_softirq()) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ napi_enable(&sc->sc_napi);
++#else
++ netif_poll_enable(dev);
++#endif
+ }
++}
++
+
-+#define ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_ON \
-+ ATH_TXBUF_UNLOCK_BH(sc); \
- if (bf == NULL) { /* NB: should not happen */ \
- DPRINTF(sc,ATH_DEBUG_XMIT, \
- "%s: discard, no xmit buf\n", __func__); \
-+ ATH_TXQ_UNLOCK_BH(txq); \
- sc->sc_stats.ast_tx_nobuf++; \
- goto hardstart_fail; \
- }
-@@ -2490,6 +2490,7 @@
- DPRINTF(sc,ATH_DEBUG_XMIT, \
- "%s: discard, no xmit buf\n", __func__); \
- sc->sc_stats.ast_tx_nobuf++; \
-+ goto hardstart_fail; \
- }
-
/*
-@@ -2552,6 +2584,7 @@
- if (M_FLAG_GET(skb, M_UAPSD)) {
- /* bypass FF handling */
- ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
-+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
- if (bf == NULL)
- goto hardstart_fail;
- goto ff_bypass;
-@@ -2573,7 +2606,7 @@
- /* NB: use this lock to protect an->an_ff_txbuf in athff_can_aggregate()
- * call too.
- */
-- ATH_TXQ_LOCK(txq);
-+ ATH_TXQ_LOCK_BH(txq);
- if (athff_can_aggregate(sc, eh, an, skb, vap->iv_fragthreshold, &ff_flush)) {
-
- if (an->an_tx_ffbuf[skb->priority]) { /* i.e., frame on the staging queue */
-@@ -2583,7 +2616,7 @@
- TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
- an->an_tx_ffbuf[skb->priority] = NULL;
-
-- ATH_TXQ_UNLOCK(txq);
-+ ATH_TXQ_UNLOCK_BH(txq);
-
- /*
- * chain skbs and add FF magic
-@@ -2610,6 +2643,7 @@
- * to give the buffer back.
- */
- ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
-+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_ON;
- if (bf == NULL) {
- ATH_TXQ_UNLOCK(txq);
- goto hardstart_fail;
-@@ -2624,7 +2658,7 @@
-
- TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
-
-- ATH_TXQ_UNLOCK(txq);
-+ ATH_TXQ_UNLOCK_BH(txq);
-
- return 0;
- }
-@@ -2635,7 +2669,7 @@
- TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
- an->an_tx_ffbuf[skb->priority] = NULL;
-
-- ATH_TXQ_UNLOCK(txq);
-+ ATH_TXQ_UNLOCK_BH(txq);
-
- /* encap and xmit */
- bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
-@@ -2665,9 +2699,9 @@
- }
- bf_ff->bf_node = NULL;
-
-- ATH_TXBUF_LOCK(sc);
-+ ATH_TXBUF_LOCK_BH(sc);
- STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
-- ATH_TXBUF_UNLOCK(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
- goto ff_flushdone;
- }
- /*
-@@ -2676,14 +2677,13 @@
- else if (an->an_tx_ffbuf[skb->priority]) {
- DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
- "%s: Out-Of-Order fast-frame\n", __func__);
-- ATH_TXQ_UNLOCK(txq);
-+ ATH_TXQ_UNLOCK_BH(txq);
- } else
-- ATH_TXQ_UNLOCK(txq);
-+ ATH_TXQ_UNLOCK_BH(txq);
-
- ff_flushdone:
- ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
-- if (bf == NULL)
-- goto hardstart_fail;
-+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
- }
-
- ff_bypass:
-@@ -2691,6 +2725,7 @@
- #else /* ATH_SUPERG_FF */
-
- ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
-+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
-
- #endif /* ATH_SUPERG_FF */
-
-@@ -2712,7 +2747,7 @@
- * Allocate 1 ath_buf for each frame given 1 was
- * already alloc'd
- */
-- ATH_TXBUF_LOCK(sc);
-+ ATH_TXBUF_LOCK_BH(sc);
- for (bfcnt = 1; bfcnt < framecnt; ++bfcnt) {
- if ((tbf = STAILQ_FIRST(&sc->sc_txbuf)) != NULL) {
- STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
-@@ -2733,11 +2768,11 @@
- STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
- }
- }
-- ATH_TXBUF_UNLOCK(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
- STAILQ_INIT(&bf_head);
- goto hardstart_fail;
- }
-- ATH_TXBUF_UNLOCK(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
-
- while ((bf = STAILQ_FIRST(&bf_head)) != NULL && skb != NULL) {
- unsigned int nextfraglen = 0;
-@@ -2773,7 +2808,7 @@
-
- hardstart_fail:
- if (!STAILQ_EMPTY(&bf_head)) {
-- ATH_TXBUF_LOCK(sc);
-+ ATH_TXBUF_LOCK_BH(sc);
- STAILQ_FOREACH_SAFE(tbf, &bf_head, bf_list, tempbf) {
- tbf->bf_skb = NULL;
- tbf->bf_node = NULL;
-@@ -2783,7 +2818,7 @@
-
- STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
- }
-- ATH_TXBUF_UNLOCK(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
- }
-
- /* free sk_buffs */
-@@ -2826,7 +2861,7 @@
- /*
- * Grab a TX buffer and associated resources.
- */
-- ATH_TXBUF_LOCK_IRQ(sc);
-+ ATH_TXBUF_LOCK_BH(sc);
- bf = STAILQ_FIRST(&sc->sc_txbuf);
- if (bf != NULL)
- STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
-@@ -2837,7 +2872,7 @@
- sc->sc_devstopped=1;
- ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);
- }
-- ATH_TXBUF_UNLOCK_IRQ(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
- if (bf == NULL) {
- printk("ath_mgtstart: discard, no xmit buf\n");
- sc->sc_stats.ast_tx_nobufmgt++;
-@@ -2866,9 +2901,9 @@
- bf->bf_skb = NULL;
- bf->bf_node = NULL;
-
-- ATH_TXBUF_LOCK_IRQ(sc);
-+ ATH_TXBUF_LOCK_BH(sc);
- STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
-- ATH_TXBUF_UNLOCK_IRQ(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
- }
- dev_kfree_skb_any(skb);
- skb = NULL;
-@@ -3336,10 +3371,10 @@
- *
- * XXX Using in_softirq is not right since we might
- * be called from other soft irq contexts than
+ * Block/unblock tx+rx processing while a key change is done.
+ * We assume the caller serializes key management operations
+ * so we only need to worry about synchronization with other
+ * uses that originate in the driver.
+ */
++#define IS_UP(_dev) \
++ (((_dev)->flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP))
+ static void
+ ath_key_update_begin(struct ieee80211vap *vap)
+ {
+@@ -4032,14 +4105,9 @@
+ * When called from the rx tasklet we cannot use
+ * tasklet_disable because it will block waiting
+ * for us to complete execution.
+- *
+- * XXX Using in_softirq is not right since we might
+- * be called from other soft irq contexts than
- * ath_rx_tasklet.
-+ * ath_rx_poll
*/
- if (!in_softirq())
+- if (!in_softirq())
- tasklet_disable(&sc->sc_rxtq);
-+ netif_poll_disable(dev);
- netif_stop_queue(dev);
+- netif_stop_queue(dev);
++ if (IS_UP(vap->iv_dev))
++ netif_stop_queue(dev);
}
-@@ -3352,7 +3387,7 @@
- DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
- netif_start_queue(dev);
- if (!in_softirq()) /* NB: see above */
+ static void
+@@ -4051,9 +4119,9 @@
+ #endif
+
+ DPRINTF(sc, ATH_DEBUG_KEYCACHE, "End\n");
+- netif_wake_queue(dev);
+- if (!in_softirq()) /* NB: see above */
- tasklet_enable(&sc->sc_rxtq);
-+ netif_poll_enable(dev);
++
++ if (IS_UP(vap->iv_dev))
++ netif_wake_queue(dev);
}
/*
-@@ -4912,9 +4947,9 @@
- bf->bf_node = NULL;
- bf->bf_desc->ds_link = 0;
-
-- ATH_TXBUF_LOCK_IRQ(sc);
-+ ATH_TXBUF_LOCK_BH(sc);
- STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
-- ATH_TXBUF_UNLOCK_IRQ(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
-
- an->an_uapsd_overflowqdepth--;
- }
-@@ -5585,13 +5620,12 @@
+@@ -6360,15 +6428,25 @@
sc->sc_rxotherant = 0;
}
-static void
-ath_rx_tasklet(TQUEUE_ARG data)
+static int
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ath_rx_poll(struct napi_struct *napi, int budget)
++#else
+ath_rx_poll(struct net_device *dev, int *budget)
++#endif
{
#define PA2DESC(_sc, _pa) \
((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
- struct net_device *dev = (struct net_device *)data;
- struct ath_buf *bf;
+- struct ath_buf *bf;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ struct ath_softc *sc = container_of(napi, struct ath_softc, sc_napi);
++ struct net_device *dev = sc->sc_dev;
++ u_int rx_limit = budget;
++#else
struct ath_softc *sc = dev->priv;
++ u_int rx_limit = min(dev->quota, *budget);
++#endif
++ struct ath_buf *bf;
struct ieee80211com *ic = &sc->sc_ic;
-@@ -5602,11 +5636,15 @@
+ struct ath_hal *ah = sc ? sc->sc_ah : NULL;
+ struct ath_desc *ds;
+@@ -6378,8 +6456,10 @@
unsigned int len;
int type;
u_int phyerr;
-+ int processed = 0, early_stop = 0;
-+ int rx_limit = dev->quota;
++ u_int processed = 0, early_stop = 0;
- /* Let the 802.11 layer know about the new noise floor */
- ic->ic_channoise = sc->sc_channoise;
-
- DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s\n", __func__);
-+
+ DPRINTF(sc, ATH_DEBUG_RX_PROC, "invoked\n");
+process_rx_again:
do {
bf = STAILQ_FIRST(&sc->sc_rxbuf);
if (bf == NULL) { /* XXX ??? can this happen */
-@@ -5630,6 +5668,13 @@
+@@ -6403,6 +6483,15 @@
/* NB: never process the self-linked entry at the end */
break;
}
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ processed++;
-+ if (rx_limit-- < 0) {
++#endif
++ if (rx_limit-- < 2) {
+ early_stop = 1;
+ break;
+ }
+
skb = bf->bf_skb;
- if (skb == NULL) { /* XXX ??? can this happen */
- printk("%s: no skbuff (%s)\n", dev->name, __func__);
-@@ -5668,6 +5668,7 @@
+ if (skb == NULL) {
+ EPRINTF(sc, "Dropping; buffer contains NULL skbuff.\n");
+@@ -6450,6 +6539,7 @@
sc->sc_stats.ast_rx_phyerr++;
- phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
+ phyerr = rs->rs_phyerr & 0x1f;
sc->sc_stats.ast_rx_phy[phyerr]++;
+ goto rx_next;
}
- if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) {
+ if (rs->rs_status & HAL_RXERR_DECRYPT) {
/*
-@@ -5878,6 +5923,25 @@
+@@ -6645,9 +6735,43 @@
STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
ATH_RXBUF_UNLOCK_IRQ(sc);
} while (ath_rxbuf_init(sc, bf) == 0);
+ if (!early_stop) {
++ unsigned long flags;
+ /* Check if more data is received while we were
+ * processing the descriptor chain.
+ */
-+ ATH_DISABLE_INTR();
++#ifndef ATH_PRECISE_TSF
++ local_irq_save(flags);
+ if (sc->sc_isr & HAL_INT_RX) {
++ u_int64_t hw_tsf = ath_hal_gettsf64(ah);
+ sc->sc_isr &= ~HAL_INT_RX;
-+ ATH_ENABLE_INTR();
-+ ath_uapsd_processtriggers(sc);
++ local_irq_restore(flags);
++ ath_uapsd_processtriggers(sc, hw_tsf);
+ goto process_rx_again;
+ }
-+ netif_rx_complete(dev);
-+
++#endif
++#ifndef ATH_PRECISE_TSF
+ sc->sc_imask |= HAL_INT_RX;
+ ath_hal_intrset(ah, sc->sc_imask);
-+ ATH_ENABLE_INTR();
++ local_irq_restore(flags);
++#endif
+ }
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ netif_rx_complete(dev, napi);
++#else
++ netif_rx_complete(dev);
+ *budget -= processed;
-
++ dev->quota -= processed;
++#endif
+
/* rx signal state monitoring */
ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
-@@ -5885,6 +5949,7 @@
- sc->sc_rtasksched = 1;
- schedule_work(&sc->sc_radartask);
- }
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return processed;
++#else
+ return early_stop;
++#endif
#undef PA2DESC
}
-@@ -6160,22 +6225,22 @@
- }
- }
-
-- ATH_TXBUF_LOCK_IRQ(sc);
-+ ATH_TXBUF_LOCK_BH(sc);
- bf = STAILQ_FIRST(&sc->sc_grppollbuf);
- if (bf != NULL)
- STAILQ_REMOVE_HEAD(&sc->sc_grppollbuf, bf_list);
- else {
- DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs\n", __func__);
-- ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
- return;
- }
- /* XXX use a counter and leave at least one for mgmt frames */
- if (STAILQ_EMPTY(&sc->sc_grppollbuf)) {
- DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs left\n", __func__);
-- ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
- return;
- }
-- ATH_TXBUF_UNLOCK_IRQ(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
-
- bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
- skb->data, skb->len, BUS_DMA_TODEVICE);
-@@ -6641,9 +6706,9 @@
- dev_kfree_skb(lastbuf->bf_skb);
- lastbuf->bf_skb = NULL;
- ieee80211_unref_node(&lastbuf->bf_node);
-- ATH_TXBUF_LOCK_IRQ(sc);
-+ ATH_TXBUF_LOCK_BH(sc);
- STAILQ_INSERT_TAIL(&sc->sc_txbuf, lastbuf, bf_list);
-- ATH_TXBUF_UNLOCK_IRQ(sc);
-+ ATH_TXBUF_UNLOCK_BH(sc);
-
- /*
- * move oldest from overflow to delivery
-@@ -7462,9 +7527,6 @@
- if (sc->sc_reapcount > ATH_TXBUF_FREE_THRESHOLD) {
- if (!sc->sc_dfswait)
- netif_start_queue(sc->sc_dev);
-- DPRINTF(sc, ATH_DEBUG_TX_PROC,
-- "%s: tx tasklet restart the queue\n",
-- __func__);
- sc->sc_reapcount = 0;
- sc->sc_devstopped = 0;
- } else
-@@ -7499,11 +7561,22 @@
+@@ -8298,12 +8422,24 @@
+ {
struct net_device *dev = (struct net_device *)data;
struct ath_softc *sc = dev->priv;
++ unsigned long flags;
+process_tx_again:
if (txqactive(sc->sc_ah, 0))
ath_tx_processq(sc, &sc->sc_txq[0]);
if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
ath_tx_processq(sc, sc->sc_cabq);
-+ ATH_DISABLE_INTR();
++ local_irq_save(flags);
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
-+ ATH_ENABLE_INTR();
++ local_irq_restore(flags);
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
-+ ATH_ENABLE_INTR();
++ local_irq_restore(flags);
+
netif_wake_queue(dev);
if (sc->sc_softled)
-@@ -7520,6 +7593,7 @@
+@@ -8319,7 +8455,9 @@
+ {
struct net_device *dev = (struct net_device *)data;
struct ath_softc *sc = dev->priv;
++ unsigned long flags;
+process_tx_again:
/*
* Process each active queue.
*/
-@@ -7540,6 +7614,16 @@
+@@ -8340,6 +8478,16 @@
if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
ath_tx_processq(sc, sc->sc_uapsdq);
-+ ATH_DISABLE_INTR();
++ local_irq_save(flags);
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
-+ ATH_ENABLE_INTR();
++ local_irq_restore(flags);
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
-+ ATH_ENABLE_INTR();
++ local_irq_restore(flags);
+
netif_wake_queue(dev);
if (sc->sc_softled)
-@@ -7557,6 +7641,7 @@
+@@ -8355,13 +8503,25 @@
+ struct net_device *dev = (struct net_device *)data;
+ struct ath_softc *sc = dev->priv;
unsigned int i;
++ unsigned long flags;
- /* Process each active queue. */
+ /* Process each active queue. This includes sc_cabq, sc_xrtq and
+ * sc_uapsdq */
+process_tx_again:
for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
ath_tx_processq(sc, &sc->sc_txq[i]);
-@@ -7565,6 +7650,16 @@
- ath_tx_processq(sc, sc->sc_xrtxq);
- #endif
-+ ATH_DISABLE_INTR();
++ local_irq_save(flags);
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
-+ ATH_ENABLE_INTR();
++ local_irq_restore(flags);
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
-+ ATH_ENABLE_INTR();
++ local_irq_restore(flags);
+
netif_wake_queue(dev);
if (sc->sc_softled)
-@@ -7662,6 +7663,7 @@
- ath_draintxq(struct ath_softc *sc)
- {
- struct ath_hal *ah = sc->sc_ah;
-+ int npend = 0;
- unsigned int i;
-
- /* XXX return value */
-@@ -9221,9 +9316,9 @@
+@@ -10296,9 +10456,9 @@
dev->mtu = mtu;
if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
/* NB: the rx buffers may need to be reallocated */
- tasklet_disable(&sc->sc_rxtq);
-+ netif_poll_disable(dev);
++ ath_poll_disable(dev);
error = ath_reset(dev);
- tasklet_enable(&sc->sc_rxtq);
-+ netif_poll_enable(dev);
++ ath_poll_enable(dev);
}
ATH_UNLOCK(sc);
-diff -urN madwifi-ng-refcount-r2313-20070505.old/ath/if_athvar.h madwifi-ng-refcount-r2313-20070505.dev/ath/if_athvar.h
---- madwifi-ng-refcount-r2313-20070505.old/ath/if_athvar.h 2007-05-13 18:17:56.363000560 +0200
-+++ madwifi-ng-refcount-r2313-20070505.dev/ath/if_athvar.h 2007-05-13 18:17:56.595965144 +0200
-@@ -47,6 +47,10 @@
- #include "if_athioctl.h"
- #include "net80211/ieee80211.h" /* XXX for WME_NUM_AC */
+--- a/ath/if_athvar.h
++++ b/ath/if_athvar.h
+@@ -53,6 +53,10 @@
+ # include <asm/bitops.h>
+ #endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled() 0
+#endif
+
/*
* Deduce if tasklets are available. If not then
* fall back to using the immediate work queue.
-@@ -477,8 +481,12 @@
- #define ATH_TXQ_LOCK_DESTROY(_tq)
- #define ATH_TXQ_LOCK(_tq) spin_lock(&(_tq)->axq_lock)
- #define ATH_TXQ_UNLOCK(_tq) spin_unlock(&(_tq)->axq_lock)
--#define ATH_TXQ_LOCK_BH(_tq) spin_lock_bh(&(_tq)->axq_lock)
--#define ATH_TXQ_UNLOCK_BH(_tq) spin_unlock_bh(&(_tq)->axq_lock)
-+#define ATH_TXQ_LOCK_BH(_tq) \
-+ if (!irqs_disabled()) \
-+ spin_lock_bh(&(_tq)->axq_lock)
-+#define ATH_TXQ_UNLOCK_BH(_tq) \
-+ if (!irqs_disabled()) \
-+ spin_unlock_bh(&(_tq)->axq_lock)
- #define ATH_TXQ_LOCK_IRQ(_tq) do { \
- unsigned long __axq_lockflags; \
- spin_lock_irqsave(&(_tq)->axq_lock, __axq_lockflags);
-@@ -627,7 +635,6 @@
+@@ -616,6 +620,9 @@
+ struct ath_softc {
+ struct ieee80211com sc_ic; /* NB: must be first */
+ struct net_device *sc_dev;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ struct napi_struct sc_napi;
++#endif
+ void __iomem *sc_iobase; /* address of the device */
+ struct semaphore sc_lock; /* dev-level lock */
+ struct net_device_stats sc_devstats; /* device statistics */
+@@ -730,7 +737,6 @@
struct ath_buf *sc_rxbufcur; /* current rx buffer */
u_int32_t *sc_rxlink; /* link ptr in last RX desc */
- spinlock_t sc_rxbuflock;
+ spinlock_t sc_rxbuflock;
- struct ATH_TQ_STRUCT sc_rxtq; /* rx intr tasklet */
struct ATH_TQ_STRUCT sc_rxorntq; /* rxorn intr tasklet */
u_int8_t sc_defant; /* current default antenna */
- u_int8_t sc_rxotherant; /* rx's on non-default antenna*/
-@@ -640,6 +647,7 @@
+ u_int8_t sc_rxotherant; /* RXs on non-default antenna */
+@@ -745,6 +751,7 @@
u_int sc_txintrperiod; /* tx interrupt batching */
struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
- struct ath_txq *sc_ac2q[WME_NUM_AC]; /* WME AC -> h/w qnum */
+ struct ath_txq *sc_ac2q[WME_NUM_AC]; /* WME AC -> h/w qnum */
+ HAL_INT sc_isr; /* unmasked ISR state */
struct ATH_TQ_STRUCT sc_txtq; /* tx intr tasklet */
- u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
+ u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
struct ath_descdma sc_bdma; /* beacon descriptors */
-@@ -706,8 +714,12 @@
- #define ATH_TXBUF_LOCK_DESTROY(_sc)
- #define ATH_TXBUF_LOCK(_sc) spin_lock(&(_sc)->sc_txbuflock)
- #define ATH_TXBUF_UNLOCK(_sc) spin_unlock(&(_sc)->sc_txbuflock)
--#define ATH_TXBUF_LOCK_BH(_sc) spin_lock_bh(&(_sc)->sc_txbuflock)
--#define ATH_TXBUF_UNLOCK_BH(_sc) spin_unlock_bh(&(_sc)->sc_txbuflock)
-+#define ATH_TXBUF_LOCK_BH(_sc) \
-+ if (!irqs_disabled()) \
-+ spin_lock_bh(&(_sc)->sc_txbuflock)
-+#define ATH_TXBUF_UNLOCK_BH(_sc) \
-+ if (!irqs_disabled()) \
-+ spin_unlock_bh(&(_sc)->sc_txbuflock)
- #define ATH_TXBUF_LOCK_IRQ(_sc) do { \
- unsigned long __txbuflockflags; \
- spin_lock_irqsave(&(_sc)->sc_txbuflock, __txbuflockflags);
-@@ -725,8 +737,12 @@
- #define ATH_RXBUF_LOCK_DESTROY(_sc)
- #define ATH_RXBUF_LOCK(_sc) spin_lock(&(_sc)->sc_rxbuflock)
- #define ATH_RXBUF_UNLOCK(_sc) spin_unlock(&(_sc)->sc_rxbuflock)
--#define ATH_RXBUF_LOCK_BH(_sc) spin_lock_bh(&(_sc)->sc_rxbuflock)
--#define ATH_RXBUF_UNLOCK_BH(_sc) spin_unlock_bh(&(_sc)->sc_rxbuflock)
-+#define ATH_RXBUF_LOCK_BH(_sc) \
-+ if (!irqs_disabled()) \
-+ spin_lock_bh(&(_sc)->sc_rxbuflock)
-+#define ATH_RXBUF_UNLOCK_BH(_sc) \
-+ if (!irqs_disabled()) \
-+ spin_unlock_bh(&(_sc)->sc_rxbuflock)
- #define ATH_RXBUF_LOCK_IRQ(_sc) do { \
- unsigned long __rxbuflockflags; \
- spin_lock_irqsave(&(_sc)->sc_rxbuflock, __rxbuflockflags);
-@@ -736,6 +752,8 @@
- #define ATH_RXBUF_UNLOCK_IRQ_EARLY(_sc) \
- spin_unlock_irqrestore(&(_sc)->sc_rxbuflock, __rxbuflockflags);
+@@ -858,6 +865,8 @@
+ #define ATH_TXBUF_LOCK_CHECK(_sc)
+ #endif
+#define ATH_DISABLE_INTR local_irq_disable
+#define ATH_ENABLE_INTR local_irq_enable
- /* Protects the device from concurrent accesses */
- #define ATH_LOCK_INIT(_sc) init_MUTEX(&(_sc)->sc_lock)
-diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_beacon.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_beacon.c
---- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_beacon.c 2007-01-30 05:01:29.000000000 +0100
-+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_beacon.c 2007-05-13 18:17:56.596964992 +0200
-@@ -286,7 +286,7 @@
- int len_changed = 0;
- u_int16_t capinfo;
-
-- IEEE80211_LOCK(ic);
-+ IEEE80211_BEACON_LOCK(ic);
-
- if ((ic->ic_flags & IEEE80211_F_DOTH) &&
- (vap->iv_flags & IEEE80211_F_CHANSWITCH) &&
-@@ -547,7 +547,7 @@
- vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
- }
-
-- IEEE80211_UNLOCK(ic);
-+ IEEE80211_BEACON_UNLOCK(ic);
-
- return len_changed;
- }
-diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_input.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_input.c
---- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_input.c 2007-05-13 18:17:56.106039624 +0200
-+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_input.c 2007-05-13 18:17:56.597964840 +0200
-@@ -1148,8 +1148,9 @@
- if (ni->ni_vlan != 0 && vap->iv_vlgrp != NULL) {
+ #define ATH_RXBUF_LOCK_INIT(_sc) spin_lock_init(&(_sc)->sc_rxbuflock)
+ #define ATH_RXBUF_LOCK_DESTROY(_sc)
+--- a/net80211/ieee80211_input.c
++++ b/net80211/ieee80211_input.c
+@@ -1198,7 +1198,7 @@
/* attach vlan tag */
- vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan);
-- } else
-- netif_rx(skb);
-+ } else {
-+ netif_receive_skb(skb);
-+ }
- dev->last_rx = jiffies;
- }
- }
-@@ -3623,9 +3624,9 @@
- }
-
- /* Okay, take the first queued packet and put it out... */
-- IEEE80211_NODE_SAVEQ_LOCK(ni);
-+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
- IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
-- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
-+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
- if (skb == NULL) {
- IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_POWER, wh->i_addr2,
- "%s", "recv ps-poll, but queue empty");
-diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_linux.h madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_linux.h
---- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_linux.h 2007-05-04 02:10:06.000000000 +0200
-+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_linux.h 2007-05-13 18:17:56.598964688 +0200
-@@ -31,6 +31,10 @@
-
- #include <linux/wireless.h>
-
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+#define irqs_disabled() 0
-+#endif
-+
- /*
- * Task deferral
- *
-@@ -86,8 +90,12 @@
- } while (0)
- #define IEEE80211_UNLOCK_IRQ_EARLY(_ic) \
- spin_unlock_irqrestore(&(_ic)->ic_comlock, __ilockflags);
--#define IEEE80211_LOCK_BH(_ic) spin_lock_bh(&(_ic)->ic_comlock)
--#define IEEE80211_UNLOCK_BH(_ic) spin_unlock_bh(&(_ic)->ic_comlock)
-+#define IEEE80211_LOCK_BH(_ic) \
-+ if (!irqs_disabled()) \
-+ spin_lock_bh(&(_ic)->ic_comlock)
-+#define IEEE80211_UNLOCK_BH(_ic) \
-+ if (!irqs_disabled()) \
-+ spin_unlock_bh(&(_ic)->ic_comlock)
- #define IEEE80211_LOCK(_ic) spin_lock(&(_ic)->ic_comlock)
- #define IEEE80211_UNLOCK(_ic) spin_unlock(&(_ic)->ic_comlock)
-
-@@ -104,15 +112,22 @@
- #define IEEE80211_VAPS_LOCK_DESTROY(_ic)
- #define IEEE80211_VAPS_LOCK(_ic) spin_lock(&(_ic)->ic_vapslock);
- #define IEEE80211_VAPS_UNLOCK(_ic) spin_unlock(&(_ic)->ic_vapslock);
--#define IEEE80211_VAPS_LOCK_BH(_ic) spin_lock_bh(&(_ic)->ic_vapslock);
--#define IEEE80211_VAPS_UNLOCK_BH(_ic) spin_unlock_bh(&(_ic)->ic_vapslock);
--#define IEEE80211_VAPS_LOCK_IRQ(_ic) do { \
-- int _vaps_lockflags; \
-- spin_lock_irqsave(&(_ic)->ic_vapslock, _vaps_lockflags);
--#define IEEE80211_VAPS_UNLOCK_IRQ(_ic) \
-- spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags); \
--} while (0)
--#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic) spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags)
-+#define IEEE80211_VAPS_LOCK_BH(_ic) \
-+ if (!irqs_disabled()) \
-+ spin_lock_bh(&(_ic)->ic_vapslock);
-+#define IEEE80211_VAPS_UNLOCK_BH(_ic) \
-+ if (!irqs_disabled()) \
-+ spin_unlock_bh(&(_ic)->ic_vapslock);
-+#define IEEE80211_VAPS_LOCK_IRQ(_ic) do { \
-+ unsigned long __vlockflags=0; \
-+ unsigned int __vlocked=0; \
-+ __vlocked=spin_is_locked(&(_ic)->ic_vapslock); \
-+ if(!__vlocked) spin_lock_irqsave(&(_ic)->ic_vapslock, __vlockflags);
-+#define IEEE80211_VAPS_UNLOCK_IRQ(_ic) \
-+ if(!__vlocked) spin_unlock_irqrestore(&(_ic)->ic_vapslock, __vlockflags); \
-+} while (0);
-+#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic) \
-+ if (!__vlocked) spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags)
-
- #if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
- #define IEEE80211_VAPS_LOCK_ASSERT(_ic) \
-@@ -122,6 +137,11 @@
- #define IEEE80211_VAPS_LOCK_ASSERT(_ic)
- #endif
-
-+/*
-+ * Beacon locking definitions; piggyback on com lock.
-+ */
-+#define IEEE80211_BEACON_LOCK(_ic) IEEE80211_LOCK_IRQ(_ic)
-+#define IEEE80211_BEACON_UNLOCK(_ic) IEEE80211_UNLOCK_IRQ(_ic)
-
- /*
- * Node locking definitions.
-@@ -191,8 +211,12 @@
- typedef spinlock_t ieee80211_scan_lock_t;
- #define IEEE80211_SCAN_LOCK_INIT(_nt, _name) spin_lock_init(&(_nt)->nt_scanlock)
- #define IEEE80211_SCAN_LOCK_DESTROY(_nt)
--#define IEEE80211_SCAN_LOCK_BH(_nt) spin_lock_bh(&(_nt)->nt_scanlock)
--#define IEEE80211_SCAN_UNLOCK_BH(_nt) spin_unlock_bh(&(_nt)->nt_scanlock)
-+#define IEEE80211_SCAN_LOCK_BH(_nt) \
-+ if (!irqs_disabled()) \
-+ spin_lock_bh(&(_nt)->nt_scanlock)
-+#define IEEE80211_SCAN_UNLOCK_BH(_nt) \
-+ if (!irqs_disabled()) \
-+ spin_unlock_bh(&(_nt)->nt_scanlock)
- #define IEEE80211_SCAN_LOCK_IRQ(_nt) do { \
- unsigned long __scan_lockflags; \
- spin_lock_irqsave(&(_nt)->nt_scanlock, __scan_lockflags);
-@@ -217,8 +241,12 @@
- #define ACL_LOCK_DESTROY(_as)
- #define ACL_LOCK(_as) spin_lock(&(_as)->as_lock)
- #define ACL_UNLOCK(_as) spin_unlock(&(_as)->as_lock)
--#define ACL_LOCK_BH(_as) spin_lock_bh(&(_as)->as_lock)
--#define ACL_UNLOCK_BH(_as) spin_unlock_bh(&(_as)->as_lock)
-+#define ACL_LOCK_BH(_as) \
-+ if (!irqs_disabled()) \
-+ spin_lock_bh(&(_as)->as_lock)
-+#define ACL_UNLOCK_BH(_as) \
-+ if (!irqs_disabled()) \
-+ spin_unlock_bh(&(_as)->as_lock)
-
- #if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
- #define ACL_LOCK_ASSERT(_as) \
-diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_node.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_node.c
---- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_node.c 2007-05-13 18:17:56.273014240 +0200
-+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_node.c 2007-05-13 18:17:56.599964536 +0200
-@@ -1567,7 +1567,7 @@
- struct ieee80211_node *ni;
- u_int gen;
-
-- IEEE80211_SCAN_LOCK_IRQ(nt);
-+ IEEE80211_SCAN_LOCK_BH(nt);
- gen = ++nt->nt_scangen;
-
- restart:
-@@ -1587,7 +1587,7 @@
- }
- IEEE80211_NODE_TABLE_UNLOCK_IRQ(nt);
-
-- IEEE80211_SCAN_UNLOCK_IRQ(nt);
-+ IEEE80211_SCAN_UNLOCK_BH(nt);
- }
- EXPORT_SYMBOL(ieee80211_iterate_dev_nodes);
-
-diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_power.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_power.c
---- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_power.c 2007-04-25 22:29:55.000000000 +0200
-+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_power.c 2007-05-13 18:17:56.599964536 +0200
-@@ -147,7 +147,7 @@
- #endif
- struct sk_buff *skb;
-
-- IEEE80211_NODE_SAVEQ_LOCK(ni);
-+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
- while ((skb = skb_peek(&ni->ni_savedq)) != NULL &&
- M_AGE_GET(skb) < IEEE80211_INACT_WAIT) {
- IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
-@@ -159,7 +159,7 @@
- }
- if (skb != NULL)
- M_AGE_SUB(skb, IEEE80211_INACT_WAIT);
-- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
-+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
-
- IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
- "discard %u frames for age", discard);
-@@ -185,7 +185,7 @@
- KASSERT(aid < vap->iv_max_aid,
- ("bogus aid %u, max %u", aid, vap->iv_max_aid));
-
-- IEEE80211_LOCK(ni->ni_ic);
-+ IEEE80211_BEACON_LOCK(ni->ni_ic);
- if (set != (isset(vap->iv_tim_bitmap, aid) != 0)) {
- if (set) {
- setbit(vap->iv_tim_bitmap, aid);
-@@ -196,7 +196,7 @@
- }
- vap->iv_flags |= IEEE80211_F_TIMUPDATE;
- }
-- IEEE80211_UNLOCK(ni->ni_ic);
-+ IEEE80211_BEACON_UNLOCK(ni->ni_ic);
- }
-
- /*
-@@ -297,9 +297,9 @@
- struct sk_buff *skb;
- int qlen;
-
-- IEEE80211_NODE_SAVEQ_LOCK(ni);
-+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
- IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
-- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
-+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
- if (skb == NULL)
- break;
- /*
-@@ -363,9 +363,9 @@
- for (;;) {
- struct sk_buff *skb;
-
-- IEEE80211_NODE_SAVEQ_LOCK(ni);
-+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
- skb = __skb_dequeue(&ni->ni_savedq);
-- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
-+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
- if (skb == NULL)
- break;
- ieee80211_parent_queue_xmit(skb);
-diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_proto.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_proto.c
---- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_proto.c 2007-05-13 18:17:56.578967728 +0200
-+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_proto.c 2007-05-13 18:17:56.600964384 +0200
-@@ -635,9 +635,9 @@
- {
- struct ieee80211com *ic = vap->iv_ic;
-
-- IEEE80211_LOCK(ic);
-+ IEEE80211_BEACON_LOCK(ic);
- ieee80211_wme_initparams_locked(vap);
-- IEEE80211_UNLOCK(ic);
-+ IEEE80211_BEACON_UNLOCK(ic);
+ struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
+ if (vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan) == NET_RX_DROP) {
+- /* If netif_rx dropped the packet because
++ /* If netif_receive_skb dropped the packet because
+ * device was too busy */
+ if (ni_tmp != NULL) {
+ /* node reference was leaked */
+@@ -1209,8 +1209,8 @@
+ skb = NULL; /* SKB is no longer ours */
+ } else {
+ struct ieee80211_node *ni_tmp = SKB_CB(skb)->ni;
+- if (netif_rx(skb) == NET_RX_DROP) {
+- /* If netif_rx dropped the packet because
++ if (netif_receive_skb(skb) == NET_RX_DROP) {
++ /* If netif_receive_skb dropped the packet because
+ * device was too busy */
+ if (ni_tmp != NULL) {
+ /* node reference was leaked */
+@@ -2322,8 +2322,8 @@
+ skb1->protocol = __constant_htons(0x0019); /* ETH_P_80211_RAW */
+
+ ni_tmp = SKB_CB(skb1)->ni;
+- if (netif_rx(skb1) == NET_RX_DROP) {
+- /* If netif_rx dropped the packet because
++ if (netif_receive_skb(skb1) == NET_RX_DROP) {
++ /* If netif_receive_skb dropped the packet because
+ * device was too busy */
+ if (ni_tmp != NULL) {
+ /* node reference was leaked */
+--- a/net80211/ieee80211_monitor.c
++++ b/net80211/ieee80211_monitor.c
+@@ -584,8 +584,8 @@
+ skb1->protocol =
+ __constant_htons(0x0019); /* ETH_P_80211_RAW */
+
+- if (netif_rx(skb1) == NET_RX_DROP) {
+- /* If netif_rx dropped the packet because
++ if (netif_receive_skb(skb1) == NET_RX_DROP) {
++ /* If netif_receive_skb dropped the packet because
+ * device was too busy, reclaim the ref. in
+ * the skb. */
+ if (SKB_CB(skb1)->ni != NULL)
+--- a/net80211/ieee80211_skb.c
++++ b/net80211/ieee80211_skb.c
+@@ -73,7 +73,7 @@
+ #undef dev_queue_xmit
+ #undef kfree_skb
+ #undef kfree_skb_fast
+-#undef netif_rx
++#undef netif_receive_skb
+ #undef pskb_copy
+ #undef skb_clone
+ #undef skb_copy
+@@ -638,8 +638,8 @@
+ grp, vlan_tag);
}
- void
-@@ -920,9 +920,9 @@
- struct ieee80211com *ic = vap->iv_ic;
-
- if (ic->ic_caps & IEEE80211_C_WME) {
-- IEEE80211_LOCK(ic);
-+ IEEE80211_BEACON_LOCK(ic);
- ieee80211_wme_updateparams_locked(vap);
-- IEEE80211_UNLOCK(ic);
-+ IEEE80211_BEACON_UNLOCK(ic);
- }
- }
-
-diff -urN madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_scan_sta.c madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_scan_sta.c
---- madwifi-ng-refcount-r2313-20070505.old/net80211/ieee80211_scan_sta.c 2007-02-01 21:49:37.000000000 +0100
-+++ madwifi-ng-refcount-r2313-20070505.dev/net80211/ieee80211_scan_sta.c 2007-05-13 18:17:56.601964232 +0200
-@@ -163,9 +163,11 @@
- {
- struct sta_table *st = ss->ss_priv;
-
-- spin_lock(&st->st_lock);
-+ if (!irqs_disabled())
-+ spin_lock_bh(&st->st_lock);
- sta_flush_table(st);
-- spin_unlock(&st->st_lock);
-+ if (!irqs_disabled())
-+ spin_unlock_bh(&st->st_lock);
- ss->ss_last = 0;
- return 0;
+-int netif_rx_debug(struct sk_buff *skb, const char* func, int line) {
+- return netif_rx(untrack_skb(skb, 0, func, line, __func__, __LINE__));
++int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line) {
++ return netif_receive_skb(untrack_skb(skb, 0, func, line, __func__, __LINE__));
}
-@@ -215,7 +217,8 @@
- int hash;
-
- hash = STA_HASH(macaddr);
-- spin_lock(&st->st_lock);
-+ if (!irqs_disabled())
-+ spin_lock_bh(&st->st_lock);
- LIST_FOREACH(se, &st->st_hash[hash], se_hash)
- if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr) &&
- sp->ssid[1] == se->base.se_ssid[1] &&
-@@ -225,7 +228,7 @@
- MALLOC(se, struct sta_entry *, sizeof(struct sta_entry),
- M_80211_SCAN, M_NOWAIT | M_ZERO);
- if (se == NULL) {
-- spin_unlock(&st->st_lock);
-+ spin_unlock_bh(&st->st_lock);
- return 0;
- }
- se->se_scangen = st->st_scangen-1;
-@@ -287,7 +290,8 @@
- se->se_seen = 1;
- se->se_notseen = 0;
-
-- spin_unlock(&st->st_lock);
-+ if (!irqs_disabled())
-+ spin_unlock_bh(&st->st_lock);
-
- /*
- * If looking for a quick choice and nothing's
-@@ -1063,7 +1067,8 @@
- u_int gen;
- int res = 0;
-- spin_lock(&st->st_scanlock);
-+ if (!irqs_disabled())
-+ spin_lock_bh(&st->st_scanlock);
- gen = st->st_scangen++;
- restart:
- spin_lock(&st->st_lock);
-@@ -1086,7 +1091,8 @@
- spin_unlock(&st->st_lock);
-
- done:
-- spin_unlock(&st->st_scanlock);
-+ if (!irqs_disabled())
-+ spin_unlock_bh(&st->st_scanlock);
-
- return res;
+ struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
+@@ -760,7 +760,7 @@
}
-@@ -1235,7 +1241,8 @@
- bestchan = NULL;
- bestrssi = -1;
-- spin_lock(&st->st_lock);
-+ if (!irqs_disabled())
-+ spin_lock_bh(&st->st_lock);
- for (i = 0; i < ss->ss_last; i++) {
- c = ss->ss_chans[i];
- maxrssi = 0;
-@@ -1248,7 +1255,8 @@
- if (bestchan == NULL || maxrssi < bestrssi)
- bestchan = c;
- }
-- spin_unlock(&st->st_lock);
-+ if (!irqs_disabled())
-+ spin_unlock_bh(&st->st_lock);
-
- return bestchan;
- }
+ EXPORT_SYMBOL(vlan_hwaccel_receive_skb_debug);
+-EXPORT_SYMBOL(netif_rx_debug);
++EXPORT_SYMBOL(netif_receive_skb_debug);
+ EXPORT_SYMBOL(alloc_skb_debug);
+ EXPORT_SYMBOL(dev_alloc_skb_debug);
+ EXPORT_SYMBOL(skb_clone_debug);
+--- a/net80211/ieee80211_skb.h
++++ b/net80211/ieee80211_skb.h
+@@ -116,7 +116,7 @@
+ int vlan_hwaccel_receive_skb_debug(struct sk_buff *skb,
+ struct vlan_group *grp, unsigned short vlan_tag,
+ const char* func, int line);
+-int netif_rx_debug(struct sk_buff *skb, const char* func, int line);
++int netif_receive_skb_debug(struct sk_buff *skb, const char* func, int line);
+ struct sk_buff * alloc_skb_debug(unsigned int length, gfp_t gfp_mask,
+ const char *func, int line);
+ struct sk_buff * dev_alloc_skb_debug(unsigned int length,
+@@ -151,7 +151,7 @@
+ #undef dev_queue_xmit
+ #undef kfree_skb
+ #undef kfree_skb_fast
+-#undef netif_rx
++#undef netif_receive_skb
+ #undef pskb_copy
+ #undef skb_clone
+ #undef skb_copy
+@@ -168,8 +168,8 @@
+ skb_copy_expand_debug(_skb, _newheadroom, _newtailroom, _gfp_mask, __func__, __LINE__)
+ #define vlan_hwaccel_receive_skb(_skb, _grp, _tag) \
+ vlan_hwaccel_receive_skb_debug(_skb, _grp, _tag, __func__, __LINE__)
+-#define netif_rx(_skb) \
+- netif_rx_debug(_skb, __func__, __LINE__)
++#define netif_receive_skb(_skb) \
++ netif_receive_skb_debug(_skb, __func__, __LINE__)
+ #define alloc_skb(_length, _gfp_mask) \
+ alloc_skb_debug(_length, _gfp_mask, __func__, __LINE__)
+ #define dev_alloc_skb(_length) \