-+ PKTSETLINK(prev, NULL);
-+ q->tail = prev;
-+ q->len--;
-+ p = next;
-+
-+ return (p);
-+}
-+#endif
-+
-+
-diff -Naur linux.old/drivers/net/wl/hnddma.c linux.dev/drivers/net/wl/hnddma.c
---- linux.old/drivers/net/wl/hnddma.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux.dev/drivers/net/wl/hnddma.c 2006-04-06 16:58:24.000000000 +0200
-@@ -0,0 +1,1453 @@
-+/*
-+ * Generic Broadcom Home Networking Division (HND) DMA module.
-+ * This supports the following chips: BCM42xx, 44xx, 47xx .
-+ *
-+ * Copyright 2005, Broadcom Corporation
-+ * All Rights Reserved.
-+ *
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
-+ *
-+ * $Id$
-+ */
-+
-+#include <typedefs.h>
-+#include <osl.h>
-+#include <bcmendian.h>
-+#include <sbconfig.h>
-+#include <bcmutils.h>
-+#include <bcmdevs.h>
-+#include <sbutils.h>
-+
-+struct dma_info; /* forward declaration */
-+#define di_t struct dma_info
-+
-+#include "sbhnddma.h"
-+#include "hnddma.h"
-+
-+/* debug/trace */
-+#define DMA_ERROR(args)
-+#define DMA_TRACE(args)
-+
-+/* default dma message level (if input msg_level pointer is null in dma_attach()) */
-+static uint dma_msg_level = 0;
-+
-+#define MAXNAMEL 8
-+
-+/* dma engine software state */
-+typedef struct dma_info {
-+ hnddma_t hnddma; /* exported structure */
-+ uint *msg_level; /* message level pointer */
-+	char name[MAXNAMEL];	/* caller's name for diag msgs */
-+
-+ void *osh; /* os handle */
-+ sb_t *sbh; /* sb handle */
-+
-+ bool dma64; /* dma64 enabled */
-+ bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
-+
-+ dma32regs_t *d32txregs; /* 32 bits dma tx engine registers */
-+ dma32regs_t *d32rxregs; /* 32 bits dma rx engine registers */
-+ dma64regs_t *d64txregs; /* 64 bits dma tx engine registers */
-+ dma64regs_t *d64rxregs; /* 64 bits dma rx engine registers */
-+
-+	uint32 dma64align;	/* either 8K or 4K, depending on the number of descriptors */
-+ dma32dd_t *txd32; /* pointer to dma32 tx descriptor ring */
-+ dma64dd_t *txd64; /* pointer to dma64 tx descriptor ring */
-+ uint ntxd; /* # tx descriptors tunable */
-+ uint txin; /* index of next descriptor to reclaim */
-+ uint txout; /* index of next descriptor to post */
-+ uint txavail; /* # free tx descriptors */
-+ void **txp; /* pointer to parallel array of pointers to packets */
-+ ulong txdpa; /* physical address of descriptor ring */
-+ uint txdalign; /* #bytes added to alloc'd mem to align txd */
-+ uint txdalloc; /* #bytes allocated for the ring */
-+
-+ dma32dd_t *rxd32; /* pointer to dma32 rx descriptor ring */
-+ dma64dd_t *rxd64; /* pointer to dma64 rx descriptor ring */
-+ uint nrxd; /* # rx descriptors tunable */
-+ uint rxin; /* index of next descriptor to reclaim */
-+ uint rxout; /* index of next descriptor to post */
-+ void **rxp; /* pointer to parallel array of pointers to packets */
-+ ulong rxdpa; /* physical address of descriptor ring */
-+ uint rxdalign; /* #bytes added to alloc'd mem to align rxd */
-+ uint rxdalloc; /* #bytes allocated for the ring */
-+
-+ /* tunables */
-+ uint rxbufsize; /* rx buffer size in bytes */
-+ uint nrxpost; /* # rx buffers to keep posted */
-+ uint rxoffset; /* rxcontrol offset */
-+ uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
-+ uint ddoffsethigh; /* add to get dma address of descriptor ring, high 32 bits */
-+ uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
-+ uint dataoffsethigh; /* add to get dma address of data buffer, high 32 bits */
-+} dma_info_t;
-+
-+#ifdef BCMDMA64
-+#define DMA64_ENAB(di) ((di)->dma64)
-+#else
-+#define DMA64_ENAB(di) (0)
-+#endif
-+
-+/* descriptor bumping macros */
-+#define XXD(x, n) ((x) & ((n) - 1))
-+#define TXD(x) XXD((x), di->ntxd)
-+#define RXD(x) XXD((x), di->nrxd)
-+#define NEXTTXD(i) TXD(i + 1)
-+#define PREVTXD(i) TXD(i - 1)
-+#define NEXTRXD(i) RXD(i + 1)
-+#define NTXDACTIVE(h, t) TXD(t - h)
-+#define NRXDACTIVE(h, t) RXD(t - h)
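-+/*
-+ * With power-of-2 ring sizes these reduce to cheap masking, e.g. for
-+ * di->ntxd == 64: TXD(63 + 1) == 0 (the index wraps) and
-+ * NTXDACTIVE(60, 2) == 6 (six descriptors outstanding across the wrap).
-+ * This is why dma_attach() asserts ISPOWEROF2(ntxd) and ISPOWEROF2(nrxd).
-+ */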
-+
-+/* macros to convert between byte offsets and indexes */
-+#define B2I(bytes, type) ((bytes) / sizeof(type))
-+#define I2B(index, type) ((index) * sizeof(type))
-+
-+#define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
-+#define PCI32ADDR_HIGH_SHIFT 30
-+
-+
-+/* prototypes */
-+static bool dma_isaddrext(dma_info_t *di);
-+static bool dma_alloc(dma_info_t *di, uint direction);
-+
-+static bool dma32_alloc(dma_info_t *di, uint direction);
-+static void dma32_txreset(dma_info_t *di);
-+static void dma32_rxreset(dma_info_t *di);
-+static bool dma32_txsuspendedidle(dma_info_t *di);
-+static int dma32_txfast(dma_info_t *di, void *p0, uint32 coreflags);
-+static void* dma32_getnexttxp(dma_info_t *di, bool forceall);
-+static void* dma32_getnextrxp(dma_info_t *di, bool forceall);
-+static void dma32_txrotate(di_t *di);
-+
-+/* prototypes or stubs */
-+#ifdef BCMDMA64
-+static bool dma64_alloc(dma_info_t *di, uint direction);
-+static void dma64_txreset(dma_info_t *di);
-+static void dma64_rxreset(dma_info_t *di);
-+static bool dma64_txsuspendedidle(dma_info_t *di);
-+static int dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags);
-+static void* dma64_getnexttxp(dma_info_t *di, bool forceall);
-+static void* dma64_getnextrxp(dma_info_t *di, bool forceall);
-+static void dma64_txrotate(di_t *di);
-+#else
-+static bool dma64_alloc(dma_info_t *di, uint direction) { return TRUE; }
-+static void dma64_txreset(dma_info_t *di) {}
-+static void dma64_rxreset(dma_info_t *di) {}
-+static bool dma64_txsuspendedidle(dma_info_t *di) { return TRUE;}
-+static int dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags) { return 0; }
-+static void* dma64_getnexttxp(dma_info_t *di, bool forceall) { return NULL; }
-+static void* dma64_getnextrxp(dma_info_t *di, bool forceall) { return NULL; }
-+static void dma64_txrotate(di_t *di) { return; }
-+#endif
-+
-+/* old dmaregs struct for compatibility */
-+typedef volatile struct {
-+ /* transmit channel */
-+ uint32 xmtcontrol; /* enable, et al */
-+ uint32 xmtaddr; /* descriptor ring base address (4K aligned) */
-+ uint32 xmtptr; /* last descriptor posted to chip */
-+ uint32 xmtstatus; /* current active descriptor, et al */
-+
-+ /* receive channel */
-+ uint32 rcvcontrol; /* enable, et al */
-+ uint32 rcvaddr; /* descriptor ring base address (4K aligned) */
-+ uint32 rcvptr; /* last descriptor posted to chip */
-+ uint32 rcvstatus; /* current active descriptor, et al */
-+} dmaregs_t;
-+
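-+/*
-+ * Rough usage sketch (illustrative only; argument values depend on the
-+ * caller and chip): a driver attaches once, initializes both rings, keeps
-+ * the rx ring filled, and pushes/reaps frames from its send and interrupt
-+ * paths:
-+ *
-+ *	di = dma_attach(drv, osh, "wl0", regs, ntxd, nrxd, rxbufsize,
-+ *	                nrxpost, rxoffset, ddoffset, dataoffset, &msg_level);
-+ *	dma_txinit(di); dma_rxinit(di); dma_rxfill(di);
-+ *	...
-+ *	dma_txfast(di, p, coreflags);		- post a frame for transmit
-+ *	while ((p = dma_rx(di)) != NULL) ...	- reap received frames
-+ *	dma_rxfill(di);				- repost consumed rx buffers
-+ *	dma_txreclaim(di, FALSE);		- reclaim and free completed tx packets
-+ *	...
-+ *	dma_detach(di);
-+ */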
-+void*
-+dma_attach(void *drv, void *osh, char *name, dmaregs_t *regs, uint ntxd, uint nrxd,
-+ uint rxbufsize, uint nrxpost, uint rxoffset, uint ddoffset, uint dataoffset, uint *msg_level)
-+{
-+ dma_info_t *di;
-+ uint size;
-+ dma32regs_t *dmaregstx = regs;
-+ dma32regs_t *dmaregsrx = dmaregstx + 1;
-+
-+ /* allocate private info structure */
-+ if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {
-+ return (NULL);
-+ }
-+ bzero((char*)di, sizeof (dma_info_t));
-+ di->msg_level = msg_level ? msg_level : &dma_msg_level;
-+
-+ /* check arguments */
-+ ASSERT(ISPOWEROF2(ntxd));
-+ ASSERT(ISPOWEROF2(nrxd));
-+ if (nrxd == 0)
-+ ASSERT(dmaregsrx == NULL);
-+ if (ntxd == 0)
-+ ASSERT(dmaregstx == NULL);
-+
-+ ASSERT(ntxd <= D32MAXDD);
-+ ASSERT(nrxd <= D32MAXDD);
-+ di->d32txregs = (dma32regs_t *)dmaregstx;
-+ di->d32rxregs = (dma32regs_t *)dmaregsrx;
-+
-+	/* make a private copy of our caller's name */
-+ strncpy(di->name, name, MAXNAMEL);
-+ di->name[MAXNAMEL-1] = '\0';
-+
-+ di->osh = osh;
-+ di->sbh = NULL;
-+
-+ /* save tunables */
-+ di->ntxd = ntxd;
-+ di->nrxd = nrxd;
-+ di->rxbufsize = rxbufsize;
-+ di->nrxpost = nrxpost;
-+ di->rxoffset = rxoffset;
-+
-+ di->ddoffsetlow = ddoffset;
-+ di->dataoffsetlow = dataoffset;
-+ di->ddoffsethigh = 0;
-+ di->dataoffsethigh = 0;
-+
-+ di->addrext = 0;
-+
-+ DMA_TRACE(("%s: dma_attach: osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d rxoffset %d ddoffset 0x%x dataoffset 0x%x\n",
-+ name, osh, ntxd, nrxd, rxbufsize, nrxpost, rxoffset, di->ddoffsetlow, di->dataoffsetlow));
-+
-+ /* allocate tx packet pointer vector */
-+ if (ntxd) {
-+ size = ntxd * sizeof (void*);
-+ if ((di->txp = MALLOC(osh, size)) == NULL) {
-+ DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
-+ goto fail;
-+ }
-+ bzero((char*)di->txp, size);
-+ }
-+
-+ /* allocate rx packet pointer vector */
-+ if (nrxd) {
-+ size = nrxd * sizeof (void*);
-+ if ((di->rxp = MALLOC(osh, size)) == NULL) {
-+ DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
-+ goto fail;
-+ }
-+ bzero((char*)di->rxp, size);
-+ }
-+
-+ /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
-+ if (ntxd) {
-+ if (!dma_alloc(di, DMA_TX))
-+ goto fail;
-+ }
-+
-+ /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
-+ if (nrxd) {
-+ if (!dma_alloc(di, DMA_RX))
-+ goto fail;
-+ }
-+
-+ if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
-+ DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n", di->name, di->txdpa));
-+ goto fail;
-+ }
-+ if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
-+ DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n", di->name, di->rxdpa));
-+ goto fail;
-+ }
-+
-+ return ((void*)di);
-+
-+fail:
-+ dma_detach((void*)di);
-+ return (NULL);
-+}
-+
-+
-+static bool
-+dma_alloc(dma_info_t *di, uint direction)
-+{
-+ if (DMA64_ENAB(di)) {
-+ return dma64_alloc(di, direction);
-+ } else {
-+ return dma32_alloc(di, direction);
-+ }
-+}
-+
-+/* may be called with core in reset */
-+void
-+dma_detach(dma_info_t *di)
-+{
-+ if (di == NULL)
-+ return;
-+
-+ DMA_TRACE(("%s: dma_detach\n", di->name));
-+
-+ /* shouldn't be here if descriptors are unreclaimed */
-+ ASSERT(di->txin == di->txout);
-+ ASSERT(di->rxin == di->rxout);
-+
-+ /* free dma descriptor rings */
-+ if (di->txd32)
-+ DMA_FREE_CONSISTENT(di->osh, ((int8*)di->txd32 - di->txdalign), di->txdalloc, (di->txdpa - di->txdalign));
-+ if (di->rxd32)
-+ DMA_FREE_CONSISTENT(di->osh, ((int8*)di->rxd32 - di->rxdalign), di->rxdalloc, (di->rxdpa - di->rxdalign));
-+
-+ /* free packet pointer vectors */
-+ if (di->txp)
-+ MFREE(di->osh, (void*)di->txp, (di->ntxd * sizeof (void*)));
-+ if (di->rxp)
-+ MFREE(di->osh, (void*)di->rxp, (di->nrxd * sizeof (void*)));
-+
-+ /* free our private info structure */
-+ MFREE(di->osh, (void*)di, sizeof (dma_info_t));
-+}
-+
-+/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
-+static bool
-+dma_isaddrext(dma_info_t *di)
-+{
-+ uint32 w;
-+
-+ if (DMA64_ENAB(di)) {
-+ OR_REG(&di->d64txregs->control, D64_XC_AE);
-+		w = R_REG(&di->d64txregs->control);
-+		AND_REG(&di->d64txregs->control, ~D64_XC_AE);
-+		return ((w & D64_XC_AE) == D64_XC_AE);
-+ } else {
-+ OR_REG(&di->d32txregs->control, XC_AE);
-+ w = R_REG(&di->d32txregs->control);
-+ AND_REG(&di->d32txregs->control, ~XC_AE);
-+ return ((w & XC_AE) == XC_AE);
-+ }
-+}
-+
-+void
-+dma_txreset(dma_info_t *di)
-+{
-+ DMA_TRACE(("%s: dma_txreset\n", di->name));
-+
-+ if (DMA64_ENAB(di))
-+ dma64_txreset(di);
-+ else
-+ dma32_txreset(di);
-+}
-+
-+void
-+dma_rxreset(dma_info_t *di)
-+{
-+ DMA_TRACE(("%s: dma_rxreset\n", di->name));
-+
-+ if (DMA64_ENAB(di))
-+ dma64_rxreset(di);
-+ else
-+ dma32_rxreset(di);
-+}
-+
-+/* initialize descriptor table base address */
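-+/*
-+ * When the ring is addressed through the PCI window (ddoffsetlow ==
-+ * SB_PCI_DMA) and its physical address has bits 31:30 set, the 32-bit
-+ * engine uses the address-extension field instead: e.g. pa == 0xc0001000
-+ * gives ae == 3; (0x00001000 + ddoffsetlow) is written to the addr
-+ * register and ae is folded into XC_AE/RC_AE.
-+ */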
-+static void
-+dma_ddtable_init(dma_info_t *di, uint direction, ulong pa)
-+{
-+ if (DMA64_ENAB(di)) {
-+ if (direction == DMA_TX) {
-+ W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
-+ W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
-+ } else {
-+ W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
-+ W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
-+ }
-+ } else {
-+ uint32 offset = di->ddoffsetlow;
-+ if ((offset != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
-+ if (direction == DMA_TX)
-+ W_REG(&di->d32txregs->addr, (pa + offset));
-+ else
-+ W_REG(&di->d32rxregs->addr, (pa + offset));
-+ } else {
-+ /* dma32 address extension */
-+ uint32 ae;
-+ ASSERT(di->addrext);
-+ ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
-+
-+ if (direction == DMA_TX) {
-+ W_REG(&di->d32txregs->addr, ((pa & ~PCI32ADDR_HIGH) + offset));
-+ SET_REG(&di->d32txregs->control, XC_AE, (ae << XC_AE_SHIFT));
-+ } else {
-+ W_REG(&di->d32rxregs->addr, ((pa & ~PCI32ADDR_HIGH) + offset));
-+ SET_REG(&di->d32rxregs->control, RC_AE, (ae << RC_AE_SHIFT));
-+ }
-+ }
-+ }
-+}
-+
-+/* init the tx or rx descriptor */
-+static INLINE void
-+dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx, uint32 *ctrl)
-+{
-+ uint offset = di->dataoffsetlow;
-+
-+ if ((offset != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
-+ W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + offset));
-+ W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*ctrl));
-+ } else {
-+ /* address extension */
-+ uint32 ae;
-+ ASSERT(di->addrext);
-+ ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
-+
-+ *ctrl |= (ae << CTRL_AE_SHIFT);
-+ W_SM(&ddring[outidx].addr, BUS_SWAP32((pa & ~PCI32ADDR_HIGH) + offset));
-+ W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*ctrl));
-+ }
-+}
-+
-+/* init the tx or rx descriptor */
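-+/*
-+ * For the 64-bit descriptor the flags (SOF/EOF/EOT/IOC) travel in ctrl1
-+ * while the buffer byte count is masked into ctrl2 (D64_CTRL2_BC_MASK);
-+ * the 32-bit descriptor above packs both into a single ctrl word.
-+ */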
-+static INLINE void
-+dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, ulong pa, uint outidx, uint32 *flags, uint32 bufcount)
-+{
-+ uint32 bufaddr_low = pa + di->dataoffsetlow;
-+ uint32 bufaddr_high = 0 + di->dataoffsethigh;
-+
-+ uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
-+
-+ W_SM(&ddring[outidx].addrlow, BUS_SWAP32(bufaddr_low));
-+ W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(bufaddr_high));
-+ W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
-+ W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
-+}
-+
-+void
-+dma_txinit(dma_info_t *di)
-+{
-+ DMA_TRACE(("%s: dma_txinit\n", di->name));
-+
-+ di->txin = di->txout = 0;
-+ di->txavail = di->ntxd - 1;
-+
-+ /* clear tx descriptor ring */
-+ if (DMA64_ENAB(di)) {
-+ BZERO_SM((void*)di->txd64, (di->ntxd * sizeof (dma64dd_t)));
-+ W_REG(&di->d64txregs->control, XC_XE);
-+ dma_ddtable_init(di, DMA_TX, di->txdpa);
-+ } else {
-+ BZERO_SM((void*)di->txd32, (di->ntxd * sizeof (dma32dd_t)));
-+ W_REG(&di->d32txregs->control, XC_XE);
-+ dma_ddtable_init(di, DMA_TX, di->txdpa);
-+ }
-+}
-+
-+bool
-+dma_txenabled(dma_info_t *di)
-+{
-+ uint32 xc;
-+
-+ /* If the chip is dead, it is not enabled :-) */
-+ if (DMA64_ENAB(di)) {
-+ xc = R_REG(&di->d64txregs->control);
-+ return ((xc != 0xffffffff) && (xc & D64_XC_XE));
-+ } else {
-+ xc = R_REG(&di->d32txregs->control);
-+ return ((xc != 0xffffffff) && (xc & XC_XE));
-+ }
-+}
-+
-+void
-+dma_txsuspend(dma_info_t *di)
-+{
-+ DMA_TRACE(("%s: dma_txsuspend\n", di->name));
-+ if (DMA64_ENAB(di))
-+ OR_REG(&di->d64txregs->control, D64_XC_SE);
-+ else
-+ OR_REG(&di->d32txregs->control, XC_SE);
-+}
-+
-+void
-+dma_txresume(dma_info_t *di)
-+{
-+ DMA_TRACE(("%s: dma_txresume\n", di->name));
-+ if (DMA64_ENAB(di))
-+ AND_REG(&di->d64txregs->control, ~D64_XC_SE);
-+ else
-+ AND_REG(&di->d32txregs->control, ~XC_SE);
-+}
-+
-+bool
-+dma_txsuspendedidle(dma_info_t *di)
-+{
-+ if (DMA64_ENAB(di))
-+ return dma64_txsuspendedidle(di);
-+ else
-+ return dma32_txsuspendedidle(di);
-+}
-+
-+bool
-+dma_txsuspended(dma_info_t *di)
-+{
-+ if (DMA64_ENAB(di))
-+ return ((R_REG(&di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
-+ else
-+ return ((R_REG(&di->d32txregs->control) & XC_SE) == XC_SE);
-+}
-+
-+bool
-+dma_txstopped(dma_info_t *di)
-+{
-+ if (DMA64_ENAB(di))
-+ return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_STOPPED);
-+ else
-+ return ((R_REG(&di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
-+}
-+
-+bool
-+dma_rxstopped(dma_info_t *di)
-+{
-+ if (DMA64_ENAB(di))
-+ return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) == D64_RS0_RS_STOPPED);
-+ else
-+ return ((R_REG(&di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
-+}
-+
-+void
-+dma_fifoloopbackenable(dma_info_t *di)
-+{
-+ DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
-+ if (DMA64_ENAB(di))
-+ OR_REG(&di->d64txregs->control, D64_XC_LE);
-+ else
-+ OR_REG(&di->d32txregs->control, XC_LE);
-+}
-+
-+void
-+dma_rxinit(dma_info_t *di)
-+{
-+ DMA_TRACE(("%s: dma_rxinit\n", di->name));
-+
-+ di->rxin = di->rxout = 0;
-+
-+ /* clear rx descriptor ring */
-+ if (DMA64_ENAB(di)) {
-+ BZERO_SM((void*)di->rxd64, (di->nrxd * sizeof (dma64dd_t)));
-+ dma_rxenable(di);
-+ dma_ddtable_init(di, DMA_RX, di->rxdpa);
-+ } else {
-+ BZERO_SM((void*)di->rxd32, (di->nrxd * sizeof (dma32dd_t)));
-+ dma_rxenable(di);
-+ dma_ddtable_init(di, DMA_RX, di->rxdpa);
-+ }
-+}
-+
-+void
-+dma_rxenable(dma_info_t *di)
-+{
-+ DMA_TRACE(("%s: dma_rxenable\n", di->name));
-+ if (DMA64_ENAB(di))
-+ W_REG(&di->d64rxregs->control, ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
-+ else
-+ W_REG(&di->d32rxregs->control, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
-+}
-+
-+bool
-+dma_rxenabled(dma_info_t *di)
-+{
-+ uint32 rc;
-+
-+ if (DMA64_ENAB(di)) {
-+ rc = R_REG(&di->d64rxregs->control);
-+ return ((rc != 0xffffffff) && (rc & D64_RC_RE));
-+ } else {
-+ rc = R_REG(&di->d32rxregs->control);
-+ return ((rc != 0xffffffff) && (rc & RC_RE));
-+ }
-+}
-+
-+
-+/* !! tx entry routine */
-+int
-+dma_txfast(dma_info_t *di, void *p0, uint32 coreflags)
-+{
-+ if (DMA64_ENAB(di)) {
-+ return dma64_txfast(di, p0, coreflags);
-+ } else {
-+ return dma32_txfast(di, p0, coreflags);
-+ }
-+}
-+
-+/* !! rx entry routine, returns a pointer to the next frame received, or NULL if there are no more */
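-+/*
-+ * The engine prepends rxoffset bytes of receive header to each frame; the
-+ * first 16-bit word of the posted buffer is expected to carry the received
-+ * frame length (little-endian), which is read below and used to trim the
-+ * packet to (rxoffset + len) bytes.
-+ */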
-+void*
-+dma_rx(dma_info_t *di)
-+{
-+ void *p;
-+ uint len;
-+ int skiplen = 0;
-+
-+ while ((p = dma_getnextrxp(di, FALSE))) {
-+ /* skip giant packets which span multiple rx descriptors */
-+ if (skiplen > 0) {
-+ skiplen -= di->rxbufsize;
-+ if (skiplen < 0)
-+ skiplen = 0;
-+ PKTFREE(di->osh, p, FALSE);
-+ continue;
-+ }
-+
-+ len = ltoh16(*(uint16*)(PKTDATA(di->osh, p)));
-+ DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
-+
-+ /* bad frame length check */
-+ if (len > (di->rxbufsize - di->rxoffset)) {
-+ DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
-+ if (len > 0)
-+ skiplen = len - (di->rxbufsize - di->rxoffset);
-+ PKTFREE(di->osh, p, FALSE);
-+ di->hnddma.rxgiants++;
-+ continue;
-+ }
-+
-+ /* set actual length */
-+ PKTSETLEN(di->osh, p, (di->rxoffset + len));
-+
-+ break;
-+ }
-+
-+ return (p);
-+}
-+
-+/* post receive buffers */
-+void
-+dma_rxfill(dma_info_t *di)
-+{
-+ void *p;
-+ uint rxin, rxout;
-+ uint32 ctrl;
-+ uint n;
-+ uint i;
-+ uint32 pa;
-+ uint rxbufsize;
-+
-+ /*
-+ * Determine how many receive buffers we're lacking
-+ * from the full complement, allocate, initialize,
-+ * and post them, then update the chip rx lastdscr.
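-+	 * For example, with nrxpost == 64 and NRXDACTIVE(rxin, rxout) == 10
-+	 * (ten buffers still posted), 54 fresh buffers are allocated, mapped
-+	 * and handed to the ring below.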
-+ */
-+
-+ rxin = di->rxin;
-+ rxout = di->rxout;
-+ rxbufsize = di->rxbufsize;
-+
-+ n = di->nrxpost - NRXDACTIVE(rxin, rxout);
-+
-+ DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
-+
-+ for (i = 0; i < n; i++) {
-+ if ((p = PKTGET(di->osh, rxbufsize, FALSE)) == NULL) {
-+ DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
-+ di->hnddma.rxnobuf++;
-+ break;
-+ }
-+
-+ /* Do a cached write instead of uncached write since DMA_MAP
-+ * will flush the cache. */
-+ *(uint32*)(PKTDATA(di->osh, p)) = 0;
-+
-+ pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->osh, p), rxbufsize, DMA_RX, p);
-+ ASSERT(ISALIGNED(pa, 4));
-+
-+ /* save the free packet pointer */
-+ ASSERT(di->rxp[rxout] == NULL);
-+ di->rxp[rxout] = p;
-+
-+ if (DMA64_ENAB(di)) {
-+ /* prep the descriptor control value */
-+			ctrl = 0;
-+			if (rxout == (di->nrxd - 1))
-+				ctrl = D64_CTRL1_EOT;
-+
-+ dma64_dd_upd(di, di->rxd64, pa, rxout, &ctrl, rxbufsize);
-+ } else {
-+ /* prep the descriptor control value */
-+ ctrl = rxbufsize;
-+ if (rxout == (di->nrxd - 1))
-+ ctrl |= CTRL_EOT;
-+ dma32_dd_upd(di, di->rxd32, pa, rxout, &ctrl);
-+ }
-+
-+ rxout = NEXTRXD(rxout);
-+ }
-+
-+ di->rxout = rxout;
-+
-+ /* update the chip lastdscr pointer */
-+ if (DMA64_ENAB(di)) {
-+ W_REG(&di->d64rxregs->ptr, I2B(rxout, dma64dd_t));
-+ } else {
-+ W_REG(&di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
-+ }
-+}
-+
-+void
-+dma_txreclaim(dma_info_t *di, bool forceall)
-+{
-+ void *p;
-+
-+ DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));
-+
-+ while ((p = dma_getnexttxp(di, forceall)))
-+ PKTFREE(di->osh, p, TRUE);
-+}
-+
-+/*
-+ * Reclaim next completed txd (txds if using chained buffers) and
-+ * return associated packet.
-+ * If 'forceall' is true, reclaim txd(s) and return associated packet
-+ * regardless of the value of the hardware "curr" pointer.
-+ */
-+void*
-+dma_getnexttxp(dma_info_t *di, bool forceall)
-+{
-+ if (DMA64_ENAB(di)) {
-+ return dma64_getnexttxp(di, forceall);
-+ } else {
-+ return dma32_getnexttxp(di, forceall);
-+ }
-+}
-+
-+/* like getnexttxp but no reclaim */
-+void*
-+dma_peeknexttxp(dma_info_t *di)
-+{
-+ uint end, i;
-+
-+ if (DMA64_ENAB(di)) {
-+ end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
-+ } else {
-+ end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
-+ }
-+
-+ for (i = di->txin; i != end; i = NEXTTXD(i))
-+ if (di->txp[i])
-+ return (di->txp[i]);
-+
-+ return (NULL);
-+}
-+
-+/*
-+ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
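-+ * For example, if txin == 5 and the hardware's active descriptor is 9,
-+ * every live descriptor (and its txp[] entry) is copied four slots forward,
-+ * the vacated slots are zapped, and txin/txout both advance by four before
-+ * the chip is re-kicked at the new txout.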
-+ */
-+void
-+dma_txrotate(di_t *di)
-+{
-+ if (DMA64_ENAB(di)) {
-+ dma64_txrotate(di);
-+ } else {
-+ dma32_txrotate(di);
-+ }
-+}
-+
-+void
-+dma_rxreclaim(dma_info_t *di)
-+{
-+ void *p;
-+
-+ DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
-+
-+ while ((p = dma_getnextrxp(di, TRUE)))
-+ PKTFREE(di->osh, p, FALSE);
-+}
-+
-+void *
-+dma_getnextrxp(dma_info_t *di, bool forceall)
-+{
-+ if (DMA64_ENAB(di)) {
-+ return dma64_getnextrxp(di, forceall);
-+ } else {
-+ return dma32_getnextrxp(di, forceall);
-+ }
-+}
-+
-+uintptr
-+dma_getvar(dma_info_t *di, char *name)
-+{
-+ if (!strcmp(name, "&txavail"))
-+ return ((uintptr) &di->txavail);
-+ else {
-+ ASSERT(0);
-+ }
-+ return (0);
-+}
-+
-+void
-+dma_txblock(dma_info_t *di)
-+{
-+ di->txavail = 0;
-+}
-+
-+void
-+dma_txunblock(dma_info_t *di)
-+{
-+ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+}
-+
-+uint
-+dma_txactive(dma_info_t *di)
-+{
-+ return (NTXDACTIVE(di->txin, di->txout));
-+}
-+
-+void
-+dma_rxpiomode(dma32regs_t *regs)
-+{
-+	W_REG(&regs->control, RC_FM);
-+}
-+
-+void
-+dma_txpioloopback(dma32regs_t *regs)
-+{
-+	OR_REG(&regs->control, XC_LE);
-+}
-+
-+
-+
-+
-+/*** 32 bits DMA non-inline functions ***/
-+static bool
-+dma32_alloc(dma_info_t *di, uint direction)
-+{
-+ uint size;
-+ uint ddlen;
-+ void *va;
-+
-+ ddlen = sizeof (dma32dd_t);
-+
-+ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
-+
-+ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
-+ size += D32RINGALIGN;
-+
-+
-+ if (direction == DMA_TX) {
-+ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa)) == NULL) {
-+ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
-+ return FALSE;
-+ }
-+
-+ di->txd32 = (dma32dd_t*) ROUNDUP((uintptr)va, D32RINGALIGN);
-+ di->txdalign = (uint)((int8*)di->txd32 - (int8*)va);
-+ di->txdpa += di->txdalign;
-+ di->txdalloc = size;
-+ ASSERT(ISALIGNED((uintptr)di->txd32, D32RINGALIGN));
-+ } else {
-+ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa)) == NULL) {
-+ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
-+ return FALSE;
-+ }
-+ di->rxd32 = (dma32dd_t*) ROUNDUP((uintptr)va, D32RINGALIGN);
-+ di->rxdalign = (uint)((int8*)di->rxd32 - (int8*)va);
-+ di->rxdpa += di->rxdalign;
-+ di->rxdalloc = size;
-+ ASSERT(ISALIGNED((uintptr)di->rxd32, D32RINGALIGN));
-+ }
-+
-+ return TRUE;
-+}
-+
-+static void
-+dma32_txreset(dma_info_t *di)
-+{
-+ uint32 status;
-+
-+ /* suspend tx DMA first */
-+ W_REG(&di->d32txregs->control, XC_SE);
-+ SPINWAIT((status = (R_REG(&di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED &&
-+ status != XS_XS_IDLE &&
-+ status != XS_XS_STOPPED,
-+ 10000);
-+
-+ W_REG(&di->d32txregs->control, 0);
-+ SPINWAIT((status = (R_REG(&di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED,
-+ 10000);
-+
-+ if (status != XS_XS_DISABLED) {
-+ DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
-+ }
-+
-+ /* wait for the last transaction to complete */
-+ OSL_DELAY(300);
-+}
-+
-+static void
-+dma32_rxreset(dma_info_t *di)
-+{
-+ uint32 status;
-+
-+ W_REG(&di->d32rxregs->control, 0);
-+ SPINWAIT((status = (R_REG(&di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED,
-+ 10000);
-+
-+ if (status != RS_RS_DISABLED) {
-+ DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
-+ }
-+}
-+
-+static bool
-+dma32_txsuspendedidle(dma_info_t *di)
-+{
-+ if (!(R_REG(&di->d32txregs->control) & XC_SE))
-+ return 0;
-+
-+ if ((R_REG(&di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
-+ return 0;
-+
-+ OSL_DELAY(2);
-+ return ((R_REG(&di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
-+}
-+
-+/*
-+ * supports full 32bit dma engine buffer addressing so
-+ * dma buffers can cross 4 Kbyte page boundaries.
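-+ * Each buffer in the packet chain gets its own descriptor: CTRL_SOF is set
-+ * on the first fragment, CTRL_EOF|CTRL_IOC on the last, and CTRL_EOT on
-+ * whichever descriptor occupies the final ring slot. Returns 0 on success
-+ * or -1 (and frees the chain) when the ring is out of descriptors.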
-+ */
-+static int
-+dma32_txfast(dma_info_t *di, void *p0, uint32 coreflags)
-+{
-+ void *p, *next;
-+ uchar *data;
-+ uint len;
-+ uint txout;
-+ uint32 ctrl;
-+ uint32 pa;
-+
-+ DMA_TRACE(("%s: dma_txfast\n", di->name));
-+
-+ txout = di->txout;
-+ ctrl = 0;
-+
-+ /*
-+ * Walk the chain of packet buffers
-+ * allocating and initializing transmit descriptor entries.
-+ */
-+ for (p = p0; p; p = next) {
-+ data = PKTDATA(di->osh, p);
-+ len = PKTLEN(di->osh, p);
-+ next = PKTNEXT(di->osh, p);
-+
-+ /* return nonzero if out of tx descriptors */
-+ if (NEXTTXD(txout) == di->txin)
-+ goto outoftxd;
-+
-+ if (len == 0)
-+ continue;
-+
-+ /* get physical address of buffer start */
-+ pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
-+
-+ /* build the descriptor control value */
-+ ctrl = len & CTRL_BC_MASK;
-+
-+ ctrl |= coreflags;
-+
-+ if (p == p0)
-+ ctrl |= CTRL_SOF;
-+ if (next == NULL)
-+ ctrl |= (CTRL_IOC | CTRL_EOF);
-+ if (txout == (di->ntxd - 1))
-+ ctrl |= CTRL_EOT;
-+
-+ if (DMA64_ENAB(di)) {
-+ dma64_dd_upd(di, di->txd64, pa, txout, &ctrl, len);
-+ } else {
-+ dma32_dd_upd(di, di->txd32, pa, txout, &ctrl);
-+ }
-+
-+ ASSERT(di->txp[txout] == NULL);
-+
-+ txout = NEXTTXD(txout);
-+ }
-+
-+ /* if last txd eof not set, fix it */
-+ if (!(ctrl & CTRL_EOF))
-+ W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));
-+
-+ /* save the packet */
-+ di->txp[PREVTXD(txout)] = p0;
-+
-+ /* bump the tx descriptor index */
-+ di->txout = txout;
-+
-+ /* kick the chip */
-+ if (DMA64_ENAB(di)) {
-+ W_REG(&di->d64txregs->ptr, I2B(txout, dma64dd_t));
-+ } else {
-+ W_REG(&di->d32txregs->ptr, I2B(txout, dma32dd_t));
-+ }
-+
-+ /* tx flow control */
-+ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+ return (0);
-+
-+ outoftxd:
-+ DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
-+ PKTFREE(di->osh, p0, TRUE);
-+ di->txavail = 0;
-+ di->hnddma.txnobuf++;
-+ return (-1);
-+}
-+
-+static void*
-+dma32_getnexttxp(dma_info_t *di, bool forceall)
-+{
-+ uint start, end, i;
-+ void *txp;
-+
-+ DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
-+
-+ txp = NULL;
-+
-+ start = di->txin;
-+ if (forceall)
-+ end = di->txout;
-+ else
-+ end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
-+
-+ if ((start == 0) && (end > di->txout))
-+ goto bogus;
-+
-+ for (i = start; i != end && !txp; i = NEXTTXD(i)) {
-+ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow),
-+ (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK), DMA_TX, di->txp[i]);
-+
-+ W_SM(&di->txd32[i].addr, 0xdeadbeef);
-+ txp = di->txp[i];
-+ di->txp[i] = NULL;
-+ }
-+
-+ di->txin = i;
-+
-+ /* tx flow control */
-+ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+ return (txp);
-+
-+bogus:
-+/*
-+ DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
-+ start, end, di->txout, forceall));
-+*/
-+ return (NULL);
-+}
-+
-+static void *
-+dma32_getnextrxp(dma_info_t *di, bool forceall)
-+{
-+ uint i;
-+ void *rxp;
-+
-+ /* if forcing, dma engine must be disabled */
-+ ASSERT(!forceall || !dma_rxenabled(di));
-+
-+ i = di->rxin;
-+
-+ /* return if no packets posted */
-+ if (i == di->rxout)
-+ return (NULL);
-+
-+ /* ignore curr if forceall */
-+ if (!forceall && (i == B2I(R_REG(&di->d32rxregs->status) & RS_CD_MASK, dma32dd_t)))
-+ return (NULL);
-+
-+ /* get the packet pointer that corresponds to the rx descriptor */
-+ rxp = di->rxp[i];
-+ ASSERT(rxp);
-+ di->rxp[i] = NULL;
-+
-+ /* clear this packet from the descriptor ring */
-+ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow),
-+ di->rxbufsize, DMA_RX, rxp);
-+ W_SM(&di->rxd32[i].addr, 0xdeadbeef);
-+
-+ di->rxin = NEXTRXD(i);
-+
-+ return (rxp);
-+}
-+
-+static void
-+dma32_txrotate(di_t *di)
-+{
-+ uint ad;
-+ uint nactive;
-+ uint rot;
-+ uint old, new;
-+ uint32 w;
-+ uint first, last;
-+
-+ ASSERT(dma_txsuspendedidle(di));
-+
-+ nactive = dma_txactive(di);
-+ ad = B2I(((R_REG(&di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t);
-+ rot = TXD(ad - di->txin);
-+
-+ ASSERT(rot < di->ntxd);
-+
-+ /* full-ring case is a lot harder - don't worry about this */
-+ if (rot >= (di->ntxd - nactive)) {
-+ DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
-+ return;
-+ }
-+
-+ first = di->txin;
-+ last = PREVTXD(di->txout);
-+
-+ /* move entries starting at last and moving backwards to first */
-+ for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
-+ new = TXD(old + rot);
-+
-+ /*
-+ * Move the tx dma descriptor.
-+ * EOT is set only in the last entry in the ring.
-+ */
-+ w = R_SM(&di->txd32[old].ctrl) & ~CTRL_EOT;
-+ if (new == (di->ntxd - 1))
-+ w |= CTRL_EOT;
-+ W_SM(&di->txd32[new].ctrl, w);
-+ W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
-+
-+ /* zap the old tx dma descriptor address field */
-+ W_SM(&di->txd32[old].addr, 0xdeadbeef);
-+
-+ /* move the corresponding txp[] entry */
-+ ASSERT(di->txp[new] == NULL);
-+ di->txp[new] = di->txp[old];
-+ di->txp[old] = NULL;
-+ }
-+
-+ /* update txin and txout */
-+ di->txin = ad;
-+ di->txout = TXD(di->txout + rot);
-+ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+ /* kick the chip */
-+ W_REG(&di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
-+}
-+
-+/*** 64 bits DMA non-inline functions ***/
-+
-+#ifdef BCMDMA64
-+
-+static bool
-+dma64_alloc(dma_info_t *di, uint direction)
-+{
-+ uint size;
-+ uint ddlen;
-+ uint32 alignbytes;
-+ void *va;
-+
-+ ddlen = sizeof (dma64dd_t);
-+
-+ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
-+
-+ alignbytes = di->dma64align;
-+
-+ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, alignbytes))
-+ size += alignbytes;
-+
-+
-+ if (direction == DMA_TX) {
-+ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa)) == NULL) {
-+ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
-+ return FALSE;
-+ }
-+
-+ di->txd64 = (dma64dd_t*) ROUNDUP((uintptr)va, alignbytes);
-+ di->txdalign = (uint)((int8*)di->txd64 - (int8*)va);
-+ di->txdpa += di->txdalign;
-+ di->txdalloc = size;
-+ ASSERT(ISALIGNED((uintptr)di->txd64, alignbytes));
-+ } else {
-+ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa)) == NULL) {
-+ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
-+ return FALSE;
-+ }
-+ di->rxd64 = (dma64dd_t*) ROUNDUP((uintptr)va, alignbytes);
-+ di->rxdalign = (uint)((int8*)di->rxd64 - (int8*)va);
-+ di->rxdpa += di->rxdalign;
-+ di->rxdalloc = size;
-+ ASSERT(ISALIGNED((uintptr)di->rxd64, alignbytes));
-+ }
-+
-+ return TRUE;
-+}
-+
-+static void
-+dma64_txreset(dma_info_t *di)
-+{
-+ uint32 status;
-+
-+ /* suspend tx DMA first */
-+ W_REG(&di->d64txregs->control, D64_XC_SE);
-+ SPINWAIT((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED &&
-+ status != D64_XS0_XS_IDLE &&
-+ status != D64_XS0_XS_STOPPED,
-+ 10000);
-+
-+ W_REG(&di->d64txregs->control, 0);
-+ SPINWAIT((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED,
-+ 10000);
-+
-+ if (status != D64_XS0_XS_DISABLED) {
-+ DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
-+ }
-+
-+ /* wait for the last transaction to complete */
-+ OSL_DELAY(300);
-+}
-+
-+static void
-+dma64_rxreset(dma_info_t *di)
-+{
-+ uint32 status;
-+
-+ W_REG(&di->d64rxregs->control, 0);
-+ SPINWAIT((status = (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED,
-+ 10000);
-+
-+ if (status != D64_RS0_RS_DISABLED) {
-+ DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
-+ }
-+}
-+
-+static bool
-+dma64_txsuspendedidle(dma_info_t *di)
-+{
-+
-+ if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
-+ return 0;
-+
-+ if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/*
-+ * supports full 32bit dma engine buffer addressing so
-+ * dma buffers can cross 4 Kbyte page boundaries.
-+ */
-+static int
-+dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags)
-+{
-+ void *p, *next;
-+ uchar *data;
-+ uint len;
-+ uint txout;
-+ uint32 flags;
-+ uint32 pa;
-+
-+ DMA_TRACE(("%s: dma_txfast\n", di->name));
-+
-+ txout = di->txout;
-+ flags = 0;
-+
-+ /*
-+ * Walk the chain of packet buffers
-+ * allocating and initializing transmit descriptor entries.
-+ */
-+ for (p = p0; p; p = next) {
-+ data = PKTDATA(di->osh, p);
-+ len = PKTLEN(di->osh, p);
-+ next = PKTNEXT(di->osh, p);
-+
-+ /* return nonzero if out of tx descriptors */
-+ if (NEXTTXD(txout) == di->txin)
-+ goto outoftxd;
-+
-+ if (len == 0)
-+ continue;
-+
-+ /* get physical address of buffer start */
-+ pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
-+
-+ flags = coreflags;
-+
-+ if (p == p0)
-+ flags |= D64_CTRL1_SOF;
-+ if (next == NULL)
-+ flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
-+ if (txout == (di->ntxd - 1))
-+ flags |= D64_CTRL1_EOT;
-+
-+ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
-+
-+ ASSERT(di->txp[txout] == NULL);
-+
-+ txout = NEXTTXD(txout);
-+ }
-+
-+ /* if last txd eof not set, fix it */
-+ if (!(flags & D64_CTRL1_EOF))
-+ W_SM(&di->txd64[PREVTXD(txout)].ctrl1, BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
-+
-+ /* save the packet */
-+ di->txp[PREVTXD(txout)] = p0;
-+
-+ /* bump the tx descriptor index */
-+ di->txout = txout;
-+
-+ /* kick the chip */
-+ W_REG(&di->d64txregs->ptr, I2B(txout, dma64dd_t));
-+
-+ /* tx flow control */
-+ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+ return (0);
-+
-+outoftxd:
-+ DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
-+ PKTFREE(di->osh, p0, TRUE);
-+ di->txavail = 0;
-+ di->hnddma.txnobuf++;
-+ return (-1);
-+}
-+
-+static void*
-+dma64_getnexttxp(dma_info_t *di, bool forceall)
-+{
-+ uint start, end, i;
-+ void *txp;
-+
-+ DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
-+
-+ txp = NULL;
-+
-+ start = di->txin;
-+ if (forceall)
-+ end = di->txout;
-+ else
-+ end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
-+
-+ if ((start == 0) && (end > di->txout))
-+ goto bogus;
-+
-+ for (i = start; i != end && !txp; i = NEXTTXD(i)) {
-+ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow),
-+ (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK), DMA_TX, di->txp[i]);
-+
-+ W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
-+ W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
-+
-+ txp = di->txp[i];
-+ di->txp[i] = NULL;
-+ }
-+
-+ di->txin = i;
-+
-+ /* tx flow control */
-+ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+ return (txp);
-+
-+bogus:
-+/*
-+ DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
-+ start, end, di->txout, forceall));
-+*/
-+ return (NULL);
-+}
-+
-+static void *
-+dma64_getnextrxp(dma_info_t *di, bool forceall)
-+{
-+ uint i;
-+ void *rxp;
-+
-+ /* if forcing, dma engine must be disabled */
-+ ASSERT(!forceall || !dma_rxenabled(di));
-+
-+ i = di->rxin;
-+
-+ /* return if no packets posted */
-+ if (i == di->rxout)
-+ return (NULL);
-+
-+ /* ignore curr if forceall */
-+ if (!forceall && (i == B2I(R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK, dma64dd_t)))
-+ return (NULL);
-+
-+ /* get the packet pointer that corresponds to the rx descriptor */
-+ rxp = di->rxp[i];
-+ ASSERT(rxp);
-+ di->rxp[i] = NULL;
-+
-+ /* clear this packet from the descriptor ring */
-+ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow),
-+ di->rxbufsize, DMA_RX, rxp);
-+
-+ W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
-+ W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
-+
-+ di->rxin = NEXTRXD(i);
-+
-+ return (rxp);
-+}
-+
-+static void
-+dma64_txrotate(di_t *di)
-+{
-+ uint ad;
-+ uint nactive;
-+ uint rot;
-+ uint old, new;
-+ uint32 w;
-+ uint first, last;
-+
-+ ASSERT(dma_txsuspendedidle(di));
-+
-+ nactive = dma_txactive(di);
-+ ad = B2I((R_REG(&di->d64txregs->status1) & D64_XS1_AD_MASK), dma64dd_t);
-+ rot = TXD(ad - di->txin);
-+
-+ ASSERT(rot < di->ntxd);
-+
-+ /* full-ring case is a lot harder - don't worry about this */
-+ if (rot >= (di->ntxd - nactive)) {
-+ DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
-+ return;
-+ }
-+
-+ first = di->txin;
-+ last = PREVTXD(di->txout);
-+
-+ /* move entries starting at last and moving backwards to first */
-+ for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
-+ new = TXD(old + rot);
-+
-+ /*
-+ * Move the tx dma descriptor.
-+ * EOT is set only in the last entry in the ring.
-+ */
-+ w = R_SM(&di->txd64[old].ctrl1) & ~D64_CTRL1_EOT;
-+ if (new == (di->ntxd - 1))
-+ w |= D64_CTRL1_EOT;
-+ W_SM(&di->txd64[new].ctrl1, w);
-+
-+ w = R_SM(&di->txd64[old].ctrl2);
-+ W_SM(&di->txd64[new].ctrl2, w);
-+
-+ W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
-+ W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
-+
-+ /* zap the old tx dma descriptor address field */
-+ W_SM(&di->txd64[old].addrlow, 0xdeadbeef);
-+ W_SM(&di->txd64[old].addrhigh, 0xdeadbeef);
-+
-+ /* move the corresponding txp[] entry */
-+ ASSERT(di->txp[new] == NULL);
-+ di->txp[new] = di->txp[old];
-+ di->txp[old] = NULL;
-+ }
-+
-+ /* update txin and txout */
-+ di->txin = ad;
-+ di->txout = TXD(di->txout + rot);
-+ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+ /* kick the chip */
-+ W_REG(&di->d64txregs->ptr, I2B(di->txout, dma64dd_t));
-+}
-+
-+#endif
-+
-diff -Naur linux.old/drivers/net/wl/hnddma.h linux.dev/drivers/net/wl/hnddma.h
---- linux.old/drivers/net/wl/hnddma.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux.dev/drivers/net/wl/hnddma.h 2006-04-06 16:57:44.000000000 +0200
-@@ -0,0 +1,69 @@
-+/*
-+ * Generic Broadcom Home Networking Division (HND) DMA engine SW interface
-+ * This supports the following chips: BCM42xx, 44xx, 47xx .
-+ *
-+ * Copyright 2005, Broadcom Corporation
-+ * All Rights Reserved.
-+ *
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
-+ * $Id$
-+ */
-+
-+#ifndef _hnddma_h_
-+#define _hnddma_h_
-+
-+/* export structure */
-+typedef volatile struct {
-+ /* rx error counters */
-+ uint rxgiants; /* rx giant frames */
-+ uint rxnobuf; /* rx out of dma descriptors */
-+ /* tx error counters */
-+ uint txnobuf; /* tx out of dma descriptors */
-+} hnddma_t;
-+
-+#ifndef di_t
-+#define di_t void
-+#endif
-+
-+#ifndef osl_t
-+#define osl_t void
-+#endif
-+
-+/* externs */
-+extern void dma_detach(di_t *di);
-+extern void dma_txreset(di_t *di);
-+extern void dma_rxreset(di_t *di);
-+extern void dma_txinit(di_t *di);
-+extern bool dma_txenabled(di_t *di);
-+extern void dma_rxinit(di_t *di);
-+extern void dma_rxenable(di_t *di);
-+extern bool dma_rxenabled(di_t *di);
-+extern void dma_txsuspend(di_t *di);
-+extern void dma_txresume(di_t *di);
-+extern bool dma_txsuspended(di_t *di);
-+extern bool dma_txsuspendedidle(di_t *di);
-+extern bool dma_txstopped(di_t *di);
-+extern bool dma_rxstopped(di_t *di);
-+extern int dma_txfast(di_t *di, void *p, uint32 coreflags);
-+extern void dma_fifoloopbackenable(di_t *di);
-+extern void *dma_rx(di_t *di);
-+extern void dma_rxfill(di_t *di);
-+extern void dma_txreclaim(di_t *di, bool forceall);
-+extern void dma_rxreclaim(di_t *di);
-+extern uintptr dma_getvar(di_t *di, char *name);
-+extern void *dma_getnexttxp(di_t *di, bool forceall);
-+extern void *dma_peeknexttxp(di_t *di);
-+extern void *dma_getnextrxp(di_t *di, bool forceall);
-+extern void dma_txblock(di_t *di);
-+extern void dma_txunblock(di_t *di);
-+extern uint dma_txactive(di_t *di);
-+extern void dma_txrotate(di_t *di);
-+
-+extern void dma_rxpiomode(dma32regs_t *);
-+extern void dma_txpioloopback(dma32regs_t *);
-+
-+
-+#endif /* _hnddma_h_ */
-diff -Naur linux.old/drivers/net/wl/pktq.h linux.dev/drivers/net/wl/pktq.h
---- linux.old/drivers/net/wl/pktq.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux.dev/drivers/net/wl/pktq.h 2006-04-06 17:32:52.000000000 +0200
-@@ -0,0 +1,48 @@
-+/*
-+ * Misc useful os-independent macros and functions.
-+ *
-+ * Copyright 2005, Broadcom Corporation
-+ * All Rights Reserved.
-+ *
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
-+ * $Id$
-+ */
-+
-+#ifndef _pktq_h_
-+#define _pktq_h_
-+
-+/*** driver-only section ***/
-+#ifdef BCMDRIVER
-+
-+/* generic osl packet queue */
-+struct pktq {
-+ void *head; /* first packet to dequeue */
-+ void *tail; /* last packet to dequeue */
-+ uint len; /* number of queued packets */
-+ uint maxlen; /* maximum number of queued packets */
-+ bool priority; /* enqueue by packet priority */
-+ uint8 prio_map[MAXPRIO+1]; /* user priority to packet enqueue policy map */
-+};
-+#define DEFAULT_QLEN 128
-+
-+#define pktq_len(q) ((q)->len)
-+#define pktq_avail(q) ((q)->maxlen - (q)->len)
-+#define pktq_head(q) ((q)->head)
-+#define pktq_full(q) ((q)->len >= (q)->maxlen)
-+#define _pktq_pri(q, pri) ((q)->prio_map[pri])
-+#define pktq_tailpri(q) ((q)->tail ? _pktq_pri(q, PKTPRIO((q)->tail)) : _pktq_pri(q, 0))
-+
-+/* externs */
-+/* packet */
-+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
-+extern uint pkttotlen(osl_t *osh, void *);
-+extern void pktq_init(struct pktq *q, uint maxlen, const uint8 prio_map[]);
-+extern void pktenq(struct pktq *q, void *p, bool lifo);
-+extern void *pktdeq(struct pktq *q);
-+extern void *pktdeqtail(struct pktq *q);
-+
-+#endif
-+#endif /* _pktq_h_ */
-diff -Naur linux.old/drivers/net/wl/sbhnddma.h linux.dev/drivers/net/wl/sbhnddma.h
---- linux.old/drivers/net/wl/sbhnddma.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux.dev/drivers/net/wl/sbhnddma.h 2006-04-06 15:34:14.000000000 +0200
-@@ -0,0 +1,312 @@
-+/*
-+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
-+ * This supports the following chips: BCM42xx, 44xx, 47xx .
-+ *
-+ * Copyright 2005, Broadcom Corporation
-+ * All Rights Reserved.
-+ *
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
-+ * $Id$
-+ */
-+
-+#ifndef _sbhnddma_h_
-+#define _sbhnddma_h_
-+
-+
-+/* 2byte-wide pio register set per channel(xmt or rcv) */
-+typedef volatile struct {
-+ uint16 fifocontrol;
-+ uint16 fifodata;
-+ uint16 fifofree; /* only valid in xmt channel, not in rcv channel */
-+ uint16 PAD;
-+} pio2regs_t;
-+
-+/* a pair of pio channels(tx and rx) */
-+typedef volatile struct {
-+ pio2regs_t tx;
-+ pio2regs_t rx;
-+} pio2regp_t;
-+
-+/* 4byte-wide pio register set per channel(xmt or rcv) */
-+typedef volatile struct {
-+ uint32 fifocontrol;
-+ uint32 fifodata;
-+} pio4regs_t;
-+
-+/* a pair of pio channels(tx and rx) */
-+typedef volatile struct {
-+ pio4regs_t tx;
-+ pio4regs_t rx;
-+} pio4regp_t;
-+
-+
-+
-+/* DMA structure:
-+ * supports two DMA engines: 32-bit or 64-bit addressing
-+ * basic DMA register set is per channel(transmit or receive)
-+ * a pair of channels is defined for convenience
-+ */
-+
-+
-+/*** 32 bits addressing ***/
-+
-+/* dma registers per channel(xmt or rcv) */
-+typedef volatile struct {
-+ uint32 control; /* enable, et al */
-+ uint32 addr; /* descriptor ring base address (4K aligned) */
-+ uint32 ptr; /* last descriptor posted to chip */
-+ uint32 status; /* current active descriptor, et al */
-+} dma32regs_t;
-+
-+typedef volatile struct {
-+ dma32regs_t xmt; /* dma tx channel */
-+ dma32regs_t rcv; /* dma rx channel */
-+} dma32regp_t;
-+
-+typedef volatile struct { /* diag access */
-+ uint32 fifoaddr; /* diag address */
-+ uint32 fifodatalow; /* low 32bits of data */
-+ uint32 fifodatahigh; /* high 32bits of data */
-+ uint32 pad; /* reserved */
-+} dma32diag_t;
-+
-+/*
-+ * DMA Descriptor
-+ * Descriptors are only read by the hardware, never written back.
-+ */
-+typedef volatile struct {
-+ uint32 ctrl; /* misc control bits & bufcount */
-+ uint32 addr; /* data buffer address */
-+} dma32dd_t;
-+
-+/*
-+ * Each descriptor ring must be 4096-byte aligned and fit within a single 4096-byte page.
-+ */
-+#define D32MAXRINGSZ 4096
-+#define D32RINGALIGN 4096
-+#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t))
-+
-+/* transmit channel control */
-+#define XC_XE ((uint32)1 << 0) /* transmit enable */
-+#define XC_SE ((uint32)1 << 1) /* transmit suspend request */
-+#define XC_LE ((uint32)1 << 2) /* loopback enable */
-+#define XC_FL ((uint32)1 << 4) /* flush request */
-+#define XC_AE ((uint32)3 << 16) /* address extension bits */
-+#define XC_AE_SHIFT 16
-+
-+/* transmit descriptor table pointer */
-+#define XP_LD_MASK 0xfff /* last valid descriptor */
-+
-+/* transmit channel status */
-+#define XS_CD_MASK 0x0fff /* current descriptor pointer */
-+#define XS_XS_MASK 0xf000 /* transmit state */
-+#define XS_XS_SHIFT 12
-+#define XS_XS_DISABLED 0x0000 /* disabled */
-+#define XS_XS_ACTIVE 0x1000 /* active */
-+#define XS_XS_IDLE 0x2000 /* idle wait */
-+#define XS_XS_STOPPED 0x3000 /* stopped */
-+#define XS_XS_SUSP 0x4000 /* suspend pending */
-+#define XS_XE_MASK 0xf0000 /* transmit errors */
-+#define XS_XE_SHIFT 16
-+#define XS_XE_NOERR 0x00000 /* no error */
-+#define XS_XE_DPE 0x10000 /* descriptor protocol error */
-+#define XS_XE_DFU 0x20000 /* data fifo underrun */
-+#define XS_XE_BEBR 0x30000 /* bus error on buffer read */
-+#define XS_XE_BEDA 0x40000 /* bus error on descriptor access */
-+#define XS_AD_MASK 0xfff00000 /* active descriptor */
-+#define XS_AD_SHIFT 20
-+
-+/* receive channel control */
-+#define RC_RE ((uint32)1 << 0) /* receive enable */
-+#define RC_RO_MASK 0xfe /* receive frame offset */
-+#define RC_RO_SHIFT 1
-+#define RC_FM ((uint32)1 << 8) /* direct fifo receive (pio) mode */
-+#define RC_AE ((uint32)3 << 16) /* address extension bits */
-+#define RC_AE_SHIFT 16
-+
-+/* receive descriptor table pointer */
-+#define RP_LD_MASK 0xfff /* last valid descriptor */
-+
-+/* receive channel status */
-+#define RS_CD_MASK 0x0fff /* current descriptor pointer */
-+#define RS_RS_MASK 0xf000 /* receive state */
-+#define RS_RS_SHIFT 12
-+#define RS_RS_DISABLED 0x0000 /* disabled */
-+#define RS_RS_ACTIVE 0x1000 /* active */
-+#define RS_RS_IDLE 0x2000 /* idle wait */
-+#define	RS_RS_STOPPED	0x3000		/* stopped */
-+#define RS_RE_MASK 0xf0000 /* receive errors */
-+#define RS_RE_SHIFT 16
-+#define RS_RE_NOERR 0x00000 /* no error */
-+#define RS_RE_DPE 0x10000 /* descriptor protocol error */
-+#define RS_RE_DFO 0x20000 /* data fifo overflow */
-+#define RS_RE_BEBW 0x30000 /* bus error on buffer write */
-+#define RS_RE_BEDA 0x40000 /* bus error on descriptor access */
-+#define RS_AD_MASK 0xfff00000 /* active descriptor */
-+#define RS_AD_SHIFT 20
-+
-+/* fifoaddr */
-+#define FA_OFF_MASK 0xffff /* offset */
-+#define FA_SEL_MASK 0xf0000 /* select */
-+#define FA_SEL_SHIFT 16
-+#define FA_SEL_XDD 0x00000 /* transmit dma data */
-+#define FA_SEL_XDP 0x10000 /* transmit dma pointers */
-+#define FA_SEL_RDD 0x40000 /* receive dma data */
-+#define FA_SEL_RDP 0x50000 /* receive dma pointers */
-+#define FA_SEL_XFD 0x80000 /* transmit fifo data */
-+#define FA_SEL_XFP 0x90000 /* transmit fifo pointers */
-+#define FA_SEL_RFD 0xc0000 /* receive fifo data */
-+#define FA_SEL_RFP 0xd0000 /* receive fifo pointers */
-+#define FA_SEL_RSD 0xe0000 /* receive frame status data */
-+#define FA_SEL_RSP 0xf0000 /* receive frame status pointers */
-+
-+/* descriptor control flags */
-+#define CTRL_BC_MASK 0x1fff /* buffer byte count */
-+#define CTRL_AE ((uint32)3 << 16) /* address extension bits */
-+#define CTRL_AE_SHIFT 16
-+#define CTRL_EOT ((uint32)1 << 28) /* end of descriptor table */
-+#define CTRL_IOC ((uint32)1 << 29) /* interrupt on completion */
-+#define CTRL_EOF ((uint32)1 << 30) /* end of frame */
-+#define CTRL_SOF ((uint32)1 << 31) /* start of frame */
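-+/*
-+ * Example: a 1500-byte buffer that both starts and ends a frame and wants a
-+ * completion interrupt would carry ctrl = 1500 | CTRL_SOF | CTRL_EOF |
-+ * CTRL_IOC (plus CTRL_EOT if it sits in the last slot of the ring).
-+ */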
-+
-+/* control flags in the range [27:20] are core-specific and not defined here */
-+#define CTRL_CORE_MASK 0x0ff00000
-+
-+/*** 64 bits addressing ***/
-+
-+/* dma registers per channel(xmt or rcv) */
-+typedef volatile struct {
-+ uint32 control; /* enable, et al */
-+ uint32 ptr; /* last descriptor posted to chip */
-+ uint32 addrlow; /* descriptor ring base address low 32-bits (8K aligned) */
-+ uint32 addrhigh; /* descriptor ring base address bits 63:32 (8K aligned) */
-+ uint32 status0; /* current descriptor, xmt state */
-+ uint32 status1; /* active descriptor, xmt error */
-+} dma64regs_t;
-+
-+typedef volatile struct {
-+ dma64regs_t tx; /* dma64 tx channel */
-+ dma64regs_t rx; /* dma64 rx channel */
-+} dma64regp_t;
-+
-+typedef volatile struct { /* diag access */
-+ uint32 fifoaddr; /* diag address */
-+ uint32 fifodatalow; /* low 32bits of data */
-+ uint32 fifodatahigh; /* high 32bits of data */
-+ uint32 pad; /* reserved */
-+} dma64diag_t;
-+
-+/*
-+ * DMA Descriptor
-+ * Descriptors are only read by the hardware, never written back.
-+ */
-+typedef volatile struct {
-+ uint32 ctrl1; /* misc control bits & bufcount */
-+ uint32 ctrl2; /* buffer count and address extension */
-+	uint32	addrlow;	/* memory address of the first byte of the data buffer, bits 31:0 */
-+	uint32	addrhigh;	/* memory address of the first byte of the data buffer, bits 63:32 */
-+} dma64dd_t;
-+
-+/*
-+ * Each descriptor ring must be 8 KB aligned and fit within a contiguous 8 KB region of physical address space.
-+ */
-+#define D64MAXRINGSZ 8192
-+#define D64RINGALIGN 8192
-+#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
-+
-+/* transmit channel control */
-+#define D64_XC_XE 0x00000001 /* transmit enable */
-+#define D64_XC_SE 0x00000002 /* transmit suspend request */
-+#define D64_XC_LE 0x00000004 /* loopback enable */
-+#define D64_XC_FL 0x00000010 /* flush request */
-+#define D64_XC_AE 0x00110000 /* address extension bits */
-+#define D64_XC_AE_SHIFT 16
-+
-+/* transmit descriptor table pointer */
-+#define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */
-+
-+/* transmit channel status */
-+#define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
-+#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
-+#define D64_XS0_XS_SHIFT 28
-+#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
-+#define D64_XS0_XS_ACTIVE 0x10000000 /* active */
-+#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
-+#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
-+#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
-+
-+#define D64_XS1_AD_MASK 0x0001ffff /* active descriptor */
-+#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
-+#define D64_XS1_XE_SHIFT 28
-+#define D64_XS1_XE_NOERR 0x00000000 /* no error */
-+#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
-+#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
-+#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
-+#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
-+#define D64_XS1_XE_COREE 0x50000000 /* core error */
-+
-+/* receive channel control */
-+#define D64_RC_RE 0x00000001 /* receive enable */
-+#define D64_RC_RO_MASK 0x000000fe /* receive frame offset */
-+#define D64_RC_RO_SHIFT 1
-+#define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */
-+#define D64_RC_AE 0x00110000 /* address extension bits */
-+#define D64_RC_AE_SHIFT 16
-+
-+/* receive descriptor table pointer */
-+#define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
-+
-+/* receive channel status */
-+#define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
-+#define D64_RS0_RS_MASK 0xf0000000 /* receive state */
-+#define D64_RS0_RS_SHIFT 28
-+#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
-+#define D64_RS0_RS_ACTIVE 0x10000000 /* active */
-+#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
-+#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
-+#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
-+
-+#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
-+#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
-+#define D64_RS1_RE_SHIFT 28
-+#define D64_RS1_RE_NOERR 0x00000000 /* no error */
-+#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
-+#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
-+#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
-+#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
-+#define D64_RS1_RE_COREE 0x50000000 /* core error */
-+
-+/* fifoaddr */
-+#define D64_FA_OFF_MASK 0xffff /* offset */
-+#define D64_FA_SEL_MASK 0xf0000 /* select */
-+#define D64_FA_SEL_SHIFT 16
-+#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
-+#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
-+#define D64_FA_SEL_RDD 0x40000 /* receive dma data */
-+#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
-+#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
-+#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
-+#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
-+#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
-+#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
-+#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
-+
-+/* descriptor control flags 1 */
-+#define D64_CTRL1_EOT ((uint32)1 << 28) /* end of descriptor table */
-+#define D64_CTRL1_IOC ((uint32)1 << 29) /* interrupt on completion */
-+#define D64_CTRL1_EOF ((uint32)1 << 30) /* end of frame */
-+#define D64_CTRL1_SOF ((uint32)1 << 31) /* start of frame */
-+
-+/* descriptor control flags 2 */
-+#define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count mask */
-+#define D64_CTRL2_AE 0x00110000 /* address extension bits */
-+#define D64_CTRL2_AE_SHIFT 16
-+
-+/* control flags in the range [27:20] are core-specific and not defined here */
-+#define D64_CTRL_CORE_MASK 0x0ff00000
-+
-+
-+#endif /* _sbhnddma_h_ */
-diff -Naur linux.old/drivers/net/wl2/Makefile linux.dev/drivers/net/wl2/Makefile
---- linux.old/drivers/net/wl2/Makefile 1970-01-01 01:00:00.000000000 +0100
-+++ linux.dev/drivers/net/wl2/Makefile 2006-04-06 16:56:27.000000000 +0200
-@@ -0,0 +1,23 @@
-+#
-+# Makefile for the Broadcom wl driver
-+#
-+# Copyright 2004, Broadcom Corporation
-+# All Rights Reserved.
-+#
-+# THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
-+# KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
-+# SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
-+# FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
-+#
-+# $Id: Makefile,v 1.2 2005/03/29 03:32:18 mbm Exp $
-+
-+EXTRA_CFLAGS += -I$(TOPDIR)/arch/mips/bcm947xx/include -DBCMDRIVER
-+
-+O_TARGET := wl.o
-+
-+obj-y := wl_apsta.o
-+obj-y += compat.o hnddma.o
-+
-+obj-m := $(O_TARGET)
-+
-+include $(TOPDIR)/Rules.make
-diff -Naur linux.old/drivers/net/wl2/compat.c linux.dev/drivers/net/wl2/compat.c
---- linux.old/drivers/net/wl2/compat.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux.dev/drivers/net/wl2/compat.c 2006-04-06 17:08:21.000000000 +0200
-@@ -0,0 +1,376 @@
-+/*
-+ * Misc useful OS-independent routines.
-+ *
-+ * Copyright 2005, Broadcom Corporation
-+ * All Rights Reserved.
-+ *
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
-+ * $Id$
-+ */
-+
-+#include <typedefs.h>
-+#ifdef BCMDRIVER
-+#include <osl.h>
-+#include <sbutils.h>
-+#include <bcmnvram.h>
-+#else
-+#include <stdio.h>
-+#include <string.h>
-+#endif
-+#include "pktq.h"
-+#include <bcmutils.h>
-+#include <bcmendian.h>
-+#include <bcmdevs.h>
-+
-+#ifdef BCMDRIVER
-+/* copy a pkt buffer chain into a buffer */
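-+/*
-+ * Skips 'offset' bytes into the chain, then copies up to 'len' bytes into
-+ * 'buf' (a negative len is treated as "copy everything", capped at 4096).
-+ * Returns the number of bytes actually copied.
-+ */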
-+uint
-+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
-+{
-+ uint n, ret = 0;
-+
-+ if (len < 0)
-+ len = 4096; /* "infinite" */
-+
-+ /* skip 'offset' bytes */
-+ for (; p && offset; p = PKTNEXT(osh, p)) {
-+ if (offset < (uint)PKTLEN(osh, p))
-+ break;
-+ offset -= PKTLEN(osh, p);
-+ }
-+
-+ if (!p)
-+ return 0;
-+
-+ /* copy the data */
-+ for (; p && len; p = PKTNEXT(osh, p)) {
-+ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
-+ bcopy(PKTDATA(osh, p) + offset, buf, n);
-+ buf += n;
-+ len -= n;
-+ ret += n;
-+ offset = 0;
-+ }
-+
-+ return ret;
-+}
-+
-+/* return total length of buffer chain */
-+uint
-+pkttotlen(osl_t *osh, void *p)
-+{
-+ uint total;
-+
-+ total = 0;
-+ for (; p; p = PKTNEXT(osh, p))
-+ total += PKTLEN(osh, p);
-+ return (total);
-+}
-+
-+/*
-+ * osl multiple-precedence packet queue
-+ * hi_prec is always >= the number of the highest non-empty queue
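-+ * pktq_penq() appends at the tail of precedence queue 'prec' and
-+ * pktq_penq_head() pushes at the head; both chain packets through
-+ * PKTSETLINK(), bump the per-precedence and total lengths, and raise
-+ * hi_prec when a higher precedence becomes non-empty.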
-+ */
-+
-+void *
-+pktq_penq(struct pktq *pq, int prec, void *p)
-+{
-+ struct pktq_prec *q;
-+
-+ ASSERT(prec >= 0 && prec < pq->num_prec);
-+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
-+
-+ ASSERT(!pktq_full(pq));
-+ ASSERT(!pktq_pfull(pq, prec));
-+
-+ q = &pq->q[prec];
-+
-+ if (q->head)
-+ PKTSETLINK(q->tail, p);
-+ else
-+ q->head = p;
-+
-+ q->tail = p;
-+ q->len++;
-+
-+ if (pq->hi_prec < prec)
-+ pq->hi_prec = (uint8)prec;
-+
-+ pq->len++;
-+
-+ return p;
-+}
-+
-+void *
-+pktq_penq_head(struct pktq *pq, int prec, void *p)
-+{
-+ struct pktq_prec *q;
-+
-+ ASSERT(prec >= 0 && prec < pq->num_prec);
-+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
-+
-+ ASSERT(!pktq_full(pq));
-+ ASSERT(!pktq_pfull(pq, prec));
-+
-+ q = &pq->q[prec];
-+
-+ if (q->head)
-+ PKTSETLINK(p, q->head);
-+ else
-+ q->tail = p;
-+
-+ q->head = p;
-+ q->len++;
-+
-+ if (pq->hi_prec < prec)
-+ pq->hi_prec = (uint8)prec;
-+
-+ pq->len++;
-+
-+ return p;
-+}
-+
-+void *
-+pktq_pdeq(struct pktq *pq, int prec)