++ if (DMA64_ENAB(di))
++ W_REG(&di->d64rxregs->control, ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
++ else
++ W_REG(&di->d32rxregs->control, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
++}
++
++bool
++dma_rxenabled(dma_info_t *di)
++{
++ uint32 rc;
++
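++ /* a control register read of all ones means the core is not
++ * responding (e.g. the backplane clock is off), so treat it as disabled */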
++ if (DMA64_ENAB(di)) {
++ rc = R_REG(&di->d64rxregs->control);
++ return ((rc != 0xffffffff) && (rc & D64_RC_RE));
++ } else {
++ rc = R_REG(&di->d32rxregs->control);
++ return ((rc != 0xffffffff) && (rc & RC_RE));
++ }
++}
++
++
++/* !! tx entry routine */
++int
++dma_txfast(dma_info_t *di, void *p0, uint32 coreflags)
++{
++ if (DMA64_ENAB(di)) {
++ return dma64_txfast(di, p0, coreflags);
++ } else {
++ return dma32_txfast(di, p0, coreflags);
++ }
++}
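++
++/* A typical transmit path checks di->txavail before posting, since
++ * dma_txfast frees the chain and returns -1 when the ring is full
++ * (sketch; the software-queue policy is the caller's):
++ *
++ *	if (di->txavail > 0)
++ *		dma_txfast(di, p, 0);
++ *	else
++ *		... hold p on a driver queue until reclaim ...
++ */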
++
++/* !! rx entry routine, returns a pointer to the next frame received, or NULL if there are no more */
++void*
++dma_rx(dma_info_t *di)
++{
++ void *p;
++ uint len;
++ int skiplen = 0;
++
++ while ((p = dma_getnextrxp(di, FALSE))) {
++ /* skip giant packets which span multiple rx descriptors */
++ if (skiplen > 0) {
++ skiplen -= di->rxbufsize;
++ if (skiplen < 0)
++ skiplen = 0;
++ PKTFREE(di->osh, p, FALSE);
++ continue;
++ }
++
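++ /* the core writes the frame length, little-endian, into the first
++ * two bytes of the rx header at the start of the buffer */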
++ len = ltoh16(*(uint16*)(PKTDATA(di->osh, p)));
++ DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
++
++ /* bad frame length check */
++ if (len > (di->rxbufsize - di->rxoffset)) {
++ DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
++ if (len > 0)
++ skiplen = len - (di->rxbufsize - di->rxoffset);
++ PKTFREE(di->osh, p, FALSE);
++ di->hnddma.rxgiants++;
++ continue;
++ }
++
++ /* set actual length */
++ PKTSETLEN(di->osh, p, (di->rxoffset + len));
++
++ break;
++ }
++
++ return (p);
++}
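++
++/* A typical receive poll loop pairs dma_rx with dma_rxfill
++ * (sketch; the upstream handoff is illustrative):
++ *
++ *	while ((p = dma_rx(di)) != NULL) {
++ *		PKTPULL(di->osh, p, di->rxoffset);
++ *		... hand p up the stack ...
++ *	}
++ *	dma_rxfill(di);
++ */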
++
++/* post receive buffers */
++void
++dma_rxfill(dma_info_t *di)
++{
++ void *p;
++ uint rxin, rxout;
++ uint32 ctrl;
++ uint n;
++ uint i;
++ uint32 pa;
++ uint rxbufsize;
++
++ /*
++ * Determine how many receive buffers we're lacking
++ * from the full complement, allocate, initialize,
++ * and post them, then update the chip rx lastdscr.
++ */
++
++ rxin = di->rxin;
++ rxout = di->rxout;
++ rxbufsize = di->rxbufsize;
++
++ n = di->nrxpost - NRXDACTIVE(rxin, rxout);
++
++ DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
++
++ for (i = 0; i < n; i++) {
++ if ((p = PKTGET(di->osh, rxbufsize, FALSE)) == NULL) {
++ DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
++ di->hnddma.rxnobuf++;
++ break;
++ }
++
++ /* Do a cached write instead of uncached write since DMA_MAP
++ * will flush the cache. Zeroing the first word also clears the
++ * rx header length field so dma_rx never reads a stale length. */
++ *(uint32*)(PKTDATA(di->osh, p)) = 0;
++
++ pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->osh, p), rxbufsize, DMA_RX, p);
++ ASSERT(ISALIGNED(pa, 4));
++
++ /* save the free packet pointer */
++ ASSERT(di->rxp[rxout] == NULL);
++ di->rxp[rxout] = p;
++
++ if (DMA64_ENAB(di)) {
++ /* prep the descriptor control value */
++ ctrl = 0;
++ if (rxout == (di->nrxd - 1))
++ ctrl = CTRL_EOT;
++
++ dma64_dd_upd(di, di->rxd64, pa, rxout, &ctrl, rxbufsize);
++ } else {
++ /* prep the descriptor control value */
++ ctrl = rxbufsize;
++ if (rxout == (di->nrxd - 1))
++ ctrl |= CTRL_EOT;
++ dma32_dd_upd(di, di->rxd32, pa, rxout, &ctrl);
++ }
++
++ rxout = NEXTRXD(rxout);
++ }
++
++ di->rxout = rxout;
++
++ /* update the chip lastdscr pointer */
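++ /* the ptr register takes a byte offset into the descriptor ring,
++ * hence the I2B() index-to-byte conversion */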
++ if (DMA64_ENAB(di)) {
++ W_REG(&di->d64rxregs->ptr, I2B(rxout, dma64dd_t));
++ } else {
++ W_REG(&di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
++ }
++}
++
++void
++dma_txreclaim(dma_info_t *di, bool forceall)
++{
++ void *p;
++
++ DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));
++
++ while ((p = dma_getnexttxp(di, forceall)))
++ PKTFREE(di->osh, p, TRUE);
++}
++
++/*
++ * Reclaim next completed txd (txds if using chained buffers) and
++ * return associated packet.
++ * If 'force' is true, reclaim txd(s) and return associated packet
++ * regardless of the value of the hardware "curr" pointer.
++ */
++void*
++dma_getnexttxp(dma_info_t *di, bool forceall)
++{
++ if (DMA64_ENAB(di)) {
++ return dma64_getnexttxp(di, forceall);
++ } else {
++ return dma32_getnexttxp(di, forceall);
++ }
++}
++
++/* like getnexttxp but no reclaim */
++void*
++dma_peeknexttxp(dma_info_t *di)
++{
++ uint end, i;
++
++ if (DMA64_ENAB(di)) {
++ end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
++ } else {
++ end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
++ }
++
++ for (i = di->txin; i != end; i = NEXTTXD(i))
++ if (di->txp[i])
++ return (di->txp[i]);
++
++ return (NULL);
++}
++
++/*
++ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
++ */
++void
++dma_txrotate(dma_info_t *di)
++{
++ if (DMA64_ENAB(di)) {
++ dma64_txrotate(di);
++ } else {
++ dma32_txrotate(di);
++ }
++}
++
++void
++dma_rxreclaim(dma_info_t *di)
++{
++ void *p;
++
++ DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
++
++ while ((p = dma_getnextrxp(di, TRUE)))
++ PKTFREE(di->osh, p, FALSE);
++}
++
++void *
++dma_getnextrxp(dma_info_t *di, bool forceall)
++{
++ if (DMA64_ENAB(di)) {
++ return dma64_getnextrxp(di, forceall);
++ } else {
++ return dma32_getnextrxp(di, forceall);
++ }
++}
++
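++/* look up the address of a named dma state variable; only "&txavail" is
++ * recognized, e.g. (uint *) dma_getvar(di, "&txavail") gives a caller a
++ * pointer it can poll for tx descriptor availability */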
++uintptr
++dma_getvar(dma_info_t *di, char *name)
++{
++ if (!strcmp(name, "&txavail"))
++ return ((uintptr) &di->txavail);
++ else {
++ ASSERT(0);
++ }
++ return (0);
++}
++
++void
++dma_txblock(dma_info_t *di)
++{
++ di->txavail = 0;
++}
++
++void
++dma_txunblock(dma_info_t *di)
++{
++ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
++}
++
++uint
++dma_txactive(dma_info_t *di)
++{
++ return (NTXDACTIVE(di->txin, di->txout));
++}
++
++void
++dma_rxpiomode(dma32regs_t *regs)
++{
++ W_REG(&regs->control, RC_FM);
++}
++
++void
++dma_txpioloopback(dma32regs_t *regs)
++{
++ OR_REG(&regs->control, XC_LE);
++}
++
++/*** 32-bit DMA non-inline functions ***/
++static bool
++dma32_alloc(dma_info_t *di, uint direction)
++{
++ uint size;
++ uint ddlen;
++ void *va;
++
++ ddlen = sizeof (dma32dd_t);
++
++ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
++
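++ /* if the allocator cannot guarantee ring alignment, over-allocate
++ * by one alignment unit so the ring can be rounded up below */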
++ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
++ size += D32RINGALIGN;
++
++
++ if (direction == DMA_TX) {
++ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa)) == NULL) {
++ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
++ return FALSE;
++ }
++
++ di->txd32 = (dma32dd_t*) ROUNDUP((uintptr)va, D32RINGALIGN);
++ di->txdalign = (uint)((int8*)di->txd32 - (int8*)va);
++ di->txdpa += di->txdalign;
++ di->txdalloc = size;
++ ASSERT(ISALIGNED((uintptr)di->txd32, D32RINGALIGN));
++ } else {
++ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa)) == NULL) {
++ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
++ return FALSE;
++ }
++ di->rxd32 = (dma32dd_t*) ROUNDUP((uintptr)va, D32RINGALIGN);
++ di->rxdalign = (uint)((int8*)di->rxd32 - (int8*)va);
++ di->rxdpa += di->rxdalign;
++ di->rxdalloc = size;
++ ASSERT(ISALIGNED((uintptr)di->rxd32, D32RINGALIGN));
++ }
++
++ return TRUE;
++}
++
++static void
++dma32_txreset(dma_info_t *di)
++{
++ uint32 status;
++
++ /* suspend tx DMA first so the engine can finish any frame in flight */
++ W_REG(&di->d32txregs->control, XC_SE);
++ SPINWAIT((status = (R_REG(&di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED &&
++ status != XS_XS_IDLE &&
++ status != XS_XS_STOPPED,
++ 10000);
++
++ W_REG(&di->d32txregs->control, 0);
++ SPINWAIT((status = (R_REG(&di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED,
++ 10000);
++
++ if (status != XS_XS_DISABLED) {
++ DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
++ }
++
++ /* wait for the last transaction to complete */
++ OSL_DELAY(300);
++}
++
++static void
++dma32_rxreset(dma_info_t *di)
++{
++ uint32 status;
++
++ W_REG(&di->d32rxregs->control, 0);
++ SPINWAIT((status = (R_REG(&di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED,
++ 10000);
++
++ if (status != RS_RS_DISABLED) {
++ DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
++ }
++}
++
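++/* return TRUE only if tx is suspended and the engine has settled into the
++ * idle state; the short delay re-checks so a transient idle is not trusted */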
++static bool
++dma32_txsuspendedidle(dma_info_t *di)
++{
++ if (!(R_REG(&di->d32txregs->control) & XC_SE))
++ return 0;
++
++ if ((R_REG(&di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
++ return 0;
++
++ OSL_DELAY(2);
++ return ((R_REG(&di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
++}
++
++/*
++ * Supports full 32-bit dma engine buffer addressing, so
++ * dma buffers can cross 4-KB page boundaries.
++ */
++static int
++dma32_txfast(dma_info_t *di, void *p0, uint32 coreflags)
++{
++ void *p, *next;
++ uchar *data;
++ uint len;
++ uint txout;
++ uint32 ctrl;
++ uint32 pa;
++
++ DMA_TRACE(("%s: dma_txfast\n", di->name));
++
++ txout = di->txout;
++ ctrl = 0;
++
++ /*
++ * Walk the chain of packet buffers
++ * allocating and initializing transmit descriptor entries.
++ */
++ for (p = p0; p; p = next) {
++ data = PKTDATA(di->osh, p);
++ len = PKTLEN(di->osh, p);
++ next = PKTNEXT(di->osh, p);
++
++ /* return nonzero if out of tx descriptors */
++ if (NEXTTXD(txout) == di->txin)
++ goto outoftxd;
++
++ if (len == 0)
++ continue;
++
++ /* get physical address of buffer start */
++ pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
++
++ /* build the descriptor control value */
++ ctrl = len & CTRL_BC_MASK;
++
++ ctrl |= coreflags;
++
++ if (p == p0)
++ ctrl |= CTRL_SOF;
++ if (next == NULL)
++ ctrl |= (CTRL_IOC | CTRL_EOF);
++ if (txout == (di->ntxd - 1))
++ ctrl |= CTRL_EOT;
++
++ /* this routine is only reached on the 32-bit path, via dma_txfast() */
++ dma32_dd_upd(di, di->txd32, pa, txout, &ctrl);
++
++ ASSERT(di->txp[txout] == NULL);
++
++ txout = NEXTTXD(txout);
++ }
++
++ /* if the last txd eof is not set (a trailing zero-length buffer was skipped), fix it */
++ if (!(ctrl & CTRL_EOF))
++ W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));
++
++ /* save the packet */
++ di->txp[PREVTXD(txout)] = p0;
++
++ /* bump the tx descriptor index */
++ di->txout = txout;
++
++ /* kick the chip */
++ W_REG(&di->d32txregs->ptr, I2B(txout, dma32dd_t));
++
++ /* tx flow control: keep one descriptor unused so a full ring
++ * is distinguishable from an empty one */
++ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
++
++ return (0);
++
++ outoftxd:
++ DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
++ PKTFREE(di->osh, p0, TRUE);
++ di->txavail = 0;
++ di->hnddma.txnobuf++;
++ return (-1);
++}
++
++static void*
++dma32_getnexttxp(dma_info_t *di, bool forceall)
++{
++ uint start, end, i;
++ void *txp;
++
++ DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
++
++ txp = NULL;
++
++ start = di->txin;
++ if (forceall)
++ end = di->txout;
++ else
++ end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
++
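++ /* sanity check: a completed index beyond txout means the chip
++ * returned a bogus current descriptor */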
++ if ((start == 0) && (end > di->txout))
++ goto bogus;
++
++ for (i = start; i != end && !txp; i = NEXTTXD(i)) {
++ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow),
++ (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK), DMA_TX, di->txp[i]);
++
++ W_SM(&di->txd32[i].addr, 0xdeadbeef);
++ txp = di->txp[i];
++ di->txp[i] = NULL;
++ }
++
++ di->txin = i;
++
++ /* tx flow control */
++ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
++
++ return (txp);
++
++bogus:
++/*
++ DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
++ start, end, di->txout, forceall));
++*/
++ return (NULL);
++}
++
++static void *
++dma32_getnextrxp(dma_info_t *di, bool forceall)
++{
++ uint i;
++ void *rxp;
++
++ /* if forcing, dma engine must be disabled */
++ ASSERT(!forceall || !dma_rxenabled(di));
++
++ i = di->rxin;
++
++ /* return if no packets posted */
++ if (i == di->rxout)
++ return (NULL);
++
++ /* ignore curr if forceall */
++ if (!forceall && (i == B2I(R_REG(&di->d32rxregs->status) & RS_CD_MASK, dma32dd_t)))
++ return (NULL);
++
++ /* get the packet pointer that corresponds to the rx descriptor */
++ rxp = di->rxp[i];
++ ASSERT(rxp);
++ di->rxp[i] = NULL;
++
++ /* clear this packet from the descriptor ring */
++ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow),
++ di->rxbufsize, DMA_RX, rxp);
++ W_SM(&di->rxd32[i].addr, 0xdeadbeef);
++
++ di->rxin = NEXTRXD(i);
++
++ return (rxp);
++}
++
++static void
++dma32_txrotate(dma_info_t *di)
++{
++ uint ad;
++ uint nactive;
++ uint rot;
++ uint old, new;
++ uint32 w;
++ uint first, last;
++
++ ASSERT(dma_txsuspendedidle(di));
++
++ nactive = dma_txactive(di);
++ ad = B2I(((R_REG(&di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t);
++ rot = TXD(ad - di->txin);
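++ /* rot = how far the active descriptor has advanced past txin,
++ * modulo the ring size */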
++
++ ASSERT(rot < di->ntxd);
++
++ /* full-ring case is a lot harder - don't worry about this */
++ if (rot >= (di->ntxd - nactive)) {
++ DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
++ return;
++ }
++
++ first = di->txin;
++ last = PREVTXD(di->txout);
++
++ /* move entries starting at last and moving backwards to first */
++ for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
++ new = TXD(old + rot);
++
++ /*
++ * Move the tx dma descriptor.
++ * EOT is set only in the last entry in the ring.
++ */
++ w = R_SM(&di->txd32[old].ctrl) & ~CTRL_EOT;
++ if (new == (di->ntxd - 1))
++ w |= CTRL_EOT;
++ W_SM(&di->txd32[new].ctrl, w);
++ W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
++
++ /* zap the old tx dma descriptor address field */
++ W_SM(&di->txd32[old].addr, 0xdeadbeef);
++
++ /* move the corresponding txp[] entry */
++ ASSERT(di->txp[new] == NULL);
++ di->txp[new] = di->txp[old];
++ di->txp[old] = NULL;
++ }
++
++ /* update txin and txout */
++ di->txin = ad;
++ di->txout = TXD(di->txout + rot);
++ di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
++
++ /* kick the chip */
++ W_REG(&di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
++}
++
++/*** 64-bit DMA non-inline functions ***/
++
++#ifdef BCMDMA64
++
++static bool
++dma64_alloc(dma_info_t *di, uint direction)
++{
++ uint size;
++ uint ddlen;
++ uint32 alignbytes;
++ void *va;
++
++ ddlen = sizeof (dma64dd_t);
++
++ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
++
++ alignbytes = di->dma64align;
++
++ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, alignbytes))
++ size += alignbytes;
++
++
++ if (direction == DMA_TX) {
++ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa)) == NULL) {
++ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
++ return FALSE;
++ }
++
++ di->txd64 = (dma64dd_t*) ROUNDUP((uintptr)va, alignbytes);
++ di->txdalign = (uint)((int8*)di->txd64 - (int8*)va);
++ di->txdpa += di->txdalign;
++ di->txdalloc = size;
++ ASSERT(ISALIGNED((uintptr)di->txd64, alignbytes));
++ } else {
++ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa)) == NULL) {
++ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
++ return FALSE;
++ }
++ di->rxd64 = (dma64dd_t*) ROUNDUP((uintptr)va, alignbytes);
++ di->rxdalign = (uint)((int8*)di->rxd64 - (int8*)va);
++ di->rxdpa += di->rxdalign;
++ di->rxdalloc = size;
++ ASSERT(ISALIGNED((uintptr)di->rxd64, alignbytes));
++ }
++
++ return TRUE;
++}
++
++static void
++dma64_txreset(dma_info_t *di)
++{
++ uint32 status;
++
++ /* suspend tx DMA first */
++ W_REG(&di->d64txregs->control, D64_XC_SE);
++ SPINWAIT((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED &&
++ status != D64_XS0_XS_IDLE &&
++ status != D64_XS0_XS_STOPPED,
++ 10000);
++
++ W_REG(&di->d64txregs->control, 0);
++ SPINWAIT((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED,
++ 10000);
++
++ if (status != D64_XS0_XS_DISABLED) {
++ DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
++ }
++
++ /* wait for the last transaction to complete */
++ OSL_DELAY(300);
++}
++
++static void
++dma64_rxreset(dma_info_t *di)
++{
++ uint32 status;
++
++ W_REG(&di->d64rxregs->control, 0);
++ SPINWAIT((status = (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED,
++ 10000);
++
++ if (status != D64_RS0_RS_DISABLED) {
++ DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
++ }