/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright 2006, Broadcom Corporation
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 * $Id: hnddma.c,v 1.11 2006/04/08 07:12:42 honor Exp $
 */
#include "linux_osl.h"
#include <bcmendian.h>

#define DMA_ERROR(args)
#define DMA_TRACE(args)
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level = 0;
#define MAXNAMEL	8	/* 8 char names */

#define DI_INFO(dmah)	(dma_info_t *)dmah
/* dma engine software state */
typedef struct dma_info {
    struct hnddma_pub hnddma;   /* exported structure, don't use hnddma_t,
                                 * which could be const
                                 */
    uint *msg_level;            /* message level pointer */
    char name[MAXNAMEL];        /* callers name for diag msgs */

    void *osh;                  /* os handle */
    sb_t *sbh;                  /* sb handle */

    bool dma64;                 /* dma64 enabled */
    bool addrext;               /* this dma engine supports DmaExtendedAddrChanges */

    dma32regs_t *d32txregs;     /* 32-bit dma tx engine registers */
    dma32regs_t *d32rxregs;     /* 32-bit dma rx engine registers */
    dma64regs_t *d64txregs;     /* 64-bit dma tx engine registers */
    dma64regs_t *d64rxregs;     /* 64-bit dma rx engine registers */

    uint32 dma64align;          /* either 8k or 4k depends on number of dd */
    dma32dd_t *txd32;           /* pointer to dma32 tx descriptor ring */
    dma64dd_t *txd64;           /* pointer to dma64 tx descriptor ring */
    uint ntxd;                  /* # tx descriptors tunable */
    uint txin;                  /* index of next descriptor to reclaim */
    uint txout;                 /* index of next descriptor to post */
    void **txp;                 /* pointer to parallel array of pointers to packets */
    osldma_t *tx_dmah;          /* DMA TX descriptor ring handle */
    osldma_t **txp_dmah;        /* DMA TX packet data handle */
    ulong txdpa;                /* physical address of descriptor ring */
    uint txdalign;              /* #bytes added to alloc'd mem to align txd */
    uint txdalloc;              /* #bytes allocated for the ring */

    dma32dd_t *rxd32;           /* pointer to dma32 rx descriptor ring */
    dma64dd_t *rxd64;           /* pointer to dma64 rx descriptor ring */
    uint nrxd;                  /* # rx descriptors tunable */
    uint rxin;                  /* index of next descriptor to reclaim */
    uint rxout;                 /* index of next descriptor to post */
    void **rxp;                 /* pointer to parallel array of pointers to packets */
    osldma_t *rx_dmah;          /* DMA RX descriptor ring handle */
    osldma_t **rxp_dmah;        /* DMA RX packet data handle */
    ulong rxdpa;                /* physical address of descriptor ring */
    uint rxdalign;              /* #bytes added to alloc'd mem to align rxd */
    uint rxdalloc;              /* #bytes allocated for the ring */

    uint rxbufsize;             /* rx buffer size in bytes,
                                 * not including the extra headroom
                                 */
    uint nrxpost;               /* # rx buffers to keep posted */
    uint rxoffset;              /* rxcontrol offset */
    uint ddoffsetlow;           /* add to get dma address of descriptor ring, low 32 bits */
    uint ddoffsethigh;          /* high 32 bits */
    uint dataoffsetlow;         /* add to get dma address of data buffer, low 32 bits */
    uint dataoffsethigh;        /* high 32 bits */
} dma_info_t;
/* descriptor bumping macros */
#define XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define TXD(x)		XXD((x), di->ntxd)
#define RXD(x)		XXD((x), di->nrxd)
#define NEXTTXD(i)	TXD(i + 1)
#define PREVTXD(i)	TXD(i - 1)
#define NEXTRXD(i)	RXD(i + 1)
#define NTXDACTIVE(h, t)	TXD(t - h)
#define NRXDACTIVE(h, t)	RXD(t - h)
/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)	((bytes) / sizeof(type))
#define I2B(index, type)	((index) * sizeof(type))
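
/*
 * Illustrative sketch, not part of the original driver: with ring sizes
 * restricted to powers of 2, the mask in XXD() is equivalent to an
 * unsigned modulo, and the index/byte conversions above are exact
 * inverses. The values below assume a hypothetical 4-entry ring.
 */
#if 0	/* illustration only */
static void dma_ringmath_demo(void)
{
    uint n = 4;                     /* ring size; must be a power of 2 */
    uint h = 3, t = 1;              /* head/tail indexes that have wrapped */

    ASSERT(XXD(5, n) == 5 % n);     /* 5 & 3 == 1 */
    ASSERT(XXD(t - h, n) == 2);     /* 2 entries active across the wrap */
    ASSERT(B2I(I2B(3, dma32dd_t), dma32dd_t) == 3);
}
#endif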
#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */
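
/*
 * Minimal sketch of the 30-bit address split these masks implement
 * (illustration only): bits [31:30] of a physical address travel in the
 * descriptor's address-extension (AE) field while the low 30 bits go in
 * the address word, as dma32_dd_upd() does below.
 */
#if 0	/* illustration only */
static void pci32_addrext_demo(void)
{
    ulong pa = 0xc0001000;          /* hypothetical buffer address */
    uint ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;

    pa &= ~PCI32ADDR_HIGH;
    ASSERT(ae == 3);                /* high two bits -> AE field */
    ASSERT(pa == 0x00001000);       /* low 30 bits -> addr word */
}
#endif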
/* common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool dma32_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, ulong pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static void _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static uintptr _dma_getvar(dma_info_t *di, char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
/* ** 32-bit DMA prototypes */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, bool forceall);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, bool forceall);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);
static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
static di_fcn_t dma32proc = {
    (di_detach_t)_dma_detach,
    (di_txinit_t)dma32_txinit,
    (di_txreset_t)dma32_txreset,
    (di_txenabled_t)dma32_txenabled,
    (di_txsuspend_t)dma32_txsuspend,
    (di_txresume_t)dma32_txresume,
    (di_txsuspended_t)dma32_txsuspended,
    (di_txsuspendedidle_t)dma32_txsuspendedidle,
    (di_txfast_t)dma32_txfast,
    (di_txstopped_t)dma32_txstopped,
    (di_txreclaim_t)dma32_txreclaim,
    (di_getnexttxp_t)dma32_getnexttxp,
    (di_peeknexttxp_t)_dma_peeknexttxp,
    (di_txblock_t)_dma_txblock,
    (di_txunblock_t)_dma_txunblock,
    (di_txactive_t)_dma_txactive,
    (di_txrotate_t)dma32_txrotate,

    (di_rxinit_t)_dma_rxinit,
    (di_rxreset_t)dma32_rxreset,
    (di_rxidle_t)dma32_rxidle,
    (di_rxstopped_t)dma32_rxstopped,
    (di_rxenable_t)_dma_rxenable,
    (di_rxenabled_t)dma32_rxenabled,
    (di_rxfill_t)_dma_rxfill,
    (di_rxreclaim_t)_dma_rxreclaim,
    (di_getnextrxp_t)_dma_getnextrxp,

    (di_fifoloopbackenable_t)_dma_fifoloopbackenable,
    (di_getvar_t)_dma_getvar,
    (di_counterreset_t)_dma_counterreset,
};
hnddma_t *
dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx,
           uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset, uint *msg_level)
{
    dma_info_t *di;
    uint size;
    /* allocate private info structure */
    if ((di = MALLOC(osh, sizeof(dma_info_t))) == NULL) {
        return (NULL);
    }
    bzero((char *)di, sizeof(dma_info_t));

    di->msg_level = msg_level ? msg_level : &dma_msg_level;
    /* old chips w/o sb are no longer supported */
    ASSERT(sbh != NULL);

    /* check arguments */
    ASSERT(ISPOWEROF2(ntxd));
    ASSERT(ISPOWEROF2(nrxd));
    if (nrxd == 0)
        ASSERT(dmaregsrx == NULL);
    if (ntxd == 0)
        ASSERT(dmaregstx == NULL);
    /* init dma reg pointer */
    ASSERT(ntxd <= D32MAXDD);
    ASSERT(nrxd <= D32MAXDD);
    di->d32txregs = (dma32regs_t *)dmaregstx;
    di->d32rxregs = (dma32regs_t *)dmaregsrx;
    DMA_TRACE(("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d "
               "rxoffset %d dmaregstx %p dmaregsrx %p\n",
               name, "DMA32", osh, ntxd, nrxd, rxbufsize,
               nrxpost, rxoffset, dmaregstx, dmaregsrx));
    /* make a private copy of our callers name */
    strncpy(di->name, name, MAXNAMEL);
    di->name[MAXNAMEL-1] = '\0';

    di->osh = osh;
    di->sbh = sbh;

    /* save tunables */
    di->ntxd = ntxd;
    di->nrxd = nrxd;
    /* the actual dma size doesn't include the extra headroom */
    if (rxbufsize > BCMEXTRAHDROOM)
        di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
    else
        di->rxbufsize = rxbufsize;

    di->nrxpost = nrxpost;
    di->rxoffset = rxoffset;
    /*
     * figure out the DMA physical address offset for dd and data
     *     for old chips w/o sb, use zero
     *     for new chips w sb,
     *         PCI/PCIE: they map silicon backplane address to zero-based memory, need offset
     *         Other bus: use zero
     *         SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
     */
    di->ddoffsetlow = 0;
    di->dataoffsetlow = 0;
    /* for pci bus, add offset */
    if (sbh->bustype == PCI_BUS) {
        di->ddoffsetlow = SB_PCI_DMA;
        di->ddoffsethigh = 0;
        di->dataoffsetlow = di->ddoffsetlow;
        di->dataoffsethigh = di->ddoffsethigh;
    }
#if defined(__mips__) && defined(IL_BIGENDIAN)
    di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif

    di->addrext = _dma_isaddrext(di);
    /* allocate tx packet pointer vector */
    if (ntxd) {
        size = ntxd * sizeof(void *);
        if ((di->txp = MALLOC(osh, size)) == NULL) {
            DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
                       di->name, MALLOCED(osh)));
            goto fail;
        }
        bzero((char *)di->txp, size);
    }
    /* allocate rx packet pointer vector */
    if (nrxd) {
        size = nrxd * sizeof(void *);
        if ((di->rxp = MALLOC(osh, size)) == NULL) {
            DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
                       di->name, MALLOCED(osh)));
            goto fail;
        }
        bzero((char *)di->rxp, size);
    }
    /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
    if (ntxd) {
        if (!dma32_alloc(di, DMA_TX))
            goto fail;
    }

    /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
    if (nrxd) {
        if (!dma32_alloc(di, DMA_RX))
            goto fail;
    }
    if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
        DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
                   di->name, di->txdpa));
        goto fail;
    }
    if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
        DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
                   di->name, di->rxdpa));
        goto fail;
    }
    DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
               "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow,
               di->dataoffsethigh, di->addrext));
    /* allocate tx packet pointer vector and DMA mapping vectors */
    if (ntxd) {
        size = ntxd * sizeof(osldma_t **);
        if ((di->txp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
            goto fail;
        bzero((char *)di->txp_dmah, size);
    } else
        di->txp_dmah = NULL;
    /* allocate rx packet pointer vector and DMA mapping vectors */
    if (nrxd) {
        size = nrxd * sizeof(osldma_t **);
        if ((di->rxp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
            goto fail;
        bzero((char *)di->rxp_dmah, size);
    } else
        di->rxp_dmah = NULL;
    /* initialize opsvec of function pointers */
    di->hnddma.di_fn = dma32proc;

    return ((hnddma_t *)di);

fail:
    _dma_detach(di);
    return (NULL);
}
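
/*
 * Hypothetical attach-time usage sketch (not part of the original file);
 * the register pointers and tunables below are illustrative only.
 */
#if 0	/* illustration only */
static hnddma_t *example_attach(osl_t *osh, sb_t *sbh, void *txregs, void *rxregs)
{
    /* 64 tx and 64 rx descriptors (powers of 2), 2 KB rx buffers,
     * keep 32 rx buffers posted, no rx status offset, default msg level
     */
    return dma_attach(osh, "example", sbh, txregs, rxregs,
                      64, 64, 2048, 32, 0, NULL);
}
#endif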
/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx, uint32 *flags,
             uint32 bufcount)
{
    /* dma32 uses 32 bits control to fit both flags and bufcounter */
    *flags = *flags | (bufcount & CTRL_BC_MASK);

    if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
        W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
        W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
    } else {
        /* address extension */
        uint32 ae;
        ASSERT(di->addrext);

        ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
        pa &= ~PCI32ADDR_HIGH;

        *flags |= (ae << CTRL_AE_SHIFT);
        W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
        W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
    }
}
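
/*
 * Sketch of the dma32 control-word packing used above (illustration
 * only): the buffer byte count shares one 32-bit word with the flag
 * bits, so flags must live above CTRL_BC_MASK.
 */
#if 0	/* illustration only */
static void dma32_ctrl_demo(void)
{
    uint32 flags = CTRL_SOF | CTRL_EOF;     /* frame delimiters */
    uint32 bufcount = 1518;                 /* hypothetical buffer length */
    uint32 ctrl = flags | (bufcount & CTRL_BC_MASK);

    ASSERT((ctrl & CTRL_BC_MASK) == bufcount);  /* count recoverable */
    ASSERT((flags & CTRL_BC_MASK) == 0);        /* no overlap with flags */
}
#endif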
static bool
_dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
{
    uint32 w;

    OR_REG(osh, &dma32regs->control, XC_AE);
    w = R_REG(osh, &dma32regs->control);
    AND_REG(osh, &dma32regs->control, ~XC_AE);
    return ((w & XC_AE) == XC_AE);
}
/* !! may be called with core in reset */
static void
_dma_detach(dma_info_t *di)
{
    if (di == NULL)
        return;

    DMA_TRACE(("%s: dma_detach\n", di->name));
    /* shouldn't be here if descriptors are unreclaimed */
    ASSERT(di->txin == di->txout);
    ASSERT(di->rxin == di->rxout);
    /* free dma descriptor rings */
    if (di->txd32)
        DMA_FREE_CONSISTENT(di->osh, ((int8 *)di->txd32 - di->txdalign),
                            di->txdalloc, (di->txdpa - di->txdalign), &di->tx_dmah);
    if (di->rxd32)
        DMA_FREE_CONSISTENT(di->osh, ((int8 *)di->rxd32 - di->rxdalign),
                            di->rxdalloc, (di->rxdpa - di->rxdalign), &di->rx_dmah);
    /* free packet pointer vectors */
    if (di->txp)
        MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
    if (di->rxp)
        MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));

    /* free tx packet DMA handles */
    if (di->txp_dmah)
        MFREE(di->osh, (void *)di->txp_dmah, di->ntxd * sizeof(osldma_t **));

    /* free rx packet DMA handles */
    if (di->rxp_dmah)
        MFREE(di->osh, (void *)di->rxp_dmah, di->nrxd * sizeof(osldma_t **));
    /* free our private info structure */
    MFREE(di->osh, (void *)di, sizeof(dma_info_t));
}
/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool
_dma_isaddrext(dma_info_t *di)
{
    if (di->d32txregs)
        return (_dma32_addrext(di->osh, di->d32txregs));
    else if (di->d32rxregs)
        return (_dma32_addrext(di->osh, di->d32rxregs));
    return FALSE;
}
/* initialize descriptor table base address */
static void
_dma_ddtable_init(dma_info_t *di, uint direction, ulong pa)
{
    if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
        if (direction == DMA_TX)
            W_REG(di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
        else
            W_REG(di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
    } else {
        /* dma32 address extension */
        uint32 ae;
        ASSERT(di->addrext);

        /* shift the high bit(s) from pa to ae */
        ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
        pa &= ~PCI32ADDR_HIGH;

        if (direction == DMA_TX) {
            W_REG(di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
            SET_REG(di->osh, &di->d32txregs->control, XC_AE, ae << XC_AE_SHIFT);
        } else {
            W_REG(di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
            SET_REG(di->osh, &di->d32rxregs->control, RC_AE, ae << RC_AE_SHIFT);
        }
    }
}
static void
_dma_fifoloopbackenable(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
    OR_REG(di->osh, &di->d32txregs->control, XC_LE);
}
static void
_dma_rxinit(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxinit\n", di->name));

    if (di->nrxd == 0)
        return;

    di->rxin = di->rxout = 0;

    /* clear rx descriptor ring */
    BZERO_SM((void *)di->rxd32, (di->nrxd * sizeof(dma32dd_t)));

    _dma_ddtable_init(di, DMA_RX, di->rxdpa);
}
static void
_dma_rxenable(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxenable\n", di->name));

    W_REG(di->osh, &di->d32rxregs->control, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}
/* !! rx entry routine, returns a pointer to the next frame received,
 * or NULL if there are no more
 */
static void *
_dma_rx(dma_info_t *di)
{
    void *p;
    uint len;
    int skiplen = 0;

    while ((p = _dma_getnextrxp(di, FALSE))) {
        /* skip giant packets which span multiple rx descriptors */
        if (skiplen > 0) {
            skiplen -= di->rxbufsize;
            if (skiplen < 0)
                skiplen = 0;
            PKTFREE(di->osh, p, FALSE);
            continue;
        }

        len = ltoh16(*(uint16 *)(PKTDATA(di->osh, p)));
        DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

        /* bad frame length check */
        if (len > (di->rxbufsize - di->rxoffset)) {
            DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
            if (len > 0)
                skiplen = len - (di->rxbufsize - di->rxoffset);
            PKTFREE(di->osh, p, FALSE);
            di->hnddma.rxgiants++;
            continue;
        }

        /* set actual length */
        PKTSETLEN(di->osh, p, (di->rxoffset + len));

        break;
    }

    return (p);
}
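
/*
 * Hypothetical receive-path usage of the rx entry routine above (sketch
 * only): drain completed frames, then repost fresh buffers so nrxpost
 * descriptors stay armed.
 */
#if 0	/* illustration only */
static void example_rx_poll(hnddma_t *dmah)
{
    dma_info_t *di = DI_INFO(dmah);
    void *p;

    while ((p = _dma_rx(di)) != NULL) {
        /* hand p to the protocol stack here; this sketch just drops it */
        PKTFREE(di->osh, p, FALSE);
    }
    _dma_rxfill(di);
}
#endif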
/* post receive buffers */
static void
_dma_rxfill(dma_info_t *di)
{
    void *p;
    uint rxin, rxout;
    uint32 flags = 0;
    uint n;
    uint i;
    uint32 pa;
    uint extra_offset = 0;

    /*
     * Determine how many receive buffers we're lacking
     * from the full complement, allocate, initialize,
     * and post them, then update the chip rx lastdscr.
     */

    rxin = di->rxin;
    rxout = di->rxout;

    n = di->nrxpost - NRXDACTIVE(rxin, rxout);

    DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

    if (di->rxbufsize > BCMEXTRAHDROOM)
        extra_offset = BCMEXTRAHDROOM;

    for (i = 0; i < n; i++) {
        /* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
         * size to be allocated
         */
        if ((p = PKTGET(di->osh, di->rxbufsize + extra_offset,
                        FALSE)) == NULL) {
            DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
            di->hnddma.rxnobuf++;
            break;
        }

        /* reserve an extra headroom, if applicable */
        if (extra_offset)
            PKTPULL(di->osh, p, extra_offset);

        /* Do a cached write instead of uncached write since DMA_MAP
         * will flush the cache.
         */
        *(uint32 *)(PKTDATA(di->osh, p)) = 0;

        pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->osh, p),
                              di->rxbufsize, DMA_RX, p);

        ASSERT(ISALIGNED(pa, 4));

        /* save the free packet pointer */
        ASSERT(di->rxp[rxout] == NULL);
        di->rxp[rxout] = p;

        /* reset flags for each descriptor */
        flags = 0;
        if (rxout == (di->nrxd - 1))
            flags = CTRL_EOT;
        dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
        rxout = NEXTRXD(rxout);
    }

    di->rxout = rxout;

    /* update the chip lastdscr pointer */
    W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
}
/* like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp(dma_info_t *di)
{
    uint end, i;

    if (di->ntxd == 0)
        return (NULL);

    end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

    for (i = di->txin; i != end; i = NEXTTXD(i))
        if (di->txp[i])
            return (di->txp[i]);

    return (NULL);
}
static void
_dma_rxreclaim(dma_info_t *di)
{
    void *p;

    /* "unused local" warning suppression for OSLs that
     * define PKTFREE() without using the di->osh arg
     */
    di = di;

    DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

    while ((p = _dma_getnextrxp(di, TRUE)))
        PKTFREE(di->osh, p, FALSE);
}
static void *
_dma_getnextrxp(dma_info_t *di, bool forceall)
{
    if (di->nrxd == 0)
        return (NULL);

    return dma32_getnextrxp(di, forceall);
}
static void
_dma_txblock(dma_info_t *di)
{
    di->hnddma.txavail = 0;
}

static void
_dma_txunblock(dma_info_t *di)
{
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint
_dma_txactive(dma_info_t *di)
{
    return (NTXDACTIVE(di->txin, di->txout));
}
static void
_dma_counterreset(dma_info_t *di)
{
    /* reset all software counters */
    di->hnddma.rxgiants = 0;
    di->hnddma.rxnobuf = 0;
    di->hnddma.txnobuf = 0;
}
/* get the address of the var in order to change later */
static uintptr
_dma_getvar(dma_info_t *di, char *name)
{
    if (!strcmp(name, "&txavail"))
        return ((uintptr) &(di->hnddma.txavail));
    else {
        ASSERT(0);
    }
    return (0);
}
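
/*
 * Hypothetical use of _dma_getvar() (illustration only): a caller grabs
 * a pointer to the live txavail counter once and then re-reads it
 * cheaply before each transmit.
 */
#if 0	/* illustration only */
static bool example_tx_ready(dma_info_t *di)
{
    uint *txavail = (uint *)_dma_getvar(di, "&txavail");

    return (*txavail != 0);	/* FALSE means the tx ring is full */
}
#endif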
void
dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
{
    OR_REG(osh, &regs->control, XC_LE);
}
/* 32-bit DMA functions */
static void
dma32_txinit(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txinit\n", di->name));

    if (di->ntxd == 0)
        return;

    di->txin = di->txout = 0;
    di->hnddma.txavail = di->ntxd - 1;

    /* clear tx descriptor ring */
    BZERO_SM((void *)di->txd32, (di->ntxd * sizeof(dma32dd_t)));
    W_REG(di->osh, &di->d32txregs->control, XC_XE);
    _dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool
dma32_txenabled(dma_info_t *di)
{
    uint32 xc;

    /* If the chip is dead, it is not enabled :-) */
    xc = R_REG(di->osh, &di->d32txregs->control);
    return ((xc != 0xffffffff) && (xc & XC_XE));
}
static void
dma32_txsuspend(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txsuspend\n", di->name));

    if (di->ntxd == 0)
        return;

    OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}

static void
dma32_txresume(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_txresume\n", di->name));

    if (di->ntxd == 0)
        return;

    AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}
static bool
dma32_txsuspended(dma_info_t *di)
{
    return (di->ntxd == 0) ||
           ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}
static void
dma32_txreclaim(dma_info_t *di, bool forceall)
{
    void *p;

    DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

    while ((p = dma32_getnexttxp(di, forceall)))
        PKTFREE(di->osh, p, TRUE);
}
static bool
dma32_txstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
}

static bool
dma32_rxstopped(dma_info_t *di)
{
    return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
}
static bool
dma32_alloc(dma_info_t *di, uint direction)
{
    uint size;
    uint ddlen;
    void *va;

    ddlen = sizeof(dma32dd_t);

    size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

    if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
        size += D32RINGALIGN;

    if (direction == DMA_TX) {
        if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa, &di->tx_dmah)) == NULL) {
            DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
                       di->name));
            return (FALSE);
        }

        di->txd32 = (dma32dd_t *) ROUNDUP((uintptr)va, D32RINGALIGN);
        di->txdalign = (uint)((int8 *)di->txd32 - (int8 *)va);
        di->txdpa += di->txdalign;
        di->txdalloc = size;
        ASSERT(ISALIGNED((uintptr)di->txd32, D32RINGALIGN));
    } else {
        if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa, &di->rx_dmah)) == NULL) {
            DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
                       di->name));
            return (FALSE);
        }
        di->rxd32 = (dma32dd_t *) ROUNDUP((uintptr)va, D32RINGALIGN);
        di->rxdalign = (uint)((int8 *)di->rxd32 - (int8 *)va);
        di->rxdpa += di->rxdalign;
        di->rxdalloc = size;
        ASSERT(ISALIGNED((uintptr)di->rxd32, D32RINGALIGN));
    }

    return (TRUE);
}
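
/*
 * Alignment sketch for the ROUNDUP bookkeeping above (illustration only,
 * assuming D32RINGALIGN == 4096): an unaligned allocation is rounded up
 * and the slack is recorded so _dma_detach() can free the original block.
 */
#if 0	/* illustration only */
static void ringalign_demo(void)
{
    uintptr va = 0x1234;                /* hypothetical raw allocation */
    uintptr dd = ROUNDUP(va, D32RINGALIGN);

    ASSERT(dd == 0x2000);               /* aligned ring base */
    ASSERT((uint)(dd - va) == 0xdcc);   /* slack kept in txdalign */
}
#endif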
static bool
dma32_txreset(dma_info_t *di)
{
    uint32 status;

    if (di->ntxd == 0)
        return (TRUE);

    /* suspend tx DMA first */
    W_REG(di->osh, &di->d32txregs->control, XC_SE);
    SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
              != XS_XS_DISABLED) &&
             (status != XS_XS_IDLE) &&
             (status != XS_XS_STOPPED),
             (10000));

    W_REG(di->osh, &di->d32txregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh,
              &di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED),
             10000);

    /* wait for the last transaction to complete */
    OSL_DELAY(300);

    return (status == XS_XS_DISABLED);
}
static bool
dma32_rxidle(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxidle\n", di->name));

    if (di->nrxd == 0)
        return (TRUE);

    return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
            R_REG(di->osh, &di->d32rxregs->ptr));
}
static bool
dma32_rxreset(dma_info_t *di)
{
    uint32 status;

    if (di->nrxd == 0)
        return (TRUE);

    W_REG(di->osh, &di->d32rxregs->control, 0);
    SPINWAIT(((status = (R_REG(di->osh,
              &di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED),
             10000);

    return (status == RS_RS_DISABLED);
}
static bool
dma32_rxenabled(dma_info_t *di)
{
    uint32 rc;

    rc = R_REG(di->osh, &di->d32rxregs->control);
    return ((rc != 0xffffffff) && (rc & RC_RE));
}
static bool
dma32_txsuspendedidle(dma_info_t *di)
{
    if (di->ntxd == 0)
        return (TRUE);

    if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
        return 0;

    if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
        return 0;

    OSL_DELAY(2);
    return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32-bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 */
static int
dma32_txfast(dma_info_t *di, void *p0, bool commit)
{
    void *p, *next;
    uchar *data;
    uint len;
    uint txout;
    uint32 flags = 0;
    uint32 pa;

    DMA_TRACE(("%s: dma_txfast\n", di->name));

    txout = di->txout;
    /*
     * Walk the chain of packet buffers
     * allocating and initializing transmit descriptor entries.
     */
    for (p = p0; p; p = next) {
        data = PKTDATA(di->osh, p);
        len = PKTLEN(di->osh, p);
        next = PKTNEXT(di->osh, p);

        /* return nonzero if out of tx descriptors */
        if (NEXTTXD(txout) == di->txin)
            goto outoftxd;

        /* get physical address of buffer start */
        pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);

        flags = 0;
        if (p == p0)
            flags |= CTRL_SOF;
        if (next == NULL)
            flags |= (CTRL_IOC | CTRL_EOF);
        if (txout == (di->ntxd - 1))
            flags |= CTRL_EOT;

        dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
        ASSERT(di->txp[txout] == NULL);

        txout = NEXTTXD(txout);
    }

    /* if last txd eof not set, fix it */
    if (!(flags & CTRL_EOF))
        W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

    /* save the packet */
    di->txp[PREVTXD(txout)] = p0;

    /* bump the tx descriptor index */
    di->txout = txout;

    /* kick the chip */
    if (commit)
        W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (0);

outoftxd:
    DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
    PKTFREE(di->osh, p0, TRUE);
    di->hnddma.txavail = 0;
    di->hnddma.txnobuf++;
    return (-1);
}
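
/*
 * Hypothetical transmit-path usage (sketch only): post one packet chain
 * and let the txavail flow-control counter gate the caller.
 */
#if 0	/* illustration only */
static int example_tx(hnddma_t *dmah, void *pkt)
{
    dma_info_t *di = DI_INFO(dmah);

    if (di->hnddma.txavail == 0)
        return -1;                          /* ring full; retry later */

    return dma32_txfast(di, pkt, TRUE);     /* commit == TRUE kicks the chip */
}
#endif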
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
static void *
dma32_getnexttxp(dma_info_t *di, bool forceall)
{
    uint start, end, i;
    void *txp;

    DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
    if (di->ntxd == 0)
        return (NULL);

    txp = NULL;

    start = di->txin;
    if (forceall)
        end = di->txout;
    else
        end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

    if ((start == 0) && (end > di->txout))
        goto bogus;

    for (i = start; i != end && !txp; i = NEXTTXD(i)) {
        DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow),
                  (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK),
                  DMA_TX, di->txp[i]);

        W_SM(&di->txd32[i].addr, 0xdeadbeef);
        txp = di->txp[i];
        di->txp[i] = NULL;
    }

    di->txin = i;

    /* tx flow control */
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    return (txp);

bogus:
    DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
               start, end, di->txout, forceall));
    return (NULL);
}
static void *
dma32_getnextrxp(dma_info_t *di, bool forceall)
{
    uint i;
    void *rxp;

    /* if forcing, dma engine must be disabled */
    ASSERT(!forceall || !dma32_rxenabled(di));

    if (di->nrxd == 0)
        return (NULL);

    i = di->rxin;

    /* return if no packets posted */
    if (i == di->rxout)
        return (NULL);

    /* ignore curr if forceall */
    if (!forceall &&
        (i == B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t)))
        return (NULL);

    /* get the packet pointer that corresponds to the rx descriptor */
    rxp = di->rxp[i];
    ASSERT(rxp);
    di->rxp[i] = NULL;

    /* clear this packet from the descriptor ring */
    DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow),
              di->rxbufsize, DMA_RX, rxp);

    W_SM(&di->rxd32[i].addr, 0xdeadbeef);

    di->rxin = NEXTRXD(i);

    return (rxp);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma32_txrotate(dma_info_t *di)
{
    uint ad;
    uint nactive;
    uint rot;
    uint old, new;
    uint32 w;
    uint first, last;

    ASSERT(dma32_txsuspendedidle(di));

    nactive = _dma_txactive(di);
    ad = B2I(((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t);
    rot = TXD(ad - di->txin);

    ASSERT(rot < di->ntxd);

    /* full-ring case is a lot harder - don't worry about this */
    if (rot >= (di->ntxd - nactive)) {
        DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
        return;
    }

    first = di->txin;
    last = PREVTXD(di->txout);

    /* move entries starting at last and moving backwards to first */
    for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
        new = TXD(old + rot);

        /*
         * Move the tx dma descriptor.
         * EOT is set only in the last entry in the ring.
         */
        w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
        if (new == (di->ntxd - 1))
            w |= CTRL_EOT;
        W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
        W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

        /* zap the old tx dma descriptor address field */
        W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

        /* move the corresponding txp[] entry */
        ASSERT(di->txp[new] == NULL);
        di->txp[new] = di->txp[old];
        di->txp[old] = NULL;
    }

    /* update txin and txout */
    di->txin = ad;
    di->txout = TXD(di->txout + rot);
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

    /* kick the chip */
    W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
}
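
/*
 * Worked rotation example (sketch, not part of the original file): with
 * an 8-entry ring, txin == 2 and the hardware ActiveDescriptor at 5,
 * every pending entry slides forward by rot == 3, wrapping at the end.
 */
#if 0	/* illustration only */
static void txrotate_demo(void)
{
    uint ntxd = 8, txin = 2, ad = 5;
    uint rot = (ad - txin) & (ntxd - 1);        /* == 3 */
    uint old = 6, new = (old + rot) & (ntxd - 1);

    ASSERT(rot == 3);
    ASSERT(new == 1);   /* entry 6 wraps around to slot 1 */
}
#endif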
uint
dma_addrwidth(sb_t *sbh, void *dmaregs)
{
    dma32regs_t *dma32regs;
    osl_t *osh;

    osh = sb_osh(sbh);

    /* Start checking for 32-bit / 30-bit addressing */
    dma32regs = (dma32regs_t *)dmaregs;

    /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
    if ((BUSTYPE(sbh->bustype) == SB_BUS) ||
        ((BUSTYPE(sbh->bustype) == PCI_BUS) && sbh->buscoretype == SB_PCIE) ||
        (_dma32_addrext(osh, dma32regs)))
        return (DMADDRWIDTH_32);

    /* fell through; only 30-bit addressing is safe */
    return (DMADDRWIDTH_30);
}