/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx .
 *
 * Copyright 2006, Broadcom Corporation
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 * $Id: hnddma.c,v 1.11 2006/04/08 07:12:42 honor Exp $
 */
#include "linux_osl.h"
#include <bcmendian.h>

#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level = 0;	/* initializer value assumed: quiet by default */
#define	MAXNAMEL	8	/* 8 char names */

#define	DI_INFO(dmah)	(dma_info_t *)dmah
/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
					 * which could be const */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* callers name for diag msgs */

	void *osh;		/* os handle */
	sb_t *sbh;		/* sb handle */

	bool dma64;		/* dma64 enabled */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

	dma32regs_t *d32txregs;	/* 32 bits dma tx engine registers */
	dma32regs_t *d32rxregs;	/* 32 bits dma rx engine registers */
	dma64regs_t *d64txregs;	/* 64 bits dma tx engine registers */
	dma64regs_t *d64rxregs;	/* 64 bits dma rx engine registers */

	uint32 dma64align;	/* either 8k or 4k depends on number of dd */
	dma32dd_t *txd32;	/* pointer to dma32 tx descriptor ring */
	dma64dd_t *txd64;	/* pointer to dma64 tx descriptor ring */
	uint ntxd;		/* # tx descriptors tunable */
	uint txin;		/* index of next descriptor to reclaim */
	uint txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	osldma_t *tx_dmah;	/* DMA TX descriptor ring handle */
	osldma_t **txp_dmah;	/* DMA TX packet data handle */
	ulong txdpa;		/* physical address of descriptor ring */
	uint txdalign;		/* #bytes added to alloc'd mem to align txd */
	uint txdalloc;		/* #bytes allocated for the ring */

	dma32dd_t *rxd32;	/* pointer to dma32 rx descriptor ring */
	dma64dd_t *rxd64;	/* pointer to dma64 rx descriptor ring */
	uint nrxd;		/* # rx descriptors tunable */
	uint rxin;		/* index of next descriptor to reclaim */
	uint rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t *rx_dmah;	/* DMA RX descriptor ring handle */
	osldma_t **rxp_dmah;	/* DMA RX packet data handle */
	ulong rxdpa;		/* physical address of descriptor ring */
	uint rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	uint rxdalloc;		/* #bytes allocated for the ring */

	uint rxbufsize;		/* rx buffer size in bytes,
				 * not including the extra headroom */
	uint nrxpost;		/* # rx buffers to keep posted */
	uint rxoffset;		/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/* high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/* high 32 bits */
} dma_info_t;
#ifdef BCMDMA64
#define	DMA64_ENAB(di)	((di)->dma64)
#define	DMA64_CAP	TRUE
#else
#define	DMA64_ENAB(di)	(0)
#define	DMA64_CAP	FALSE
#endif	/* BCMDMA64 */
/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD(i + 1)
#define	PREVTXD(i)	TXD(i - 1)
#define	NEXTRXD(i)	RXD(i + 1)
#define	NTXDACTIVE(h, t)	TXD(t - h)
#define	NRXDACTIVE(h, t)	RXD(t - h)
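/*
 * Because the ring sizes are powers of 2, masking with (n - 1) gives modular
 * wrap-around.  For example, with ntxd == 64: NEXTTXD(63) == (63 + 1) & 63 == 0,
 * and NTXDACTIVE(62, 1) == (1 - 62) & 63 == 3, which stays correct even after
 * the unsigned subtraction wraps.
 */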
/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */
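/*
 * PCI32ADDR_HIGH masks address bits [31:30].  When a physical address has
 * either of these bits set and the engine sits behind the PCI core, the two
 * bits are moved into the descriptor's address-extension (AE) field instead of
 * the address word itself; see dma32_dd_upd() and dma64_dd_upd() below.
 */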
/* common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, ulong pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static void _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static uintptr _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
/* ** 32 bit DMA prototypes */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, bool forceall);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, bool forceall);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);
static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
/* ** 64 bit DMA prototypes and stubs */
#ifdef BCMDMA64
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma64_getnexttxp(dma_info_t *di, bool forceall);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, bool forceall);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);
#else	/* !BCMDMA64: the 64-bit entry points are stubbed out */
static bool dma64_alloc(dma_info_t *di, uint direction) { return FALSE; }
static bool dma64_txreset(dma_info_t *di) { return FALSE; }
static bool dma64_rxreset(dma_info_t *di) { return FALSE; }
static bool dma64_txsuspendedidle(dma_info_t *di) { return FALSE; }
static int dma64_txfast(dma_info_t *di, void *p0, bool commit) { return 0; }
static void *dma64_getnexttxp(dma_info_t *di, bool forceall) { return NULL; }
static void *dma64_getnextrxp(dma_info_t *di, bool forceall) { return NULL; }
static void dma64_txrotate(dma_info_t *di) { return; }

static bool dma64_rxidle(dma_info_t *di) { return FALSE; }
static void dma64_txinit(dma_info_t *di) { return; }
static bool dma64_txenabled(dma_info_t *di) { return FALSE; }
static void dma64_txsuspend(dma_info_t *di) { return; }
static void dma64_txresume(dma_info_t *di) { return; }
static bool dma64_txsuspended(dma_info_t *di) { return FALSE; }
static void dma64_txreclaim(dma_info_t *di, bool forceall) { return; }
static bool dma64_txstopped(dma_info_t *di) { return FALSE; }
static bool dma64_rxstopped(dma_info_t *di) { return FALSE; }
static bool dma64_rxenabled(dma_info_t *di) { return FALSE; }
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs) { return FALSE; }
#endif	/* BCMDMA64 */
static di_fcn_t dma64proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma64_txinit,
	(di_txreset_t)dma64_txreset,
	(di_txenabled_t)dma64_txenabled,
	(di_txsuspend_t)dma64_txsuspend,
	(di_txresume_t)dma64_txresume,
	(di_txsuspended_t)dma64_txsuspended,
	(di_txsuspendedidle_t)dma64_txsuspendedidle,
	(di_txfast_t)dma64_txfast,
	(di_txstopped_t)dma64_txstopped,
	(di_txreclaim_t)dma64_txreclaim,
	(di_getnexttxp_t)dma64_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma64_txrotate,

	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma64_rxreset,
	(di_rxidle_t)dma64_rxidle,
	(di_rxstopped_t)dma64_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma64_rxenabled,

	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,

	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,
};
static di_fcn_t dma32proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma32_txinit,
	(di_txreset_t)dma32_txreset,
	(di_txenabled_t)dma32_txenabled,
	(di_txsuspend_t)dma32_txsuspend,
	(di_txresume_t)dma32_txresume,
	(di_txsuspended_t)dma32_txsuspended,
	(di_txsuspendedidle_t)dma32_txsuspendedidle,
	(di_txfast_t)dma32_txfast,
	(di_txstopped_t)dma32_txstopped,
	(di_txreclaim_t)dma32_txreclaim,
	(di_getnexttxp_t)dma32_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma32_txrotate,

	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma32_rxreset,
	(di_rxidle_t)dma32_rxidle,
	(di_rxstopped_t)dma32_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma32_rxenabled,

	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,

	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,
};
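/*
 * Both tables expose the same di_fcn_t operations; dma_attach() below selects
 * dma64proc or dma32proc once at attach time, so callers dispatch through
 * hnddma.di_fn without caring which engine width sits underneath.
 */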
dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx,
	uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset, uint *msg_level)
	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {

	bzero((char *)di, sizeof(dma_info_t));

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb is no longer supported */

	di->dma64 = ((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);

	DMA_ERROR(("dma_attach: driver doesn't have the capability to support "

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	ASSERT(dmaregsrx == NULL);

	ASSERT(dmaregstx == NULL);
	/* init dma reg pointer */
	ASSERT(ntxd <= D64MAXDD);
	ASSERT(nrxd <= D64MAXDD);
	di->d64txregs = (dma64regs_t *)dmaregstx;
	di->d64rxregs = (dma64regs_t *)dmaregsrx;

	di->dma64align = D64RINGALIGN;
	if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
		/* for smaller dd table, HW relax the alignment requirement */
		di->dma64align = D64RINGALIGN / 2;
	}

	ASSERT(ntxd <= D32MAXDD);
	ASSERT(nrxd <= D32MAXDD);
	di->d32txregs = (dma32regs_t *)dmaregstx;
	di->d32rxregs = (dma32regs_t *)dmaregsrx;
	DMA_TRACE(("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d "
	           "rxoffset %d dmaregstx %p dmaregsrx %p\n",
	           name, (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd, rxbufsize,
	           nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';
	/* the actual dma size doesn't include the extra headroom */
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
	else
		di->rxbufsize = rxbufsize;

	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;
	/*
	 * figure out the DMA physical address offset for dd and data
	 *     for old chips w/o sb, use zero
	 *     for new chips w sb,
	 *         PCI/PCIE: they map silicon backplane address to zero-based memory, need offset
	 *         Other bus: use zero
	 *         SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->dataoffsetlow = 0;
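	/*
	 * The ddoffset and dataoffset fields are simply added to every physical
	 * address that gets programmed into a descriptor or a ring-base register,
	 * translating host physical addresses into the address space the DMA
	 * engine uses on the backplane.
	 */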
	/* for pci bus, add offset */
	if (sbh->bustype == PCI_BUS) {
		if ((sbh->buscoretype == SB_PCIE) && di->dma64) {
			/* pcie with DMA64 */
			di->ddoffsethigh = SB_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			di->ddoffsetlow = SB_PCI_DMA;
			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}

#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif	/* defined(__mips__) && defined(IL_BIGENDIAN) */
	di->addrext = _dma_isaddrext(di);

	/* allocate tx packet pointer vector */
	size = ntxd * sizeof(void *);
	if ((di->txp = MALLOC(osh, size)) == NULL) {
		DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
		           di->name, MALLOCED(osh)));

	bzero((char *)di->txp, size);

	/* allocate rx packet pointer vector */
	size = nrxd * sizeof(void *);
	if ((di->rxp = MALLOC(osh, size)) == NULL) {
		DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
		           di->name, MALLOCED(osh)));

	bzero((char *)di->rxp, size);

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (!_dma_alloc(di, DMA_TX))

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (!_dma_alloc(di, DMA_RX))
	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
		DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
		           di->name, di->txdpa));

	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
		DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
		           di->name, di->rxdpa));

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
	           "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow,
	           di->dataoffsethigh, di->addrext));
	/* allocate tx packet pointer vector and DMA mapping vectors */
	size = ntxd * sizeof(osldma_t **);
	if ((di->txp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)

	bzero((char*)di->txp_dmah, size);

	/* allocate rx packet pointer vector and DMA mapping vectors */
	size = nrxd * sizeof(osldma_t **);
	if ((di->rxp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)

	bzero((char*)di->rxp_dmah, size);
	/* initialize opsvec of function pointers */
	di->hnddma.di_fn = DMA64_ENAB(di) ? dma64proc : dma32proc;

	return ((hnddma_t *)di);
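/*
 * Typical use (illustrative only -- the caller variable names and the ring and
 * buffer sizes below are hypothetical, not taken from this file):
 *
 *	hnddma_t *dmah;
 *	dmah = dma_attach(osh, "wl0", sbh, dmaregstx, dmaregsrx,
 *	                  256, 256, 2048, 64, 0, &msg_level);
 *	if (dmah == NULL)
 *		return;		// attach failed
 *
 * After attach, drivers operate through the exported hnddma_pub / di_fn ops
 * rather than calling the dma32_* / dma64_* routines directly.
 */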
/* init the tx or rx descriptor */
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx, uint32 *flags,

	/* dma32 uses 32 bits control to fit both flags and bufcounter */
	*flags = *flags | (bufcount & CTRL_BC_MASK);

	if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	} else {
		/* address extension */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		*flags |= (ae << CTRL_AE_SHIFT);
		W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	}
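/*
 * In the 32-bit descriptor format a single ctrl word carries both the control
 * flags and the buffer byte count: the count lives in the CTRL_BC_MASK bits,
 * while the bits outside the mask hold flags such as CTRL_EOF/CTRL_EOT/CTRL_IOC
 * and the address-extension value shifted to CTRL_AE_SHIFT.
 */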
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, ulong pa, uint outidx, uint32 *flags,

	uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
	if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addrlow, BUS_SWAP32(pa + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow, BUS_SWAP32(pa + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
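/*
 * Address-extension example: for pa == 0x80100000, bits [31:30] are 0b10, so
 * ae == 2 and the masked address written into the descriptor becomes
 * 0x00100000 (plus dataoffsetlow); the engine reassembles the full address
 * from the AE field carried in ctrl/ctrl2.
 */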
_dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)

	OR_REG(osh, &dma32regs->control, XC_AE);
	w = R_REG(osh, &dma32regs->control);
	AND_REG(osh, &dma32regs->control, ~XC_AE);
	return ((w & XC_AE) == XC_AE);
_dma_alloc(dma_info_t *di, uint direction)
{
	if (DMA64_ENAB(di)) {
		return dma64_alloc(di, direction);
	} else {
		return dma32_alloc(di, direction);
	}
}
/* !! may be called with core in reset */
_dma_detach(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (DMA64_ENAB(di)) {
		DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->txd64 - di->txdalign),
		                    di->txdalloc, (di->txdpa - di->txdalign), &di->tx_dmah);
		DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->rxd64 - di->rxdalign),
		                    di->rxdalloc, (di->rxdpa - di->rxdalign), &di->rx_dmah);
	} else {
		DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->txd32 - di->txdalign),
		                    di->txdalloc, (di->txdpa - di->txdalign), &di->tx_dmah);
		DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->rxd32 - di->rxdalign),
		                    di->rxdalloc, (di->rxdpa - di->rxdalign), &di->rx_dmah);
	}

	/* free packet pointer vectors */
	MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
	MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));

	/* free tx packet DMA handles */
	MFREE(di->osh, (void *)di->txp_dmah, di->ntxd * sizeof(osldma_t **));

	/* free rx packet DMA handles */
	MFREE(di->osh, (void *)di->rxp_dmah, di->nrxd * sizeof(osldma_t **));

	/* free our private info structure */
	MFREE(di->osh, (void *)di, sizeof(dma_info_t));
}
/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
_dma_isaddrext(dma_info_t *di)
{
	if (DMA64_ENAB(di)) {
		/* DMA64 supports full 32 bits or 64 bits. AE is always valid */

		/* not all tx or rx channel are available */
		if (di->d64txregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64txregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n",

		} else if (di->d64rxregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64rxregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n",

	} else if (di->d32txregs)
		return (_dma32_addrext(di->osh, di->d32txregs));
	else if (di->d32rxregs)
		return (_dma32_addrext(di->osh, di->d32rxregs));
/* initialize descriptor table base address */
_dma_ddtable_init(dma_info_t *di, uint direction, ulong pa)
{
	if (DMA64_ENAB(di)) {

		if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow, (pa + di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow, (pa + di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
			}
		} else {
			/* DMA64 32bits address extension */

			/* shift the high bit(s) from pa to ae */
			ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
			pa &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow, (pa + di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
				SET_REG(di->osh, &di->d64txregs->control, D64_XC_AE,
				        (ae << D64_XC_AE_SHIFT));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow, (pa + di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
				SET_REG(di->osh, &di->d64rxregs->control, D64_RC_AE,
				        (ae << D64_RC_AE_SHIFT));
			}
		}
	} else {
		if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX)
				W_REG(di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
			else
				W_REG(di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
		} else {
			/* dma32 address extension */

			/* shift the high bit(s) from pa to ae */
			ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
			pa &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
				SET_REG(di->osh, &di->d32txregs->control, XC_AE, ae << XC_AE_SHIFT);
			} else {
				W_REG(di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
				SET_REG(di->osh, &di->d32rxregs->control, RC_AE, ae << RC_AE_SHIFT);
			}
		}
	}
}
_dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	if (DMA64_ENAB(di))
		OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
	else
		OR_REG(di->osh, &di->d32txregs->control, XC_LE);
}
_dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	if (DMA64_ENAB(di)) {
		BZERO_SM((void *)(uintptr)di->rxd64, (di->nrxd * sizeof(dma64dd_t)));
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else {
		BZERO_SM((void *)(uintptr)di->rxd32, (di->nrxd * sizeof(dma32dd_t)));
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	}
}
_dma_rxenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	if (DMA64_ENAB(di))
		W_REG(di->osh, &di->d64rxregs->control,
		      ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
	else
		W_REG(di->osh, &di->d32rxregs->control, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}
/* !! rx entry routine, returns a pointer to the next frame received,
 * or NULL if there are no more
 */
_dma_rx(dma_info_t *di)
{
	while ((p = _dma_getnextrxp(di, FALSE))) {
		/* skip giant packets which span multiple rx descriptors */

			skiplen -= di->rxbufsize;

			PKTFREE(di->osh, p, FALSE);

		len = ltoh16(*(uint16 *)(PKTDATA(di->osh, p)));
		DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

		/* bad frame length check */
		if (len > (di->rxbufsize - di->rxoffset)) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
			skiplen = len - (di->rxbufsize - di->rxoffset);
			PKTFREE(di->osh, p, FALSE);
			di->hnddma.rxgiants++;

		/* set actual length */
		PKTSETLEN(di->osh, p, (di->rxoffset + len));
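/*
 * The frame length is read from the first uint16 of the buffer, which the
 * hardware fills in as part of its receive header, and the packet length is
 * then set to rxoffset + len.  A reported length larger than
 * (rxbufsize - rxoffset) means the frame spanned multiple rx descriptors;
 * such giants are freed, counted in rxgiants, and the leftover fragments are
 * skipped on later iterations via skiplen.
 */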
/* post receive buffers */
_dma_rxfill(dma_info_t *di)
{
	uint extra_offset = 0;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */
	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = BCMEXTRAHDROOM;

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the */
		if ((p = PKTGET(di->osh, di->rxbufsize + extra_offset,
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
			di->hnddma.rxnobuf++;

		/* reserve an extra headroom, if applicable */
			PKTPULL(di->osh, p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(uint32 *)(PKTDATA(di->osh, p)) = 0;

		pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->osh, p),
		                      di->rxbufsize, DMA_RX, p);

		ASSERT(ISALIGNED(pa, 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);

		/* reset flags for each descriptor */
		if (DMA64_ENAB(di)) {
			if (rxout == (di->nrxd - 1))
				flags = D64_CTRL1_EOT;

			dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
		} else {
			if (rxout == (di->nrxd - 1))

			dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
		}
		rxout = NEXTRXD(rxout);
	}

	/* update the chip lastdscr pointer */
	if (DMA64_ENAB(di)) {
		W_REG(di->osh, &di->d64rxregs->ptr, I2B(rxout, dma64dd_t));
	} else {
		W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
	}
}
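/*
 * Example of the refill arithmetic: with nrxpost == 64, rxin == 10 and
 * rxout == 42, NRXDACTIVE(rxin, rxout) == 32 buffers are still posted, so the
 * loop allocates and posts n == 64 - 32 == 32 fresh buffers and then advances
 * the chip's lastdscr pointer to the new rxout.
 */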
/* like getnexttxp but no reclaim */
_dma_peeknexttxp(dma_info_t *di)
{
	if (DMA64_ENAB(di)) {
		end = B2I(R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
	} else {
		end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
	}

	for (i = di->txin; i != end; i = NEXTTXD(i))
_dma_rxreclaim(dma_info_t *di)
{
	/* "unused local" warning suppression for OSLs that
	 * define PKTFREE() without using the di->osh arg
	 */

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, TRUE)))
		PKTFREE(di->osh, p, FALSE);
}

_dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (DMA64_ENAB(di)) {
		return dma64_getnextrxp(di, forceall);
	} else {
		return dma32_getnextrxp(di, forceall);
	}
}
_dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}

_dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}
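/*
 * txavail is always one less than the number of free descriptors: txin == txout
 * means the ring is empty, so one slot must stay unused to keep a completely
 * full ring (txout catching back up to txin) distinguishable from an empty one.
 */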
_dma_txactive(dma_info_t *di)
{
	return (NTXDACTIVE(di->txin, di->txout));
}
_dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}
/* get the address of the var in order to change later */
_dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return ((uintptr) &(di->hnddma.txavail));
}

dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
{
	OR_REG(osh, &regs->control, XC_LE);
}
/* 32 bits DMA functions */
dma32_txinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txinit\n", di->name));

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void *)(uintptr)di->txd32, (di->ntxd * sizeof(dma32dd_t)));
	W_REG(di->osh, &di->d32txregs->control, XC_XE);
	_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
dma32_txenabled(dma_info_t *di)
{
	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d32txregs->control);
	return ((xc != 0xffffffff) && (xc & XC_XE));
}

dma32_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}

dma32_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}

dma32_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}
dma32_txreclaim(dma_info_t *di, bool forceall)
{
	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

	while ((p = dma32_getnexttxp(di, forceall)))
		PKTFREE(di->osh, p, TRUE);
}

dma32_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
}

dma32_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
}
dma32_alloc(dma_info_t *di, uint direction)
{
	ddlen = sizeof(dma32dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
		size += D32RINGALIGN;

	if (direction == DMA_TX) {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa, &di->tx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",

		di->txd32 = (dma32dd_t *) ROUNDUP((uintptr)va, D32RINGALIGN);
		di->txdalign = (uint)((int8 *)(uintptr)di->txd32 - (int8 *)va);
		di->txdpa += di->txdalign;
		di->txdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->txd32, D32RINGALIGN));
	} else {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa, &di->rx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",

		di->rxd32 = (dma32dd_t *) ROUNDUP((uintptr)va, D32RINGALIGN);
		di->rxdalign = (uint)((int8 *)(uintptr)di->rxd32 - (int8 *)va);
		di->rxdpa += di->rxdalign;
		di->rxdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->rxd32, D32RINGALIGN));
	}
}
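/*
 * Alignment trick: when the allocator's DMA_CONSISTENT_ALIGN is not already a
 * multiple of the ring alignment, one extra alignment's worth of bytes is
 * allocated and the ring pointer is rounded up inside that slack; txdalign and
 * rxdalign record how many bytes were skipped so the same offset can be added
 * to the physical address here and undone again at free time in _dma_detach().
 */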
dma32_txreset(dma_info_t *di)
{
	/* suspend tx DMA first */
	W_REG(di->osh, &di->d32txregs->control, XC_SE);
	SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
	          != XS_XS_DISABLED) &&
	         (status != XS_XS_IDLE) &&
	         (status != XS_XS_STOPPED),

	W_REG(di->osh, &di->d32txregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
	          &di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED),

	/* wait for the last transaction to complete */

	return (status == XS_XS_DISABLED);
dma32_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
	        R_REG(di->osh, &di->d32rxregs->ptr));
}
dma32_rxreset(dma_info_t *di)
{
	W_REG(di->osh, &di->d32rxregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
	          &di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED),

	return (status == RS_RS_DISABLED);
}

dma32_rxenabled(dma_info_t *di)
{
	rc = R_REG(di->osh, &di->d32rxregs->control);
	return ((rc != 0xffffffff) && (rc & RC_RE));
}
dma32_txsuspendedidle(dma_info_t *di)
{
	if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))

	if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)

	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 */
dma32_txfast(dma_info_t *di, void *p0, bool commit)
{
	DMA_TRACE(("%s: dma_txfast\n", di->name));

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = PKTDATA(di->osh, p);
		len = PKTLEN(di->osh, p);
		next = PKTNEXT(di->osh, p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)

		/* get physical address of buffer start */
		pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);

			flags |= (CTRL_IOC | CTRL_EOF);
		if (txout == (di->ntxd - 1))

		dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
		ASSERT(di->txp[txout] == NULL);

		txout = NEXTTXD(txout);
	}

	/* if last txd eof not set, fix it */
	if (!(flags & CTRL_EOF))
		W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
		W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
	PKTFREE(di->osh, p0, TRUE);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
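/*
 * Each buffer in the packet chain consumes one descriptor, and posting stops
 * one short of txin (the NEXTTXD(txout) == di->txin check above), so a chain
 * is only accepted if every fragment fits; on failure the whole chain is freed
 * and txavail is forced to 0 so the caller backs off until
 * dma32_getnexttxp() reclaims completed descriptors.
 */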
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
dma32_getnexttxp(dma_info_t *di, bool forceall)
{
	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

	end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

	if ((start == 0) && (end > di->txout))

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow),
		          (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK),
		          DMA_TX, di->txp[i]);

		W_SM(&di->txd32[i].addr, 0xdeadbeef);

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	           start, end, di->txout, forceall));
dma32_getnextrxp(dma_info_t *di, bool forceall)
{
	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma32_rxenabled(di));

	/* return if no packets posted */

	/* ignore curr if forceall */
	if (!forceall && (i == B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t)))

	/* get the packet pointer that corresponds to the rx descriptor */

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow),
	          di->rxbufsize, DMA_RX, rxp);

	W_SM(&di->rxd32[i].addr, 0xdeadbeef);

	di->rxin = NEXTRXD(i);
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
dma32_txrotate(dma_info_t *di)
{
	ASSERT(dma32_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = B2I(((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t);
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));

	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
		if (new == (di->ntxd - 1))

		W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
		W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];
		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
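/*
 * Rotation example: if the engine's active descriptor (ad) is 5 while
 * txin == 2, rot == 3, so a pending entry at index 10 is copied to index 13
 * (TXD(10 + 3)) with its EOT bit recomputed, its old slot is poisoned with
 * 0xdeadbeef, and txin/txout both advance by 3 so the software indexes line up
 * with where the hardware will resume fetching.
 */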
/* 64 bits DMA functions */

#ifdef BCMDMA64
dma64_txinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txinit\n", di->name));

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void *)(uintptr)di->txd64, (di->ntxd * sizeof(dma64dd_t)));
	W_REG(di->osh, &di->d64txregs->control, D64_XC_XE);
	_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
dma64_txenabled(dma_info_t *di)
{
	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d64txregs->control);
	return ((xc != 0xffffffff) && (xc & D64_XC_XE));
}

dma64_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
}

dma64_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
}

dma64_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) || ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE)

dma64_txreclaim(dma_info_t *di, bool forceall)
{
	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

	while ((p = dma64_getnexttxp(di, forceall)))
		PKTFREE(di->osh, p, TRUE);
}
*di
)
1470 return ((R_REG(di
->osh
, &di
->d64txregs
->status0
) & D64_XS0_XS_MASK
) == D64_XS0_XS_STOPPED
);
1474 dma64_rxstopped(dma_info_t
*di
)
1476 return ((R_REG(di
->osh
, &di
->d64rxregs
->status0
) & D64_RS0_RS_MASK
) == D64_RS0_RS_STOPPED
);
dma64_alloc(dma_info_t *di, uint direction)
{
	ddlen = sizeof(dma64dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

	alignbytes = di->dma64align;

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, alignbytes))

	if (direction == DMA_TX) {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa, &di->tx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",

		di->txd64 = (dma64dd_t *) ROUNDUP((uintptr)va, alignbytes);
		di->txdalign = (uint)((int8 *)(uintptr)di->txd64 - (int8 *)va);
		di->txdpa += di->txdalign;
		di->txdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->txd64, alignbytes));
	} else {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa, &di->rx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",

		di->rxd64 = (dma64dd_t *) ROUNDUP((uintptr)va, alignbytes);
		di->rxdalign = (uint)((int8 *)(uintptr)di->rxd64 - (int8 *)va);
		di->rxdpa += di->rxdalign;
		di->rxdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->rxd64, alignbytes));
	}
}
dma64_txreset(dma_info_t *di)
{
	/* suspend tx DMA first */
	W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	          D64_XS0_XS_DISABLED) &&
	         (status != D64_XS0_XS_IDLE) &&
	         (status != D64_XS0_XS_STOPPED),

	W_REG(di->osh, &di->d64txregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	          D64_XS0_XS_DISABLED),

	/* wait for the last transaction to complete */

	return (status == D64_XS0_XS_DISABLED);
dma64_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
	        R_REG(di->osh, &di->d64rxregs->ptr));
}

dma64_rxreset(dma_info_t *di)
{
	W_REG(di->osh, &di->d64rxregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
	          D64_RS0_RS_DISABLED),

	return (status == D64_RS0_RS_DISABLED);
}
dma64_rxenabled(dma_info_t *di)
{
	rc = R_REG(di->osh, &di->d64rxregs->control);
	return ((rc != 0xffffffff) && (rc & D64_RC_RE));
}

dma64_txsuspendedidle(dma_info_t *di)
{
	if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))

	if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
/* !! tx entry routine */
dma64_txfast(dma_info_t *di, void *p0, bool commit)
{
	DMA_TRACE(("%s: dma_txfast\n", di->name));

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = PKTDATA(di->osh, p);
		len = PKTLEN(di->osh, p);
		next = PKTNEXT(di->osh, p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)

		/* get physical address of buffer start */
		pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);

			flags |= D64_CTRL1_SOF;

			flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
		if (txout == (di->ntxd - 1))
			flags |= D64_CTRL1_EOT;

		dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
		ASSERT(di->txp[txout] == NULL);

		txout = NEXTTXD(txout);
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
		     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
		W_REG(di->osh, &di->d64txregs->ptr, I2B(txout, dma64dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
	PKTFREE(di->osh, p0, TRUE);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
dma64_getnexttxp(dma_info_t *di, bool forceall)
{
	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

	end = B2I(R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);

	if ((start == 0) && (end > di->txout))

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow),
		          (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK),
		          DMA_TX, di->txp[i]);

		W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
		W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	           start, end, di->txout, forceall));
dma64_getnextrxp(dma_info_t *di, bool forceall)
{
	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma64_rxenabled(di));

	/* return if no packets posted */

	/* ignore curr if forceall */
	if (!forceall &&
	    (i == B2I(R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK, dma64dd_t)))

	/* get the packet pointer that corresponds to the rx descriptor */

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow),
	          di->rxbufsize, DMA_RX, rxp);

	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

	di->rxin = NEXTRXD(i);
_dma64_addrext(osl_t *osh, dma64regs_t *dma64regs)
{
	OR_REG(osh, &dma64regs->control, D64_XC_AE);
	w = R_REG(osh, &dma64regs->control);
	AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
	return ((w & D64_XC_AE) == D64_XC_AE);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
dma64_txrotate(dma_info_t *di)
{
	ASSERT(dma64_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = B2I((R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK), dma64dd_t);
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));

	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))

		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];
		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	W_REG(di->osh, &di->d64txregs->ptr, I2B(di->txout, dma64dd_t));
#endif	/* BCMDMA64 */
dma_addrwidth(sb_t *sbh, void *dmaregs)
{
	dma32regs_t *dma32regs;

	/* DMA engine is 64-bit capable */
	if (((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64)) {
		/* backplane is 64 bits capable */
		if (sb_backplane64(sbh))
			/* If bus is System Backplane or PCIE then we can access 64-bits */
			if ((BUSTYPE(sbh->bustype) == SB_BUS) ||
			    ((BUSTYPE(sbh->bustype) == PCI_BUS) &&
			     sbh->buscoretype == SB_PCIE))
				return (DMADDRWIDTH_64);

		/* DMA64 is always 32 bits capable, AE is always TRUE */
		ASSERT(_dma64_addrext(osh, (dma64regs_t *)dmaregs));

		return (DMADDRWIDTH_32);
	}

	/* Start checking for 32-bit / 30-bit addressing */
	dma32regs = (dma32regs_t *)dmaregs;

	/* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
	if ((BUSTYPE(sbh->bustype) == SB_BUS) ||
	    ((BUSTYPE(sbh->bustype) == PCI_BUS) && sbh->buscoretype == SB_PCIE) ||
	    (_dma32_addrext(osh, dma32regs)))
		return (DMADDRWIDTH_32);

	return (DMADDRWIDTH_30);
}
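/*
 * Summary of the width probe above: a 64-bit capable engine on a 64-bit
 * backplane reachable over SB_BUS or PCIE reports DMADDRWIDTH_64; any other
 * DMA64 configuration, SB_BUS/PCIE with a 32-bit engine, or a 32-bit engine
 * whose AE bit probes as implemented reports DMADDRWIDTH_32; otherwise the
 * engine is limited to 30-bit addressing (DMADDRWIDTH_30).
 */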