1 /*
2 * Generic Broadcom Home Networking Division (HND) DMA module.
3 * This supports the following chips: BCM42xx, 44xx, 47xx.
4 *
5 * Copyright 2007, Broadcom Corporation
6 * All Rights Reserved.
7 *
8 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
9 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
10 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
11 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
12 *
13 * $Id$
14 */
15
16 #include <typedefs.h>
17 #include <bcmdefs.h>
18 #include <osl.h>
19 #include "linux_osl.h"
20 #include <bcmendian.h>
21 #include <sbconfig.h>
22 #include "bcmutils.h"
23 #include <bcmdevs.h>
24 #include <sbutils.h>
25
26 #include "sbhnddma.h"
27 #include "hnddma.h"
28
29 /* debug/trace */
30 #ifdef BCMDBG
31 #define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args
32 #define DMA_TRACE(args) if (!(*di->msg_level & 2)); else printf args
33 #else
34 #define DMA_ERROR(args)
35 #define DMA_TRACE(args)
36 #endif
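/*
 * Annotation (not in the original source): the "if (!cond); else printf args"
 * shape makes each macro expand to exactly one statement, so an unbraced
 * caller such as "if (err) DMA_ERROR ((...)); else ..." parses correctly --
 * the macro's else pairs with its own if, avoiding the dangling-else
 * ambiguity a bare "if (cond) printf args" expansion would introduce.
 */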
37
38 /* default dma message level (if input msg_level pointer is null in dma_attach()) */
39 static uint dma_msg_level = 0;
40
41 #define MAXNAMEL 8 /* 8 char names */
42
43 #define DI_INFO(dmah) (dma_info_t *)dmah
44 typedef struct osl_dmainfo osldma_t;
45
46 /* dma engine software state */
47 typedef struct dma_info
48 {
49 struct hnddma_pub hnddma; /* exported structure, don't use hnddma_t,
50 * which could be const
51 */
52 uint *msg_level; /* message level pointer */
53 char name[MAXNAMEL]; /* callers name for diag msgs */
54
55 void *osh; /* os handle */
56 sb_t *sbh; /* sb handle */
57
58 bool dma64; /* dma64 enabled */
59 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
60
61 dma32regs_t *d32txregs; /* 32-bit dma tx engine registers */
62 dma32regs_t *d32rxregs; /* 32-bit dma rx engine registers */
63 dma64regs_t *d64txregs; /* 64-bit dma tx engine registers */
64 dma64regs_t *d64rxregs; /* 64-bit dma rx engine registers */
65
66 uint32 dma64align; /* either 8K or 4K, depending on the number of descriptors */
67 dma32dd_t *txd32; /* pointer to dma32 tx descriptor ring */
68 dma64dd_t *txd64; /* pointer to dma64 tx descriptor ring */
69 uint ntxd; /* # tx descriptors tunable */
70 uint txin; /* index of next descriptor to reclaim */
71 uint txout; /* index of next descriptor to post */
72 void **txp; /* pointer to parallel array of pointers to packets */
73 osldma_t *tx_dmah; /* DMA TX descriptor ring handle */
74 osldma_t **txp_dmah; /* DMA TX packet data handle */
75 ulong txdpa; /* physical address of descriptor ring */
76 uint txdalign; /* #bytes added to alloc'd mem to align txd */
77 uint txdalloc; /* #bytes allocated for the ring */
78
79 dma32dd_t *rxd32; /* pointer to dma32 rx descriptor ring */
80 dma64dd_t *rxd64; /* pointer to dma64 rx descriptor ring */
81 uint nrxd; /* # rx descriptors tunable */
82 uint rxin; /* index of next descriptor to reclaim */
83 uint rxout; /* index of next descriptor to post */
84 void **rxp; /* pointer to parallel array of pointers to packets */
85 osldma_t *rx_dmah; /* DMA RX descriptor ring handle */
86 osldma_t **rxp_dmah; /* DMA RX packet data handle */
87 ulong rxdpa; /* physical address of descriptor ring */
88 uint rxdalign; /* #bytes added to alloc'd mem to align rxd */
89 uint rxdalloc; /* #bytes allocated for the ring */
90
91 /* tunables */
92 uint rxbufsize; /* rx buffer size in bytes,
93 not including the extra headroom
94 */
95 uint nrxpost; /* # rx buffers to keep posted */
96 uint rxoffset; /* rxcontrol offset */
97 uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
98 uint ddoffsethigh; /* high 32 bits */
99 uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
100 uint dataoffsethigh; /* high 32 bits */
101 } dma_info_t;
102
103 #ifdef BCMDMA64
104 #define DMA64_ENAB(di) ((di)->dma64)
105 #define DMA64_CAP TRUE
106 #else
107 #define DMA64_ENAB(di) (0)
108 #define DMA64_CAP FALSE
109 #endif
110
111 /* descriptor bumping macros */
112 #define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
113 #define TXD(x) XXD((x), di->ntxd)
114 #define RXD(x) XXD((x), di->nrxd)
115 #define NEXTTXD(i) TXD(i + 1)
116 #define PREVTXD(i) TXD(i - 1)
117 #define NEXTRXD(i) RXD(i + 1)
118 #define NTXDACTIVE(h, t) TXD(t - h)
119 #define NRXDACTIVE(h, t) RXD(t - h)
120
121 /* macros to convert between byte offsets and indexes */
122 #define B2I(bytes, type) ((bytes) / sizeof(type))
123 #define I2B(index, type) ((index) * sizeof(type))
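/*
 * Worked example (annotation, not in the original source): with ntxd = 64,
 * txin = 60 and txout = 2 the ring has wrapped, and
 * NTXDACTIVE (60, 2) = (2 - 60) & 63 = 6 descriptors are in flight.
 * Likewise B2I (48, dma32dd_t) = 48 / 8 = 6, each dma32 descriptor being
 * two uint32 words (8 bytes).
 */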
124
125 #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
126 #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
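/*
 * Annotation: on PCI only 30 address bits go into the descriptor; the top
 * two bits [31:30] are shifted into the "address extension" (AE) field of
 * the control word instead. For example pa = 0xc8000000 gives
 * ae = (pa & PCI32ADDR_HIGH) >> 30 = 3 and a stored address of 0x08000000
 * (see dma32_dd_upd / dma64_dd_upd below).
 */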
127
128
129 /* common prototypes */
130 static bool _dma_isaddrext (dma_info_t * di);
131 static bool _dma_alloc (dma_info_t * di, uint direction);
132 static void _dma_detach (dma_info_t * di);
133 static void _dma_ddtable_init (dma_info_t * di, uint direction, ulong pa);
134 static void _dma_rxinit (dma_info_t * di);
135 static void *_dma_rx (dma_info_t * di);
136 static void _dma_rxfill (dma_info_t * di);
137 static void _dma_rxreclaim (dma_info_t * di);
138 static void _dma_rxenable (dma_info_t * di);
139 static void *_dma_getnextrxp (dma_info_t * di, bool forceall);
140
141 static void _dma_txblock (dma_info_t * di);
142 static void _dma_txunblock (dma_info_t * di);
143 static uint _dma_txactive (dma_info_t * di);
144
145 static void *_dma_peeknexttxp (dma_info_t * di);
146 static uintptr _dma_getvar (dma_info_t * di, const char *name);
147 static void _dma_counterreset (dma_info_t * di);
148 static void _dma_fifoloopbackenable (dma_info_t * di);
149
150 /* ** 32 bit DMA prototypes */
151 static bool dma32_alloc (dma_info_t * di, uint direction);
152 static bool dma32_txreset (dma_info_t * di);
153 static bool dma32_rxreset (dma_info_t * di);
154 static bool dma32_txsuspendedidle (dma_info_t * di);
155 static int dma32_txfast (dma_info_t * di, void *p0, bool commit);
156 static void *dma32_getnexttxp (dma_info_t * di, bool forceall);
157 static void *dma32_getnextrxp (dma_info_t * di, bool forceall);
158 static void dma32_txrotate (dma_info_t * di);
159 static bool dma32_rxidle (dma_info_t * di);
160 static void dma32_txinit (dma_info_t * di);
161 static bool dma32_txenabled (dma_info_t * di);
162 static void dma32_txsuspend (dma_info_t * di);
163 static void dma32_txresume (dma_info_t * di);
164 static bool dma32_txsuspended (dma_info_t * di);
165 static void dma32_txreclaim (dma_info_t * di, bool forceall);
166 static bool dma32_txstopped (dma_info_t * di);
167 static bool dma32_rxstopped (dma_info_t * di);
168 static bool dma32_rxenabled (dma_info_t * di);
169 static bool _dma32_addrext (osl_t * osh, dma32regs_t * dma32regs);
170
171 /* ** 64 bit DMA prototypes and stubs */
172 #ifdef BCMDMA64
173 static bool dma64_alloc (dma_info_t * di, uint direction);
174 static bool dma64_txreset (dma_info_t * di);
175 static bool dma64_rxreset (dma_info_t * di);
176 static bool dma64_txsuspendedidle (dma_info_t * di);
177 static int dma64_txfast (dma_info_t * di, void *p0, bool commit);
178 static void *dma64_getnexttxp (dma_info_t * di, bool forceall);
179 static void *dma64_getnextrxp (dma_info_t * di, bool forceall);
180 static void dma64_txrotate (dma_info_t * di);
181
182 static bool dma64_rxidle (dma_info_t * di);
183 static void dma64_txinit (dma_info_t * di);
184 static bool dma64_txenabled (dma_info_t * di);
185 static void dma64_txsuspend (dma_info_t * di);
186 static void dma64_txresume (dma_info_t * di);
187 static bool dma64_txsuspended (dma_info_t * di);
188 static void dma64_txreclaim (dma_info_t * di, bool forceall);
189 static bool dma64_txstopped (dma_info_t * di);
190 static bool dma64_rxstopped (dma_info_t * di);
191 static bool dma64_rxenabled (dma_info_t * di);
192 static bool _dma64_addrext (osl_t * osh, dma64regs_t * dma64regs);
193
194 #else
195 static bool
196 dma64_alloc (dma_info_t * di, uint direction)
197 {
198 return FALSE;
199 }
200 static bool
201 dma64_txreset (dma_info_t * di)
202 {
203 return FALSE;
204 }
205 static bool
206 dma64_rxreset (dma_info_t * di)
207 {
208 return FALSE;
209 }
210 static bool
211 dma64_txsuspendedidle (dma_info_t * di)
212 {
213 return FALSE;
214 }
215 static int
216 dma64_txfast (dma_info_t * di, void *p0, bool commit)
217 {
218 return 0;
219 }
220 static void *
221 dma64_getnexttxp (dma_info_t * di, bool forceall)
222 {
223 return NULL;
224 }
225 static void *
226 dma64_getnextrxp (dma_info_t * di, bool forceall)
227 {
228 return NULL;
229 }
230 static void
231 dma64_txrotate (dma_info_t * di)
232 {
233 return;
234 }
235
236 static bool
237 dma64_rxidle (dma_info_t * di)
238 {
239 return FALSE;
240 }
241 static void
242 dma64_txinit (dma_info_t * di)
243 {
244 return;
245 }
246 static bool
247 dma64_txenabled (dma_info_t * di)
248 {
249 return FALSE;
250 }
251 static void
252 dma64_txsuspend (dma_info_t * di)
253 {
254 return;
255 }
256 static void
257 dma64_txresume (dma_info_t * di)
258 {
259 return;
260 }
261 static bool
262 dma64_txsuspended (dma_info_t * di)
263 {
264 return FALSE;
265 }
266 static void
267 dma64_txreclaim (dma_info_t * di, bool forceall)
268 {
269 return;
270 }
271 static bool
272 dma64_txstopped (dma_info_t * di)
273 {
274 return FALSE;
275 }
276 static bool
277 dma64_rxstopped (dma_info_t * di)
278 {
279 return FALSE;
280 }
281 static bool
282 dma64_rxenabled (dma_info_t * di)
283 {
284 return FALSE;
285 }
286 static bool
287 _dma64_addrext (osl_t * osh, dma64regs_t * dma64regs)
288 {
289 return FALSE;
290 }
291
292 #endif /* BCMDMA64 */
293
294 #ifdef BCMDBG
295 static void dma32_dumpring (dma_info_t * di, struct bcmstrbuf *b,
296 dma32dd_t * ring, uint start, uint end,
297 uint max_num);
298 static void dma32_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring);
299 static void dma32_dumptx (dma_info_t * di, struct bcmstrbuf *b,
300 bool dumpring);
301 static void dma32_dumprx (dma_info_t * di, struct bcmstrbuf *b,
302 bool dumpring);
303
304 static void dma64_dumpring (dma_info_t * di, struct bcmstrbuf *b,
305 dma64dd_t * ring, uint start, uint end,
306 uint max_num);
307 static void dma64_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring);
308 static void dma64_dumptx (dma_info_t * di, struct bcmstrbuf *b,
309 bool dumpring);
310 static void dma64_dumprx (dma_info_t * di, struct bcmstrbuf *b,
311 bool dumpring);
312 #endif
313
314
315 static di_fcn_t dma64proc = {
316 (di_detach_t) _dma_detach,
317 (di_txinit_t) dma64_txinit,
318 (di_txreset_t) dma64_txreset,
319 (di_txenabled_t) dma64_txenabled,
320 (di_txsuspend_t) dma64_txsuspend,
321 (di_txresume_t) dma64_txresume,
322 (di_txsuspended_t) dma64_txsuspended,
323 (di_txsuspendedidle_t) dma64_txsuspendedidle,
324 (di_txfast_t) dma64_txfast,
325 (di_txstopped_t) dma64_txstopped,
326 (di_txreclaim_t) dma64_txreclaim,
327 (di_getnexttxp_t) dma64_getnexttxp,
328 (di_peeknexttxp_t) _dma_peeknexttxp,
329 (di_txblock_t) _dma_txblock,
330 (di_txunblock_t) _dma_txunblock,
331 (di_txactive_t) _dma_txactive,
332 (di_txrotate_t) dma64_txrotate,
333
334 (di_rxinit_t) _dma_rxinit,
335 (di_rxreset_t) dma64_rxreset,
336 (di_rxidle_t) dma64_rxidle,
337 (di_rxstopped_t) dma64_rxstopped,
338 (di_rxenable_t) _dma_rxenable,
339 (di_rxenabled_t) dma64_rxenabled,
340 (di_rx_t) _dma_rx,
341 (di_rxfill_t) _dma_rxfill,
342 (di_rxreclaim_t) _dma_rxreclaim,
343 (di_getnextrxp_t) _dma_getnextrxp,
344
345 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
346 (di_getvar_t) _dma_getvar,
347 (di_counterreset_t) _dma_counterreset,
348
349 #ifdef BCMDBG
350 (di_dump_t) dma64_dump,
351 (di_dumptx_t) dma64_dumptx,
352 (di_dumprx_t) dma64_dumprx,
353 #else
354 NULL,
355 NULL,
356 NULL,
357 #endif
358 34
359 };
360
361 static di_fcn_t dma32proc = {
362 (di_detach_t) _dma_detach,
363 (di_txinit_t) dma32_txinit,
364 (di_txreset_t) dma32_txreset,
365 (di_txenabled_t) dma32_txenabled,
366 (di_txsuspend_t) dma32_txsuspend,
367 (di_txresume_t) dma32_txresume,
368 (di_txsuspended_t) dma32_txsuspended,
369 (di_txsuspendedidle_t) dma32_txsuspendedidle,
370 (di_txfast_t) dma32_txfast,
371 (di_txstopped_t) dma32_txstopped,
372 (di_txreclaim_t) dma32_txreclaim,
373 (di_getnexttxp_t) dma32_getnexttxp,
374 (di_peeknexttxp_t) _dma_peeknexttxp,
375 (di_txblock_t) _dma_txblock,
376 (di_txunblock_t) _dma_txunblock,
377 (di_txactive_t) _dma_txactive,
378 (di_txrotate_t) dma32_txrotate,
379
380 (di_rxinit_t) _dma_rxinit,
381 (di_rxreset_t) dma32_rxreset,
382 (di_rxidle_t) dma32_rxidle,
383 (di_rxstopped_t) dma32_rxstopped,
384 (di_rxenable_t) _dma_rxenable,
385 (di_rxenabled_t) dma32_rxenabled,
386 (di_rx_t) _dma_rx,
387 (di_rxfill_t) _dma_rxfill,
388 (di_rxreclaim_t) _dma_rxreclaim,
389 (di_getnextrxp_t) _dma_getnextrxp,
390
391 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
392 (di_getvar_t) _dma_getvar,
393 (di_counterreset_t) _dma_counterreset,
394
395 #ifdef BCMDBG
396 (di_dump_t) dma32_dump,
397 (di_dumptx_t) dma32_dumptx,
398 (di_dumprx_t) dma32_dumprx,
399 #else
400 NULL,
401 NULL,
402 NULL,
403 #endif
404 34
405 };
406
407 hnddma_t *
408 dma_attach (osl_t * osh, char *name, sb_t * sbh, void *dmaregstx,
409 void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
410 uint nrxpost, uint rxoffset, uint * msg_level)
411 {
412 dma_info_t *di;
413 uint size;
414
415 /* allocate private info structure */
416 if ((di = MALLOC (osh, sizeof (dma_info_t))) == NULL)
417 {
418 #ifdef BCMDBG
419 printf ("dma_attach: out of memory, malloced %d bytes\n",
420 MALLOCED (osh));
421 #endif
422 return (NULL);
423 }
424 bzero ((char *) di, sizeof (dma_info_t));
425
426 di->msg_level = msg_level ? msg_level : &dma_msg_level;
427
428 /* old chips w/o sb are no longer supported */
429 ASSERT (sbh != NULL);
430
431 di->dma64 = ((sb_coreflagshi (sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);
432
433 #ifndef BCMDMA64
434 if (di->dma64)
435 {
436 DMA_ERROR (("dma_attach: driver doesn't have the capability to support "
437 "64-bit DMA\n"));
438 goto fail;
439 }
440 #endif
441
442 /* check arguments */
443 ASSERT (ISPOWEROF2 (ntxd));
444 ASSERT (ISPOWEROF2 (nrxd));
445 if (nrxd == 0)
446 ASSERT (dmaregsrx == NULL);
447 if (ntxd == 0)
448 ASSERT (dmaregstx == NULL);
449
450
451 /* init dma reg pointer */
452 if (di->dma64)
453 {
454 ASSERT (ntxd <= D64MAXDD);
455 ASSERT (nrxd <= D64MAXDD);
456 di->d64txregs = (dma64regs_t *) dmaregstx;
457 di->d64rxregs = (dma64regs_t *) dmaregsrx;
458
459 di->dma64align = D64RINGALIGN;
460 if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
461 {
462 /* for a smaller dd table, the HW relaxes the alignment requirement */
463 di->dma64align = D64RINGALIGN / 2;
464 }
465 }
466 else
467 {
468 ASSERT (ntxd <= D32MAXDD);
469 ASSERT (nrxd <= D32MAXDD);
470 di->d32txregs = (dma32regs_t *) dmaregstx;
471 di->d32rxregs = (dma32regs_t *) dmaregsrx;
472 }
473
474 DMA_TRACE (("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d " "rxoffset %d dmaregstx %p dmaregsrx %p\n", name, (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd, rxbufsize, nrxpost, rxoffset, dmaregstx, dmaregsrx));
475
476 /* make a private copy of our callers name */
477 strncpy (di->name, name, MAXNAMEL);
478 di->name[MAXNAMEL - 1] = '\0';
479
480 di->osh = osh;
481 di->sbh = sbh;
482
483 /* save tunables */
484 di->ntxd = ntxd;
485 di->nrxd = nrxd;
486
487 /* the actual dma size doesn't include the extra headroom */
488 if (rxbufsize > BCMEXTRAHDROOM)
489 di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
490 else
491 di->rxbufsize = rxbufsize;
492
493 di->nrxpost = nrxpost;
494 di->rxoffset = rxoffset;
495
496 /*
497 * figure out the DMA physical address offset for dd and data
498 * for old chips w/o sb, use zero
499 * for new chips w/ sb,
500 * PCI/PCIE: they map the silicon backplane address to zero-based memory, need offset
501 * Other bus: use zero
502 * SB_BUS BIGENDIAN kludge: use the sdram swapped region for data buffers, not descriptors
503 */
504 di->ddoffsetlow = 0;
505 di->dataoffsetlow = 0;
506 /* for pci bus, add offset */
507 if (sbh->bustype == PCI_BUS)
508 {
509 if ((sbh->buscoretype == SB_PCIE) && di->dma64)
510 {
511 /* pcie with DMA64 */
512 di->ddoffsetlow = 0;
513 di->ddoffsethigh = SB_PCIE_DMA_H32;
514 }
515 else
516 {
517 /* pci(DMA32/DMA64) or pcie with DMA32 */
518 di->ddoffsetlow = SB_PCI_DMA;
519 di->ddoffsethigh = 0;
520 }
521 di->dataoffsetlow = di->ddoffsetlow;
522 di->dataoffsethigh = di->ddoffsethigh;
523 }
524
525 #if defined(__mips__) && defined(IL_BIGENDIAN)
526 di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
527 #endif
528
529 di->addrext = _dma_isaddrext (di);
530
531 /* allocate tx packet pointer vector */
532 if (ntxd)
533 {
534 size = ntxd * sizeof (void *);
535 if ((di->txp = MALLOC (osh, size)) == NULL)
536 {
537 DMA_ERROR (("%s: dma_attach: out of tx memory, malloced %d bytes\n",
538 di->name, MALLOCED (osh)));
539 goto fail;
540 }
541 bzero ((char *) di->txp, size);
542 }
543
544 /* allocate rx packet pointer vector */
545 if (nrxd)
546 {
547 size = nrxd * sizeof (void *);
548 if ((di->rxp = MALLOC (osh, size)) == NULL)
549 {
550 DMA_ERROR (("%s: dma_attach: out of rx memory, malloced %d bytes\n",
551 di->name, MALLOCED (osh)));
552 goto fail;
553 }
554 bzero ((char *) di->rxp, size);
555 }
556
557 /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
558 if (ntxd)
559 {
560 if (!_dma_alloc (di, DMA_TX))
561 goto fail;
562 }
563
564 /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
565 if (nrxd)
566 {
567 if (!_dma_alloc (di, DMA_RX))
568 goto fail;
569 }
570
571 if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ)
572 && !di->addrext)
573 {
574 DMA_ERROR (("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
575 di->name, di->txdpa));
576 goto fail;
577 }
578 if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ)
579 && !di->addrext)
580 {
581 DMA_ERROR (("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
582 di->name, di->rxdpa));
583 goto fail;
584 }
585
586 DMA_TRACE (("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));
587
588 /* allocate tx packet pointer vector and DMA mapping vectors */
589 if (ntxd)
590 {
591
592 size = ntxd * sizeof (osldma_t **);
593 if ((di->txp_dmah = (osldma_t **) MALLOC (osh, size)) == NULL)
594 goto fail;
595 bzero ((char *) di->txp_dmah, size);
596 }
597 else
598 di->txp_dmah = NULL;
599
600 /* allocate rx packet pointer vector and DMA mapping vectors */
601 if (nrxd)
602 {
603
604 size = nrxd * sizeof (osldma_t **);
605 if ((di->rxp_dmah = (osldma_t **) MALLOC (osh, size)) == NULL)
606 goto fail;
607 bzero ((char *) di->rxp_dmah, size);
608
609 }
610 else
611 di->rxp_dmah = NULL;
612
613 /* initialize opsvec of function pointers */
614 di->hnddma.di_fn = DMA64_ENAB (di) ? dma64proc : dma32proc;
615
616 return ((hnddma_t *) di);
617
618 fail:
619 _dma_detach (di);
620 return (NULL);
621 }
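/*
 * Usage sketch (hypothetical caller, not from the original source):
 *
 *   static uint msg_level = 0;
 *   hnddma_t *dmah = dma_attach (osh, "wl0", sbh,
 *                                (void *) txregs, (void *) rxregs,
 *                                256, 256, 2048, 256, 0, &msg_level);
 *   if (dmah == NULL)
 *     return;            -- attach failed, nothing needs to be freed
 *
 * txregs/rxregs and the ring/buffer sizes above are illustrative only;
 * ntxd and nrxd must be powers of 2, and real values come from the
 * chip-specific attach code, which then drives the engine through the
 * di_fn function table filled in at the end of this routine.
 */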
622
623 /* init the tx or rx descriptor */
624 static INLINE void
625 dma32_dd_upd (dma_info_t * di, dma32dd_t * ddring, ulong pa, uint outidx,
626 uint32 * flags, uint32 bufcount)
627 {
628 /* dma32 uses a 32-bit control word to hold both the flags and the buffer count */
629 *flags = *flags | (bufcount & CTRL_BC_MASK);
630
631 if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
632 {
633 W_SM (&ddring[outidx].addr, BUS_SWAP32 (pa + di->dataoffsetlow));
634 W_SM (&ddring[outidx].ctrl, BUS_SWAP32 (*flags));
635 }
636 else
637 {
638 /* address extension */
639 uint32 ae;
640 ASSERT (di->addrext);
641 ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
642 pa &= ~PCI32ADDR_HIGH;
643
644 *flags |= (ae << CTRL_AE_SHIFT);
645 W_SM (&ddring[outidx].addr, BUS_SWAP32 (pa + di->dataoffsetlow));
646 W_SM (&ddring[outidx].ctrl, BUS_SWAP32 (*flags));
647 }
648 }
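/*
 * Worked example (annotation): in the common (non-extension) path above, a
 * 1500-byte buffer at pa = 0x01000000 with no flags set yields
 * ctrl = 1500 (the byte count in the CTRL_BC_MASK field) and
 * addr = 0x01000000 + dataoffsetlow, each byte-swapped by BUS_SWAP32 as
 * the bus requires.
 */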
649
650 static INLINE void
651 dma64_dd_upd (dma_info_t * di, dma64dd_t * ddring, ulong pa, uint outidx,
652 uint32 * flags, uint32 bufcount)
653 {
654 uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
655
656 /* on a PCI bus with a large (>1 GB) physical address, use the address extension */
657 if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
658 {
659 W_SM (&ddring[outidx].addrlow, BUS_SWAP32 (pa + di->dataoffsetlow));
660 W_SM (&ddring[outidx].addrhigh, BUS_SWAP32 (0 + di->dataoffsethigh));
661 W_SM (&ddring[outidx].ctrl1, BUS_SWAP32 (*flags));
662 W_SM (&ddring[outidx].ctrl2, BUS_SWAP32 (ctrl2));
663 }
664 else
665 {
666 /* address extension */
667 uint32 ae;
668 ASSERT (di->addrext);
669
670 ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
671 pa &= ~PCI32ADDR_HIGH;
672
673 ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
674 W_SM (&ddring[outidx].addrlow, BUS_SWAP32 (pa + di->dataoffsetlow));
675 W_SM (&ddring[outidx].addrhigh, BUS_SWAP32 (0 + di->dataoffsethigh));
676 W_SM (&ddring[outidx].ctrl1, BUS_SWAP32 (*flags));
677 W_SM (&ddring[outidx].ctrl2, BUS_SWAP32 (ctrl2));
678 }
679 }
680
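/*
 * Annotation: probe for DmaExtendedAddrChanges by setting the AE bit,
 * reading it back, then clearing it again; if the bit sticks, the engine
 * implements address extension.
 */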
681 static bool
682 _dma32_addrext (osl_t * osh, dma32regs_t * dma32regs)
683 {
684 uint32 w;
685
686 OR_REG (osh, &dma32regs->control, XC_AE);
687 w = R_REG (osh, &dma32regs->control);
688 AND_REG (osh, &dma32regs->control, ~XC_AE);
689 return ((w & XC_AE) == XC_AE);
690 }
691
692 static bool
693 _dma_alloc (dma_info_t * di, uint direction)
694 {
695 if (DMA64_ENAB (di))
696 {
697 return dma64_alloc (di, direction);
698 }
699 else
700 {
701 return dma32_alloc (di, direction);
702 }
703 }
704
705 /* !! may be called with core in reset */
706 static void
707 _dma_detach (dma_info_t * di)
708 {
709 if (di == NULL)
710 return;
711
712 DMA_TRACE (("%s: dma_detach\n", di->name));
713
714 /* shouldn't be here if descriptors are unreclaimed */
715 ASSERT (di->txin == di->txout);
716 ASSERT (di->rxin == di->rxout);
717
718 /* free dma descriptor rings */
719 if (DMA64_ENAB (di))
720 {
721 if (di->txd64)
722 DMA_FREE_CONSISTENT (di->osh,
723 ((int8 *) (uintptr) di->txd64 - di->txdalign),
724 di->txdalloc, (di->txdpa - di->txdalign),
725 &di->tx_dmah);
726 if (di->rxd64)
727 DMA_FREE_CONSISTENT (di->osh,
728 ((int8 *) (uintptr) di->rxd64 - di->rxdalign),
729 di->rxdalloc, (di->rxdpa - di->rxdalign),
730 &di->rx_dmah);
731 }
732 else
733 {
734 if (di->txd32)
735 DMA_FREE_CONSISTENT (di->osh,
736 ((int8 *) (uintptr) di->txd32 - di->txdalign),
737 di->txdalloc, (di->txdpa - di->txdalign),
738 &di->tx_dmah);
739 if (di->rxd32)
740 DMA_FREE_CONSISTENT (di->osh,
741 ((int8 *) (uintptr) di->rxd32 - di->rxdalign),
742 di->rxdalloc, (di->rxdpa - di->rxdalign),
743 &di->rx_dmah);
744 }
745
746 /* free packet pointer vectors */
747 if (di->txp)
748 MFREE (di->osh, (void *) di->txp, (di->ntxd * sizeof (void *)));
749 if (di->rxp)
750 MFREE (di->osh, (void *) di->rxp, (di->nrxd * sizeof (void *)));
751
752 /* free tx packet DMA handles */
753 if (di->txp_dmah)
754 MFREE (di->osh, (void *) di->txp_dmah, di->ntxd * sizeof (osldma_t **));
755
756 /* free rx packet DMA handles */
757 if (di->rxp_dmah)
758 MFREE (di->osh, (void *) di->rxp_dmah, di->nrxd * sizeof (osldma_t **));
759
760 /* free our private info structure */
761 MFREE (di->osh, (void *) di, sizeof (dma_info_t));
762
763 }
764
765 /* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
766 static bool
767 _dma_isaddrext (dma_info_t * di)
768 {
769 if (DMA64_ENAB (di))
770 {
771 /* DMA64 supports full 32- or 64-bit addressing. AE is always valid */
772
773 /* not all tx or rx channels are available */
774 if (di->d64txregs != NULL)
775 {
776 if (!_dma64_addrext (di->osh, di->d64txregs))
777 {
778 DMA_ERROR (("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name));
779 ASSERT (0);
780 }
781 return TRUE;
782 }
783 else if (di->d64rxregs != NULL)
784 {
785 if (!_dma64_addrext (di->osh, di->d64rxregs))
786 {
787 DMA_ERROR (("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name));
788 ASSERT (0);
789 }
790 return TRUE;
791 }
792 return FALSE;
793 }
794 else if (di->d32txregs)
795 return (_dma32_addrext (di->osh, di->d32txregs));
796 else if (di->d32rxregs)
797 return (_dma32_addrext (di->osh, di->d32rxregs));
798 return FALSE;
799 }
800
801 /* initialize descriptor table base address */
802 static void
803 _dma_ddtable_init (dma_info_t * di, uint direction, ulong pa)
804 {
805 if (DMA64_ENAB (di))
806 {
807
808 if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
809 {
810 if (direction == DMA_TX)
811 {
812 W_REG (di->osh, &di->d64txregs->addrlow,
813 (pa + di->ddoffsetlow));
814 W_REG (di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
815 }
816 else
817 {
818 W_REG (di->osh, &di->d64rxregs->addrlow,
819 (pa + di->ddoffsetlow));
820 W_REG (di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
821 }
822 }
823 else
824 {
825 /* DMA64 32bits address extension */
826 uint32 ae;
827 ASSERT (di->addrext);
828
829 /* shift the high bit(s) from pa to ae */
830 ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
831 pa &= ~PCI32ADDR_HIGH;
832
833 if (direction == DMA_TX)
834 {
835 W_REG (di->osh, &di->d64txregs->addrlow,
836 (pa + di->ddoffsetlow));
837 W_REG (di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
838 SET_REG (di->osh, &di->d64txregs->control, D64_XC_AE,
839 (ae << D64_XC_AE_SHIFT));
840 }
841 else
842 {
843 W_REG (di->osh, &di->d64rxregs->addrlow,
844 (pa + di->ddoffsetlow));
845 W_REG (di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
846 SET_REG (di->osh, &di->d64rxregs->control, D64_RC_AE,
847 (ae << D64_RC_AE_SHIFT));
848 }
849 }
850
851 }
852 else
853 {
854 if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
855 {
856 if (direction == DMA_TX)
857 W_REG (di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
858 else
859 W_REG (di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
860 }
861 else
862 {
863 /* dma32 address extension */
864 uint32 ae;
865 ASSERT (di->addrext);
866
867 /* shift the high bit(s) from pa to ae */
868 ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
869 pa &= ~PCI32ADDR_HIGH;
870
871 if (direction == DMA_TX)
872 {
873 W_REG (di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
874 SET_REG (di->osh, &di->d32txregs->control, XC_AE,
875 ae << XC_AE_SHIFT);
876 }
877 else
878 {
879 W_REG (di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
880 SET_REG (di->osh, &di->d32rxregs->control, RC_AE,
881 ae << RC_AE_SHIFT);
882 }
883 }
884 }
885 }
886
887 static void
888 _dma_fifoloopbackenable (dma_info_t * di)
889 {
890 DMA_TRACE (("%s: dma_fifoloopbackenable\n", di->name));
891 if (DMA64_ENAB (di))
892 OR_REG (di->osh, &di->d64txregs->control, D64_XC_LE);
893 else
894 OR_REG (di->osh, &di->d32txregs->control, XC_LE);
895 }
896
897 static void
898 _dma_rxinit (dma_info_t * di)
899 {
900 DMA_TRACE (("%s: dma_rxinit\n", di->name));
901
902 if (di->nrxd == 0)
903 return;
904
905 di->rxin = di->rxout = 0;
906
907 /* clear rx descriptor ring */
908 if (DMA64_ENAB (di))
909 BZERO_SM ((void *) (uintptr) di->rxd64, (di->nrxd * sizeof (dma64dd_t)));
910 else
911 BZERO_SM ((void *) (uintptr) di->rxd32, (di->nrxd * sizeof (dma32dd_t)));
912
913 _dma_rxenable (di);
914 _dma_ddtable_init (di, DMA_RX, di->rxdpa);
915 }
916
917 static void
918 _dma_rxenable (dma_info_t * di)
919 {
920 DMA_TRACE (("%s: dma_rxenable\n", di->name));
921
922 if (DMA64_ENAB (di))
923 W_REG (di->osh, &di->d64rxregs->control,
924 ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
925 else
926 W_REG (di->osh, &di->d32rxregs->control,
927 ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
928 }
929
930 /* !! rx entry routine, returns a pointer to the next frame received,
931 * or NULL if there are no more
932 */
933 static void *
934 _dma_rx (dma_info_t * di)
935 {
936 void *p;
937 uint len;
938 int skiplen = 0;
939
940 while ((p = _dma_getnextrxp (di, FALSE)))
941 {
942 /* skip giant packets which span multiple rx descriptors */
943 if (skiplen > 0)
944 {
945 skiplen -= di->rxbufsize;
946 if (skiplen < 0)
947 skiplen = 0;
948 PKTFREE (di->osh, p, FALSE);
949 continue;
950 }
951
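/* annotation: the receive engine deposits the frame length in the first
 * uint16 of each buffer (little-endian, hence the ltoh16 below);
 * _dma_rxfill pre-clears that word before posting so a stale length is
 * never read back */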
952 len = ltoh16 (*(uint16 *) (PKTDATA (di->osh, p)));
953 DMA_TRACE (("%s: dma_rx len %d\n", di->name, len));
954
955 /* bad frame length check */
956 if (len > (di->rxbufsize - di->rxoffset))
957 {
958 DMA_ERROR (("%s: dma_rx: bad frame length (%d)\n", di->name, len));
959 if (len > 0)
960 skiplen = len - (di->rxbufsize - di->rxoffset);
961 PKTFREE (di->osh, p, FALSE);
962 di->hnddma.rxgiants++;
963 continue;
964 }
965
966 /* set actual length */
967 PKTSETLEN (di->osh, p, (di->rxoffset + len));
968
969 break;
970 }
971
972 return (p);
973 }
974
975 /* post receive buffers */
976 static void
977 _dma_rxfill (dma_info_t * di)
978 {
979 void *p;
980 uint rxin, rxout;
981 uint32 flags = 0;
982 uint n;
983 uint i;
984 uint32 pa;
985 uint extra_offset = 0;
986
987 /*
988 * Determine how many receive buffers we're lacking
989 * from the full complement, allocate, initialize,
990 * and post them, then update the chip rx lastdscr.
991 */
992
993 rxin = di->rxin;
994 rxout = di->rxout;
995
996 n = di->nrxpost - NRXDACTIVE (rxin, rxout);
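/* worked example (annotation): with nrxd = 512, nrxpost = 256, rxin = 10
 * and rxout = 200, NRXDACTIVE is 190, so this pass posts up to 66 buffers */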
997
998 DMA_TRACE (("%s: dma_rxfill: post %d\n", di->name, n));
999
1000 if (di->rxbufsize > BCMEXTRAHDROOM)
1001 extra_offset = BCMEXTRAHDROOM;
1002
1003 for (i = 0; i < n; i++)
1004 {
1005 /* di->rxbufsize doesn't include the extra headroom, so we need to add it
1006 to the size to be allocated
1007 */
1008 if ((p = PKTGET (di->osh, di->rxbufsize + extra_offset, FALSE)) == NULL)
1009 {
1010 DMA_ERROR (("%s: dma_rxfill: out of rxbufs\n", di->name));
1011 di->hnddma.rxnobuf++;
1012 break;
1013 }
1014 /* reserve an extra headroom, if applicable */
1015 if (extra_offset)
1016 PKTPULL (di->osh, p, extra_offset);
1017
1018 /* Do a cached write instead of uncached write since DMA_MAP
1019 * will flush the cache.
1020 */
1021 *(uint32 *) (PKTDATA (di->osh, p)) = 0;
1022
1023 pa = (uint32) DMA_MAP (di->osh, PKTDATA (di->osh, p),
1024 di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);
1025
1026 ASSERT (ISALIGNED (pa, 4));
1027
1028 /* save the free packet pointer */
1029 ASSERT (di->rxp[rxout] == NULL);
1030 di->rxp[rxout] = p;
1031
1032 /* reset flags for each descriptor */
1033 flags = 0;
1034 if (DMA64_ENAB (di))
1035 {
1036 if (rxout == (di->nrxd - 1))
1037 flags = D64_CTRL1_EOT;
1038
1039 dma64_dd_upd (di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
1040 }
1041 else
1042 {
1043 if (rxout == (di->nrxd - 1))
1044 flags = CTRL_EOT;
1045
1046 dma32_dd_upd (di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
1047 }
1048 rxout = NEXTRXD (rxout);
1049 }
1050
1051 di->rxout = rxout;
1052
1053 /* update the chip lastdscr pointer */
1054 if (DMA64_ENAB (di))
1055 {
1056 W_REG (di->osh, &di->d64rxregs->ptr, I2B (rxout, dma64dd_t));
1057 }
1058 else
1059 {
1060 W_REG (di->osh, &di->d32rxregs->ptr, I2B (rxout, dma32dd_t));
1061 }
1062 }
1063
1064 /* like getnexttxp but no reclaim */
1065 static void *
1066 _dma_peeknexttxp (dma_info_t * di)
1067 {
1068 uint end, i;
1069
1070 if (di->ntxd == 0)
1071 return (NULL);
1072
1073 if (DMA64_ENAB (di))
1074 {
1075 end =
1076 B2I (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK,
1077 dma64dd_t);
1078 }
1079 else
1080 {
1081 end =
1082 B2I (R_REG (di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
1083 }
1084
1085 for (i = di->txin; i != end; i = NEXTTXD (i))
1086 if (di->txp[i])
1087 return (di->txp[i]);
1088
1089 return (NULL);
1090 }
1091
1092 static void
1093 _dma_rxreclaim (dma_info_t * di)
1094 {
1095 void *p;
1096
1097 /* "unused local" warning suppression for OSLs that
1098 * define PKTFREE() without using the di->osh arg
1099 */
1100 di = di;
1101
1102 DMA_TRACE (("%s: dma_rxreclaim\n", di->name));
1103
1104 while ((p = _dma_getnextrxp (di, TRUE)))
1105 PKTFREE (di->osh, p, FALSE);
1106 }
1107
1108 static void *
1109 _dma_getnextrxp (dma_info_t * di, bool forceall)
1110 {
1111 if (di->nrxd == 0)
1112 return (NULL);
1113
1114 if (DMA64_ENAB (di))
1115 {
1116 return dma64_getnextrxp (di, forceall);
1117 }
1118 else
1119 {
1120 return dma32_getnextrxp (di, forceall);
1121 }
1122 }
1123
1124 static void
1125 _dma_txblock (dma_info_t * di)
1126 {
1127 di->hnddma.txavail = 0;
1128 }
1129
1130 static void
1131 _dma_txunblock (dma_info_t * di)
1132 {
1133 di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
1134 }
1135
1136 static uint
1137 _dma_txactive (dma_info_t * di)
1138 {
1139 return (NTXDACTIVE (di->txin, di->txout));
1140 }
1141
1142 static void
1143 _dma_counterreset (dma_info_t * di)
1144 {
1145 /* reset all software counters */
1146 di->hnddma.rxgiants = 0;
1147 di->hnddma.rxnobuf = 0;
1148 di->hnddma.txnobuf = 0;
1149 }
1150
1151 /* get the address of the variable so it can be changed later */
1152 static uintptr
1153 _dma_getvar (dma_info_t * di, const char *name)
1154 {
1155 if (!strcmp (name, "&txavail"))
1156 return ((uintptr) & (di->hnddma.txavail));
1157 else
1158 {
1159 ASSERT (0);
1160 }
1161 return (0);
1162 }
1163
1164 void
1165 dma_txpioloopback (osl_t * osh, dma32regs_t * regs)
1166 {
1167 OR_REG (osh, &regs->control, XC_LE);
1168 }
1169
1170 #ifdef BCMDBG
1171 static void
1172 dma32_dumpring (dma_info_t * di, struct bcmstrbuf *b, dma32dd_t * ring,
1173 uint start, uint end, uint max_num)
1174 {
1175 uint i;
1176
1177 for (i = start; i != end; i = XXD ((i + 1), max_num))
1178 {
1179 /* print each descriptor's 8 bytes from the highest word to the lowest */
1180 bcm_bprintf (b, "ring index %d: 0x%x %x\n", i, ring[i].addr,
1181 ring[i].ctrl);
1182 }
1183 }
1184
1185 static void
1186 dma32_dumptx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
1187 {
1188 if (di->ntxd == 0)
1189 return;
1190
1191 bcm_bprintf (b, "DMA32: txd32 %p txdpa 0x%lx txp %p txin %d txout %d "
1192 "txavail %d\n", di->txd32, di->txdpa, di->txp, di->txin,
1193 di->txout, di->hnddma.txavail);
1194
1195 bcm_bprintf (b, "xmtcontrol 0x%x xmtaddr 0x%x xmtptr 0x%x xmtstatus 0x%x\n",
1196 R_REG (di->osh, &di->d32txregs->control),
1197 R_REG (di->osh, &di->d32txregs->addr),
1198 R_REG (di->osh, &di->d32txregs->ptr),
1199 R_REG (di->osh, &di->d32txregs->status));
1200
1201 if (dumpring && di->txd32)
1202 dma32_dumpring (di, b, di->txd32, di->txin, di->txout, di->ntxd);
1203 }
1204
1205 static void
1206 dma32_dumprx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
1207 {
1208 if (di->nrxd == 0)
1209 return;
1210
1211 bcm_bprintf (b, "DMA32: rxd32 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
1212 di->rxd32, di->rxdpa, di->rxp, di->rxin, di->rxout);
1213
1214 bcm_bprintf (b, "rcvcontrol 0x%x rcvaddr 0x%x rcvptr 0x%x rcvstatus 0x%x\n",
1215 R_REG (di->osh, &di->d32rxregs->control),
1216 R_REG (di->osh, &di->d32rxregs->addr),
1217 R_REG (di->osh, &di->d32rxregs->ptr),
1218 R_REG (di->osh, &di->d32rxregs->status));
1219 if (di->rxd32 && dumpring)
1220 dma32_dumpring (di, b, di->rxd32, di->rxin, di->rxout, di->nrxd);
1221 }
1222
1223 static void
1224 dma32_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
1225 {
1226 dma32_dumptx (di, b, dumpring);
1227 dma32_dumprx (di, b, dumpring);
1228 }
1229
1230 static void
1231 dma64_dumpring (dma_info_t * di, struct bcmstrbuf *b, dma64dd_t * ring,
1232 uint start, uint end, uint max_num)
1233 {
1234 uint i;
1235
1236 for (i = start; i != end; i = XXD ((i + 1), max_num))
1237 {
1238 /* print each descriptor's 16 bytes from the highest word to the lowest */
1239 bcm_bprintf (b, "ring index %d: 0x%x %x %x %x\n",
1240 i, ring[i].addrhigh, ring[i].addrlow, ring[i].ctrl2,
1241 ring[i].ctrl1);
1242 }
1243 }
1244
1245 static void
1246 dma64_dumptx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
1247 {
1248 if (di->ntxd == 0)
1249 return;
1250
1251 bcm_bprintf (b, "DMA64: txd64 %p txdpa 0x%lx txp %p txin %d txout %d "
1252 "txavail %d\n", di->txd64, di->txdpa, di->txp, di->txin,
1253 di->txout, di->hnddma.txavail);
1254
1255 bcm_bprintf (b, "xmtcontrol 0x%x xmtaddrlow 0x%x xmtaddrhigh 0x%x "
1256 "xmtptr 0x%x xmtstatus0 0x%x xmtstatus1 0x%x\n",
1257 R_REG (di->osh, &di->d64txregs->control),
1258 R_REG (di->osh, &di->d64txregs->addrlow),
1259 R_REG (di->osh, &di->d64txregs->addrhigh),
1260 R_REG (di->osh, &di->d64txregs->ptr),
1261 R_REG (di->osh, &di->d64txregs->status0),
1262 R_REG (di->osh, &di->d64txregs->status1));
1263
1264 if (dumpring && di->txd64)
1265 {
1266 dma64_dumpring (di, b, di->txd64, di->txin, di->txout, di->ntxd);
1267 }
1268 }
1269
1270 static void
1271 dma64_dumprx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
1272 {
1273 if (di->nrxd == 0)
1274 return;
1275
1276 bcm_bprintf (b, "DMA64: rxd64 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
1277 di->rxd64, di->rxdpa, di->rxp, di->rxin, di->rxout);
1278
1279 bcm_bprintf (b, "rcvcontrol 0x%x rcvaddrlow 0x%x rcvaddrhigh 0x%x rcvptr "
1280 "0x%x rcvstatus0 0x%x rcvstatus1 0x%x\n",
1281 R_REG (di->osh, &di->d64rxregs->control),
1282 R_REG (di->osh, &di->d64rxregs->addrlow),
1283 R_REG (di->osh, &di->d64rxregs->addrhigh),
1284 R_REG (di->osh, &di->d64rxregs->ptr),
1285 R_REG (di->osh, &di->d64rxregs->status0),
1286 R_REG (di->osh, &di->d64rxregs->status1));
1287 if (di->rxd64 && dumpring)
1288 {
1289 dma64_dumpring (di, b, di->rxd64, di->rxin, di->rxout, di->nrxd);
1290 }
1291 }
1292
1293 static void
1294 dma64_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
1295 {
1296 dma64_dumptx (di, b, dumpring);
1297 dma64_dumprx (di, b, dumpring);
1298 }
1299
1300 #endif /* BCMDBG */
1301
1302
1303 /* 32-bit DMA functions */
1304 static void
1305 dma32_txinit (dma_info_t * di)
1306 {
1307 DMA_TRACE (("%s: dma_txinit\n", di->name));
1308
1309 if (di->ntxd == 0)
1310 return;
1311
1312 di->txin = di->txout = 0;
1313 di->hnddma.txavail = di->ntxd - 1;
1314
1315 /* clear tx descriptor ring */
1316 BZERO_SM ((void *) (uintptr) di->txd32, (di->ntxd * sizeof (dma32dd_t)));
1317 W_REG (di->osh, &di->d32txregs->control, XC_XE);
1318 _dma_ddtable_init (di, DMA_TX, di->txdpa);
1319 }
1320
1321 static bool
1322 dma32_txenabled (dma_info_t * di)
1323 {
1324 uint32 xc;
1325
1326 /* If the chip is dead, it is not enabled :-) */
1327 xc = R_REG (di->osh, &di->d32txregs->control);
1328 return ((xc != 0xffffffff) && (xc & XC_XE));
1329 }
1330
1331 static void
1332 dma32_txsuspend (dma_info_t * di)
1333 {
1334 DMA_TRACE (("%s: dma_txsuspend\n", di->name));
1335
1336 if (di->ntxd == 0)
1337 return;
1338
1339 OR_REG (di->osh, &di->d32txregs->control, XC_SE);
1340 }
1341
1342 static void
1343 dma32_txresume (dma_info_t * di)
1344 {
1345 DMA_TRACE (("%s: dma_txresume\n", di->name));
1346
1347 if (di->ntxd == 0)
1348 return;
1349
1350 AND_REG (di->osh, &di->d32txregs->control, ~XC_SE);
1351 }
1352
1353 static bool
1354 dma32_txsuspended (dma_info_t * di)
1355 {
1356 return (di->ntxd == 0)
1357 || ((R_REG (di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
1358 }
1359
1360 static void
1361 dma32_txreclaim (dma_info_t * di, bool forceall)
1362 {
1363 void *p;
1364
1365 DMA_TRACE (("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));
1366
1367 while ((p = dma32_getnexttxp (di, forceall)))
1368 PKTFREE (di->osh, p, TRUE);
1369 }
1370
1371 static bool
1372 dma32_txstopped (dma_info_t * di)
1373 {
1374 return ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
1375 XS_XS_STOPPED);
1376 }
1377
1378 static bool
1379 dma32_rxstopped (dma_info_t * di)
1380 {
1381 return ((R_REG (di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
1382 RS_RS_STOPPED);
1383 }
1384
1385 static bool
1386 dma32_alloc (dma_info_t * di, uint direction)
1387 {
1388 uint size;
1389 uint ddlen;
1390 void *va;
1391
1392 ddlen = sizeof (dma32dd_t);
1393
1394 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
1395
1396 if (!ISALIGNED (DMA_CONSISTENT_ALIGN, D32RINGALIGN))
1397 size += D32RINGALIGN;
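/* annotation: when the allocator's guaranteed alignment is weaker than the
 * ring requirement, over-allocate by one alignment unit so the ring start
 * can be rounded up below; txdalign/rxdalign record the skipped bytes so
 * _dma_detach can free from the original base */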
1398
1399
1400 if (direction == DMA_TX)
1401 {
1402 if ((va =
1403 DMA_ALLOC_CONSISTENT (di->osh, size, &di->txdpa,
1404 &di->tx_dmah)) == NULL)
1405 {
1406 DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
1407 di->name));
1408 return FALSE;
1409 }
1410
1411 di->txd32 = (dma32dd_t *) ROUNDUP ((uintptr) va, D32RINGALIGN);
1412 di->txdalign = (uint) ((int8 *) (uintptr) di->txd32 - (int8 *) va);
1413 di->txdpa += di->txdalign;
1414 di->txdalloc = size;
1415 ASSERT (ISALIGNED ((uintptr) di->txd32, D32RINGALIGN));
1416 }
1417 else
1418 {
1419 if ((va =
1420 DMA_ALLOC_CONSISTENT (di->osh, size, &di->rxdpa,
1421 &di->rx_dmah)) == NULL)
1422 {
1423 DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
1424 di->name));
1425 return FALSE;
1426 }
1427 di->rxd32 = (dma32dd_t *) ROUNDUP ((uintptr) va, D32RINGALIGN);
1428 di->rxdalign = (uint) ((int8 *) (uintptr) di->rxd32 - (int8 *) va);
1429 di->rxdpa += di->rxdalign;
1430 di->rxdalloc = size;
1431 ASSERT (ISALIGNED ((uintptr) di->rxd32, D32RINGALIGN));
1432 }
1433
1434 return TRUE;
1435 }
1436
1437 static bool
1438 dma32_txreset (dma_info_t * di)
1439 {
1440 uint32 status;
1441
1442 if (di->ntxd == 0)
1443 return TRUE;
1444
1445 /* suspend tx DMA first */
1446 W_REG (di->osh, &di->d32txregs->control, XC_SE);
1447 SPINWAIT (((status = (R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK))
1448 != XS_XS_DISABLED) &&
1449 (status != XS_XS_IDLE) && (status != XS_XS_STOPPED), (10000));
1450
1451 W_REG (di->osh, &di->d32txregs->control, 0);
1452 SPINWAIT (((status = (R_REG (di->osh,
1453 &di->d32txregs->status) & XS_XS_MASK)) !=
1454 XS_XS_DISABLED), 10000);
1455
1456 /* wait for the last transaction to complete */
1457 OSL_DELAY (300);
1458
1459 return (status == XS_XS_DISABLED);
1460 }
1461
1462 static bool
1463 dma32_rxidle (dma_info_t * di)
1464 {
1465 DMA_TRACE (("%s: dma_rxidle\n", di->name));
1466
1467 if (di->nrxd == 0)
1468 return TRUE;
1469
1470 return ((R_REG (di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
1471 R_REG (di->osh, &di->d32rxregs->ptr));
1472 }
1473
1474 static bool
1475 dma32_rxreset (dma_info_t * di)
1476 {
1477 uint32 status;
1478
1479 if (di->nrxd == 0)
1480 return TRUE;
1481
1482 W_REG (di->osh, &di->d32rxregs->control, 0);
1483 SPINWAIT (((status = (R_REG (di->osh,
1484 &di->d32rxregs->status) & RS_RS_MASK)) !=
1485 RS_RS_DISABLED), 10000);
1486
1487 return (status == RS_RS_DISABLED);
1488 }
1489
1490 static bool
1491 dma32_rxenabled (dma_info_t * di)
1492 {
1493 uint32 rc;
1494
1495 rc = R_REG (di->osh, &di->d32rxregs->control);
1496 return ((rc != 0xffffffff) && (rc & RC_RE));
1497 }
1498
1499 static bool
1500 dma32_txsuspendedidle (dma_info_t * di)
1501 {
1502 if (di->ntxd == 0)
1503 return TRUE;
1504
1505 if (!(R_REG (di->osh, &di->d32txregs->control) & XC_SE))
1506 return 0;
1507
1508 if ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
1509 return 0;
1510
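/* annotation: sample the status twice -- if the engine only just went idle,
 * wait 2 us and confirm it is still idle before reporting suspended+idle */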
1511 OSL_DELAY (2);
1512 return ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
1513 XS_XS_IDLE);
1514 }
1515
1516 /* !! tx entry routine
1517 * supports full 32-bit dma engine buffer addressing so
1518 * dma buffers can cross 4 KB page boundaries.
1519 */
1520 static int
1521 dma32_txfast (dma_info_t * di, void *p0, bool commit)
1522 {
1523 void *p, *next;
1524 uchar *data;
1525 uint len;
1526 uint txout;
1527 uint32 flags = 0;
1528 uint32 pa;
1529
1530 DMA_TRACE (("%s: dma_txfast\n", di->name));
1531
1532 txout = di->txout;
1533
1534 /*
1535 * Walk the chain of packet buffers
1536 * allocating and initializing transmit descriptor entries.
1537 */
1538 for (p = p0; p; p = next)
1539 {
1540 data = PKTDATA (di->osh, p);
1541 len = PKTLEN (di->osh, p);
1542 next = PKTNEXT (di->osh, p);
1543
1544 /* return nonzero if out of tx descriptors */
1545 if (NEXTTXD (txout) == di->txin)
1546 goto outoftxd;
1547
1548 if (len == 0)
1549 continue;
1550
1551 /* get physical address of buffer start */
1552 pa =
1553 (uint32) DMA_MAP (di->osh, data, len, DMA_TX, p,
1554 &di->txp_dmah[txout]);
1555
1556 flags = 0;
1557 if (p == p0)
1558 flags |= CTRL_SOF;
1559 if (next == NULL)
1560 flags |= (CTRL_IOC | CTRL_EOF);
1561 if (txout == (di->ntxd - 1))
1562 flags |= CTRL_EOT;
1563
1564 dma32_dd_upd (di, di->txd32, pa, txout, &flags, len);
1565 ASSERT (di->txp[txout] == NULL);
1566
1567 txout = NEXTTXD (txout);
1568 }
1569
1570 /* if last txd eof not set, fix it */
1571 if (!(flags & CTRL_EOF))
1572 W_SM (&di->txd32[PREVTXD (txout)].ctrl,
1573 BUS_SWAP32 (flags | CTRL_IOC | CTRL_EOF));
1574
1575 /* save the packet */
1576 di->txp[PREVTXD (txout)] = p0;
1577
1578 /* bump the tx descriptor index */
1579 di->txout = txout;
1580
1581 /* kick the chip */
1582 if (commit)
1583 W_REG (di->osh, &di->d32txregs->ptr, I2B (txout, dma32dd_t));
1584
1585 /* tx flow control */
1586 di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
1587
1588 return (0);
1589
1590 outoftxd:
1591 DMA_ERROR (("%s: dma_txfast: out of txds\n", di->name));
1592 PKTFREE (di->osh, p0, TRUE);
1593 di->hnddma.txavail = 0;
1594 di->hnddma.txnobuf++;
1595 return (-1);
1596 }
1597
1598 /*
1599 * Reclaim next completed txd (txds if using chained buffers) and
1600 * return associated packet.
1601 * If 'forceall' is true, reclaim txd(s) and return the associated packet
1602 * regardless of the value of the hardware "curr" pointer.
1603 */
1604 static void *
1605 dma32_getnexttxp (dma_info_t * di, bool forceall)
1606 {
1607 uint start, end, i;
1608 void *txp;
1609
1610 DMA_TRACE (("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
1611
1612 if (di->ntxd == 0)
1613 return (NULL);
1614
1615 txp = NULL;
1616
1617 start = di->txin;
1618 if (forceall)
1619 end = di->txout;
1620 else
1621 end =
1622 B2I (R_REG (di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
1623
1624 if ((start == 0) && (end > di->txout))
1625 goto bogus;
1626
1627 for (i = start; i != end && !txp; i = NEXTTXD (i))
1628 {
1629 DMA_UNMAP (di->osh,
1630 (BUS_SWAP32 (R_SM (&di->txd32[i].addr)) - di->dataoffsetlow),
1631 (BUS_SWAP32 (R_SM (&di->txd32[i].ctrl)) & CTRL_BC_MASK),
1632 DMA_TX, di->txp[i], &di->txp_dmah[i]);
1633
1634 W_SM (&di->txd32[i].addr, 0xdeadbeef);
1635 txp = di->txp[i];
1636 di->txp[i] = NULL;
1637 }
1638
1639 di->txin = i;
1640
1641 /* tx flow control */
1642 di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
1643
1644 return (txp);
1645
1646 bogus:
1647 /*
1648 DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
1649 start, end, di->txout, forceall));
1650 */
1651 return (NULL);
1652 }
1653
1654 static void *
1655 dma32_getnextrxp (dma_info_t * di, bool forceall)
1656 {
1657 uint i;
1658 void *rxp;
1659
1660 /* if forcing, dma engine must be disabled */
1661 ASSERT (!forceall || !dma32_rxenabled (di));
1662
1663 i = di->rxin;
1664
1665 /* return if no packets posted */
1666 if (i == di->rxout)
1667 return (NULL);
1668
1669 /* ignore curr if forceall */
1670 if (!forceall
1671 && (i ==
1672 B2I (R_REG (di->osh, &di->d32rxregs->status) & RS_CD_MASK,
1673 dma32dd_t)))
1674 return (NULL);
1675
1676 /* get the packet pointer that corresponds to the rx descriptor */
1677 rxp = di->rxp[i];
1678 ASSERT (rxp);
1679 di->rxp[i] = NULL;
1680
1681 /* clear this packet from the descriptor ring */
1682 DMA_UNMAP (di->osh,
1683 (BUS_SWAP32 (R_SM (&di->rxd32[i].addr)) - di->dataoffsetlow),
1684 di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
1685
1686 W_SM (&di->rxd32[i].addr, 0xdeadbeef);
1687
1688 di->rxin = NEXTRXD (i);
1689
1690 return (rxp);
1691 }
1692
1693 /*
1694 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
1695 */
1696 static void
1697 dma32_txrotate (dma_info_t * di)
1698 {
1699 uint ad;
1700 uint nactive;
1701 uint rot;
1702 uint old, new;
1703 uint32 w;
1704 uint first, last;
1705
1706 ASSERT (dma32_txsuspendedidle (di));
1707
1708 nactive = _dma_txactive (di);
1709 ad =
1710 B2I (((R_REG (di->osh, &di->d32txregs->status) & XS_AD_MASK) >>
1711 XS_AD_SHIFT), dma32dd_t);
1712 rot = TXD (ad - di->txin);
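/* worked example (annotation): with ntxd = 8, txin = 2 and the hardware
 * active descriptor at ad = 5, rot = (5 - 2) & 7 = 3, so each live entry
 * below moves forward three slots and txin/txout advance by the same */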
1713
1714 ASSERT (rot < di->ntxd);
1715
1716 /* full-ring case is a lot harder - don't worry about this */
1717 if (rot >= (di->ntxd - nactive))
1718 {
1719 DMA_ERROR (("%s: dma_txrotate: ring full - punt\n", di->name));
1720 return;
1721 }
1722
1723 first = di->txin;
1724 last = PREVTXD (di->txout);
1725
1726 /* move entries starting at last and moving backwards to first */
1727 for (old = last; old != PREVTXD (first); old = PREVTXD (old))
1728 {
1729 new = TXD (old + rot);
1730
1731 /*
1732 * Move the tx dma descriptor.
1733 * EOT is set only in the last entry in the ring.
1734 */
1735 w = BUS_SWAP32 (R_SM (&di->txd32[old].ctrl)) & ~CTRL_EOT;
1736 if (new == (di->ntxd - 1))
1737 w |= CTRL_EOT;
1738 W_SM (&di->txd32[new].ctrl, BUS_SWAP32 (w));
1739 W_SM (&di->txd32[new].addr, R_SM (&di->txd32[old].addr));
1740
1741 /* zap the old tx dma descriptor address field */
1742 W_SM (&di->txd32[old].addr, BUS_SWAP32 (0xdeadbeef));
1743
1744 /* move the corresponding txp[] entry */
1745 ASSERT (di->txp[new] == NULL);
1746 di->txp[new] = di->txp[old];
1747 di->txp[old] = NULL;
1748 }
1749
1750 /* update txin and txout */
1751 di->txin = ad;
1752 di->txout = TXD (di->txout + rot);
1753 di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
1754
1755 /* kick the chip */
1756 W_REG (di->osh, &di->d32txregs->ptr, I2B (di->txout, dma32dd_t));
1757 }
1758
1759 /* 64-bit DMA functions */
1760
1761 #ifdef BCMDMA64
1762 static void
1763 dma64_txinit (dma_info_t * di)
1764 {
1765 DMA_TRACE (("%s: dma_txinit\n", di->name));
1766
1767 if (di->ntxd == 0)
1768 return;
1769
1770 di->txin = di->txout = 0;
1771 di->hnddma.txavail = di->ntxd - 1;
1772
1773 /* clear tx descriptor ring */
1774 BZERO_SM ((void *) (uintptr) di->txd64, (di->ntxd * sizeof (dma64dd_t)));
1775 W_REG (di->osh, &di->d64txregs->control, D64_XC_XE);
1776 _dma_ddtable_init (di, DMA_TX, di->txdpa);
1777 }
1778
1779 static bool
1780 dma64_txenabled (dma_info_t * di)
1781 {
1782 uint32 xc;
1783
1784 /* If the chip is dead, it is not enabled :-) */
1785 xc = R_REG (di->osh, &di->d64txregs->control);
1786 return ((xc != 0xffffffff) && (xc & D64_XC_XE));
1787 }
1788
1789 static void
1790 dma64_txsuspend (dma_info_t * di)
1791 {
1792 DMA_TRACE (("%s: dma_txsuspend\n", di->name));
1793
1794 if (di->ntxd == 0)
1795 return;
1796
1797 OR_REG (di->osh, &di->d64txregs->control, D64_XC_SE);
1798 }
1799
1800 static void
1801 dma64_txresume (dma_info_t * di)
1802 {
1803 DMA_TRACE (("%s: dma_txresume\n", di->name));
1804
1805 if (di->ntxd == 0)
1806 return;
1807
1808 AND_REG (di->osh, &di->d64txregs->control, ~D64_XC_SE);
1809 }
1810
1811 static bool
1812 dma64_txsuspended (dma_info_t * di)
1813 {
1814 return (di->ntxd == 0)
1815 || ((R_REG (di->osh, &di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
1816 }
1817
1818 static void
1819 dma64_txreclaim (dma_info_t * di, bool forceall)
1820 {
1821 void *p;
1822
1823 DMA_TRACE (("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));
1824
1825 while ((p = dma64_getnexttxp (di, forceall)))
1826 PKTFREE (di->osh, p, TRUE);
1827 }
1828
1829 static bool
1830 dma64_txstopped (dma_info_t * di)
1831 {
1832 return ((R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
1833 D64_XS0_XS_STOPPED);
1834 }
1835
1836 static bool
1837 dma64_rxstopped (dma_info_t * di)
1838 {
1839 return ((R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
1840 D64_RS0_RS_STOPPED);
1841 }
1842
1843 static bool
1844 dma64_alloc (dma_info_t * di, uint direction)
1845 {
1846 uint size;
1847 uint ddlen;
1848 uint32 alignbytes;
1849 void *va;
1850
1851 ddlen = sizeof (dma64dd_t);
1852
1853 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
1854
1855 alignbytes = di->dma64align;
1856
1857 if (!ISALIGNED (DMA_CONSISTENT_ALIGN, alignbytes))
1858 size += alignbytes;
1859
1860 if (direction == DMA_TX)
1861 {
1862 if ((va =
1863 DMA_ALLOC_CONSISTENT (di->osh, size, &di->txdpa,
1864 &di->tx_dmah)) == NULL)
1865 {
1866 DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
1867 di->name));
1868 return FALSE;
1869 }
1870
1871 di->txd64 = (dma64dd_t *) ROUNDUP ((uintptr) va, alignbytes);
1872 di->txdalign = (uint) ((int8 *) (uintptr) di->txd64 - (int8 *) va);
1873 di->txdpa += di->txdalign;
1874 di->txdalloc = size;
1875 ASSERT (ISALIGNED ((uintptr) di->txd64, alignbytes));
1876 }
1877 else
1878 {
1879 if ((va =
1880 DMA_ALLOC_CONSISTENT (di->osh, size, &di->rxdpa,
1881 &di->rx_dmah)) == NULL)
1882 {
1883 DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
1884 di->name));
1885 return FALSE;
1886 }
1887 di->rxd64 = (dma64dd_t *) ROUNDUP ((uintptr) va, alignbytes);
1888 di->rxdalign = (uint) ((int8 *) (uintptr) di->rxd64 - (int8 *) va);
1889 di->rxdpa += di->rxdalign;
1890 di->rxdalloc = size;
1891 ASSERT (ISALIGNED ((uintptr) di->rxd64, alignbytes));
1892 }
1893
1894 return TRUE;
1895 }
1896
1897 static bool
1898 dma64_txreset (dma_info_t * di)
1899 {
1900 uint32 status;
1901
1902 if (di->ntxd == 0)
1903 return TRUE;
1904
1905 /* suspend tx DMA first */
1906 W_REG (di->osh, &di->d64txregs->control, D64_XC_SE);
1907 SPINWAIT (((status =
1908 (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
1909 D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
1910 && (status != D64_XS0_XS_STOPPED), 10000);
1911
1912 W_REG (di->osh, &di->d64txregs->control, 0);
1913 SPINWAIT (((status =
1914 (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
1915 D64_XS0_XS_DISABLED), 10000);
1916
1917 /* wait for the last transaction to complete */
1918 OSL_DELAY (300);
1919
1920 return (status == D64_XS0_XS_DISABLED);
1921 }
1922
1923 static bool
1924 dma64_rxidle (dma_info_t * di)
1925 {
1926 DMA_TRACE (("%s: dma_rxidle\n", di->name));
1927
1928 if (di->nrxd == 0)
1929 return TRUE;
1930
1931 return ((R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
1932 R_REG (di->osh, &di->d64rxregs->ptr));
1933 }
1934
1935 static bool
1936 dma64_rxreset (dma_info_t * di)
1937 {
1938 uint32 status;
1939
1940 if (di->nrxd == 0)
1941 return TRUE;
1942
1943 W_REG (di->osh, &di->d64rxregs->control, 0);
1944 SPINWAIT (((status =
1945 (R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
1946 D64_RS0_RS_DISABLED), 10000);
1947
1948 return (status == D64_RS0_RS_DISABLED);
1949 }
1950
1951 static bool
1952 dma64_rxenabled (dma_info_t * di)
1953 {
1954 uint32 rc;
1955
1956 rc = R_REG (di->osh, &di->d64rxregs->control);
1957 return ((rc != 0xffffffff) && (rc & D64_RC_RE));
1958 }
1959
1960 static bool
1961 dma64_txsuspendedidle (dma_info_t * di)
1962 {
1963
1964 if (di->ntxd == 0)
1965 return TRUE;
1966
1967 if (!(R_REG (di->osh, &di->d64txregs->control) & D64_XC_SE))
1968 return 0;
1969
1970 if ((R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
1971 D64_XS0_XS_IDLE)
1972 return 1;
1973
1974 return 0;
1975 }
1976
1977
1978 /* !! tx entry routine */
1979 static int
1980 dma64_txfast (dma_info_t * di, void *p0, bool commit)
1981 {
1982 void *p, *next;
1983 uchar *data;
1984 uint len;
1985 uint txout;
1986 uint32 flags = 0;
1987 uint32 pa;
1988
1989 DMA_TRACE (("%s: dma_txfast\n", di->name));
1990
1991 txout = di->txout;
1992
1993 /*
1994 * Walk the chain of packet buffers
1995 * allocating and initializing transmit descriptor entries.
1996 */
1997 for (p = p0; p; p = next)
1998 {
1999 data = PKTDATA (di->osh, p);
2000 len = PKTLEN (di->osh, p);
2001 next = PKTNEXT (di->osh, p);
2002
2003 /* return nonzero if out of tx descriptors */
2004 if (NEXTTXD (txout) == di->txin)
2005 goto outoftxd;
2006
2007 if (len == 0)
2008 continue;
2009
2010 /* get physical address of buffer start */
2011 pa =
2012 (uint32) DMA_MAP (di->osh, data, len, DMA_TX, p,
2013 &di->txp_dmah[txout]);
2014
2015 flags = 0;
2016 if (p == p0)
2017 flags |= D64_CTRL1_SOF;
2018 if (next == NULL)
2019 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
2020 if (txout == (di->ntxd - 1))
2021 flags |= D64_CTRL1_EOT;
2022
2023 dma64_dd_upd (di, di->txd64, pa, txout, &flags, len);
2024 ASSERT (di->txp[txout] == NULL);
2025
2026 txout = NEXTTXD (txout);
2027 }
2028
2029 /* if last txd eof not set, fix it */
2030 if (!(flags & D64_CTRL1_EOF))
2031 W_SM (&di->txd64[PREVTXD (txout)].ctrl1,
2032 BUS_SWAP32 (flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
2033
2034 /* save the packet */
2035 di->txp[PREVTXD (txout)] = p0;
2036
2037 /* bump the tx descriptor index */
2038 di->txout = txout;
2039
2040 /* kick the chip */
2041 if (commit)
2042 W_REG (di->osh, &di->d64txregs->ptr, I2B (txout, dma64dd_t));
2043
2044 /* tx flow control */
2045 di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
2046
2047 return (0);
2048
2049 outoftxd:
2050 DMA_ERROR (("%s: dma_txfast: out of txds\n", di->name));
2051 PKTFREE (di->osh, p0, TRUE);
2052 di->hnddma.txavail = 0;
2053 di->hnddma.txnobuf++;
2054 return (-1);
2055 }
2056
2057 /*
2058 * Reclaim next completed txd (txds if using chained buffers) and
2059 * return associated packet.
2060 * If 'forceall' is true, reclaim txd(s) and return the associated packet
2061 * regardless of the value of the hardware "curr" pointer.
2062 */
2063 static void *
2064 dma64_getnexttxp (dma_info_t * di, bool forceall)
2065 {
2066 uint start, end, i;
2067 void *txp;
2068
2069 DMA_TRACE (("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
2070
2071 if (di->ntxd == 0)
2072 return (NULL);
2073
2074 txp = NULL;
2075
2076 start = di->txin;
2077 if (forceall)
2078 end = di->txout;
2079 else
2080 end =
2081 B2I (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK,
2082 dma64dd_t);
2083
2084 if ((start == 0) && (end > di->txout))
2085 goto bogus;
2086
2087 for (i = start; i != end && !txp; i = NEXTTXD (i))
2088 {
2089 DMA_UNMAP (di->osh,
2090 (BUS_SWAP32 (R_SM (&di->txd64[i].addrlow)) -
2091 di->dataoffsetlow),
2092 (BUS_SWAP32 (R_SM (&di->txd64[i].ctrl2)) &
2093 D64_CTRL2_BC_MASK), DMA_TX, di->txp[i], &di->txp_dmah[i]);
2094
2095 W_SM (&di->txd64[i].addrlow, 0xdeadbeef);
2096 W_SM (&di->txd64[i].addrhigh, 0xdeadbeef);
2097
2098 txp = di->txp[i];
2099 di->txp[i] = NULL;
2100 }
2101
2102 di->txin = i;
2103
2104 /* tx flow control */
2105 di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
2106
2107 return (txp);
2108
2109 bogus:
2110 /*
2111 DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
2112 start, end, di->txout, forceall));
2113 */
2114 return (NULL);
2115 }
2116
2117 static void *
2118 dma64_getnextrxp (dma_info_t * di, bool forceall)
2119 {
2120 uint i;
2121 void *rxp;
2122
2123 /* if forcing, dma engine must be disabled */
2124 ASSERT (!forceall || !dma64_rxenabled (di));
2125
2126 i = di->rxin;
2127
2128 /* return if no packets posted */
2129 if (i == di->rxout)
2130 return (NULL);
2131
2132 /* ignore curr if forceall */
2133 if (!forceall &&
2134 (i ==
2135 B2I (R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK,
2136 dma64dd_t)))
2137 return (NULL);
2138
2139 /* get the packet pointer that corresponds to the rx descriptor */
2140 rxp = di->rxp[i];
2141 ASSERT (rxp);
2142 di->rxp[i] = NULL;
2143
2144 /* clear this packet from the descriptor ring */
2145 DMA_UNMAP (di->osh,
2146 (BUS_SWAP32 (R_SM (&di->rxd64[i].addrlow)) - di->dataoffsetlow),
2147 di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
2148
2149 W_SM (&di->rxd64[i].addrlow, 0xdeadbeef);
2150 W_SM (&di->rxd64[i].addrhigh, 0xdeadbeef);
2151
2152 di->rxin = NEXTRXD (i);
2153
2154 return (rxp);
2155 }
2156
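/* annotation: same set/read-back/clear probe as _dma32_addrext, using the
 * 64-bit engine's D64_XC_AE bit */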
2157 static bool
2158 _dma64_addrext (osl_t * osh, dma64regs_t * dma64regs)
2159 {
2160 uint32 w;
2161 OR_REG (osh, &dma64regs->control, D64_XC_AE);
2162 w = R_REG (osh, &dma64regs->control);
2163 AND_REG (osh, &dma64regs->control, ~D64_XC_AE);
2164 return ((w & D64_XC_AE) == D64_XC_AE);
2165 }
2166
2167 /*
2168 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
2169 */
2170 static void
2171 dma64_txrotate (dma_info_t * di)
2172 {
2173 uint ad;
2174 uint nactive;
2175 uint rot;
2176 uint old, new;
2177 uint32 w;
2178 uint first, last;
2179
2180 ASSERT (dma64_txsuspendedidle (di));
2181
2182 nactive = _dma_txactive (di);
2183 ad =
2184 B2I ((R_REG (di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK),
2185 dma64dd_t);
2186 rot = TXD (ad - di->txin);
2187
2188 ASSERT (rot < di->ntxd);
2189
2190 /* full-ring case is a lot harder - don't worry about this */
2191 if (rot >= (di->ntxd - nactive))
2192 {
2193 DMA_ERROR (("%s: dma_txrotate: ring full - punt\n", di->name));
2194 return;
2195 }
2196
2197 first = di->txin;
2198 last = PREVTXD (di->txout);
2199
2200 /* move entries starting at last and moving backwards to first */
2201 for (old = last; old != PREVTXD (first); old = PREVTXD (old))
2202 {
2203 new = TXD (old + rot);
2204
2205 /*
2206 * Move the tx dma descriptor.
2207 * EOT is set only in the last entry in the ring.
2208 */
2209 w = BUS_SWAP32 (R_SM (&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
2210 if (new == (di->ntxd - 1))
2211 w |= D64_CTRL1_EOT;
2212 W_SM (&di->txd64[new].ctrl1, BUS_SWAP32 (w));
2213
2214 w = BUS_SWAP32 (R_SM (&di->txd64[old].ctrl2));
2215 W_SM (&di->txd64[new].ctrl2, BUS_SWAP32 (w));
2216
2217 W_SM (&di->txd64[new].addrlow, R_SM (&di->txd64[old].addrlow));
2218 W_SM (&di->txd64[new].addrhigh, R_SM (&di->txd64[old].addrhigh));
2219
2220 /* zap the old tx dma descriptor address field */
2221 W_SM (&di->txd64[old].addrlow, BUS_SWAP32 (0xdeadbeef));
2222 W_SM (&di->txd64[old].addrhigh, BUS_SWAP32 (0xdeadbeef));
2223
2224 /* move the corresponding txp[] entry */
2225 ASSERT (di->txp[new] == NULL);
2226 di->txp[new] = di->txp[old];
2227 di->txp[old] = NULL;
2228 }
2229
2230 /* update txin and txout */
2231 di->txin = ad;
2232 di->txout = TXD (di->txout + rot);
2233 di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
2234
2235 /* kick the chip */
2236 W_REG (di->osh, &di->d64txregs->ptr, I2B (di->txout, dma64dd_t));
2237 }
2238
2239 #endif /* BCMDMA64 */
2240
2241 uint
2242 dma_addrwidth (sb_t * sbh, void *dmaregs)
2243 {
2244 dma32regs_t *dma32regs;
2245 osl_t *osh;
2246
2247 osh = sb_osh (sbh);
2248
2249 if (DMA64_CAP)
2250 {
2251 /* DMA engine is 64-bit capable */
2252 if (((sb_coreflagshi (sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64))
2253 {
2254 /* the backplane is 64-bit capable */
2255 if (sb_backplane64 (sbh))
2256 /* if the bus is the System Backplane or PCIe, we can use 64-bit addressing */
2257 if ((BUSTYPE (sbh->bustype) == SB_BUS) ||
2258 ((BUSTYPE (sbh->bustype) == PCI_BUS) &&
2259 sbh->buscoretype == SB_PCIE))
2260 return (DMADDRWIDTH_64);
2261
2262 /* DMA64 is always 32-bit capable; AE is always TRUE */
2263 #ifdef BCMDMA64
2264 ASSERT (_dma64_addrext (osh, (dma64regs_t *) dmaregs));
2265 #endif
2266 return (DMADDRWIDTH_32);
2267 }
2268 }
2269
2270 /* Start checking for 32-bit / 30-bit addressing */
2271 dma32regs = (dma32regs_t *) dmaregs;
2272
2273 /* for the System Backplane, the PCIe bus, or the addrext feature, 32 bits is OK */
2274 if ((BUSTYPE (sbh->bustype) == SB_BUS) ||
2275 ((BUSTYPE (sbh->bustype) == PCI_BUS) && sbh->buscoretype == SB_PCIE) ||
2276 (_dma32_addrext (osh, dma32regs)))
2277 return (DMADDRWIDTH_32);
2278
2279 /* Fallthru */
2280 return (DMADDRWIDTH_30);
2281 }
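/*
 * Usage sketch (hypothetical, not from the original source):
 *
 *   uint width = dma_addrwidth (sbh, (void *) dmaregs);
 *
 * A DMADDRWIDTH_30 result means no address extension is available, so all
 * DMA-able buffers must sit below 1 GB; DMADDRWIDTH_32/_64 relax that to a
 * 32- or 64-bit address space. How the reported width constrains buffer
 * allocation is up to the chip-specific attach code.
 */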