/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright 2007, Broadcom Corporation
 * All Rights Reserved.
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 */

#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include "linux_osl.h"
#include <bcmendian.h>
#include <sbconfig.h>
#include "bcmutils.h"
#include <bcmdevs.h>
#include <sbutils.h>

#include "sbhnddma.h"
#include "hnddma.h"

/* debug/trace */
#ifdef BCMDBG
#define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args
#define DMA_TRACE(args) if (!(*di->msg_level & 2)); else printf args
#else
#define DMA_ERROR(args)
#define DMA_TRACE(args)
#endif
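/*
 * Note on the macro shape above (explanatory comment, not in the original
 * source): the "if (!cond); else printf args" form expands to a single
 * statement, so the macro stays safe inside an unbraced if/else in caller
 * code, unlike a bare "if (cond) printf args", which would steal a
 * following else. Illustrative only:
 *
 *   if (error)
 *     DMA_ERROR (("%s: oops\n", di->name));
 *   else
 *     recover ();   // still pairs with "if (error)" as intended
 */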

/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level = 0;

#define MAXNAMEL 8 /* 8 char names */

#define DI_INFO(dmah) (dma_info_t *)dmah
typedef struct osl_dmainfo osldma_t;

/* dma engine software state */
typedef struct dma_info
{
  struct hnddma_pub hnddma;  /* exported structure, don't use hnddma_t,
                              * which could be const
                              */
  uint *msg_level;           /* message level pointer */
  char name[MAXNAMEL];       /* caller's name for diag msgs */

  void *osh;                 /* os handle */
  sb_t *sbh;                 /* sb handle */

  bool dma64;                /* dma64 enabled */
  bool addrext;              /* this dma engine supports DmaExtendedAddrChanges */

  dma32regs_t *d32txregs;    /* 32-bit dma tx engine registers */
  dma32regs_t *d32rxregs;    /* 32-bit dma rx engine registers */
  dma64regs_t *d64txregs;    /* 64-bit dma tx engine registers */
  dma64regs_t *d64rxregs;    /* 64-bit dma rx engine registers */

  uint32 dma64align;         /* either 8k or 4k, depending on the number of dd */
  dma32dd_t *txd32;          /* pointer to dma32 tx descriptor ring */
  dma64dd_t *txd64;          /* pointer to dma64 tx descriptor ring */
  uint ntxd;                 /* # tx descriptors tunable */
  uint txin;                 /* index of next descriptor to reclaim */
  uint txout;                /* index of next descriptor to post */
  void **txp;                /* pointer to parallel array of pointers to packets */
  osldma_t *tx_dmah;         /* DMA TX descriptor ring handle */
  osldma_t **txp_dmah;       /* DMA TX packet data handle */
  ulong txdpa;               /* physical address of descriptor ring */
  uint txdalign;             /* #bytes added to alloc'd mem to align txd */
  uint txdalloc;             /* #bytes allocated for the ring */

  dma32dd_t *rxd32;          /* pointer to dma32 rx descriptor ring */
  dma64dd_t *rxd64;          /* pointer to dma64 rx descriptor ring */
  uint nrxd;                 /* # rx descriptors tunable */
  uint rxin;                 /* index of next descriptor to reclaim */
  uint rxout;                /* index of next descriptor to post */
  void **rxp;                /* pointer to parallel array of pointers to packets */
  osldma_t *rx_dmah;         /* DMA RX descriptor ring handle */
  osldma_t **rxp_dmah;       /* DMA RX packet data handle */
  ulong rxdpa;               /* physical address of descriptor ring */
  uint rxdalign;             /* #bytes added to alloc'd mem to align rxd */
  uint rxdalloc;             /* #bytes allocated for the ring */

  /* tunables */
  uint rxbufsize;            /* rx buffer size in bytes,
                              * not including the extra headroom
                              */
  uint nrxpost;              /* # rx buffers to keep posted */
  uint rxoffset;             /* rxcontrol offset */
  uint ddoffsetlow;          /* add to get dma address of descriptor ring, low 32 bits */
  uint ddoffsethigh;         /* high 32 bits */
  uint dataoffsetlow;        /* add to get dma address of data buffer, low 32 bits */
  uint dataoffsethigh;       /* high 32 bits */
} dma_info_t;

#ifdef BCMDMA64
#define DMA64_ENAB(di) ((di)->dma64)
#define DMA64_CAP TRUE
#else
#define DMA64_ENAB(di) (0)
#define DMA64_CAP FALSE
#endif

/* descriptor bumping macros */
#define XXD(x, n) ((x) & ((n) - 1))  /* faster than %, but n must be power of 2 */
#define TXD(x) XXD((x), di->ntxd)
#define RXD(x) XXD((x), di->nrxd)
#define NEXTTXD(i) TXD(i + 1)
#define PREVTXD(i) TXD(i - 1)
#define NEXTRXD(i) RXD(i + 1)
#define NTXDACTIVE(h, t) TXD(t - h)
#define NRXDACTIVE(h, t) RXD(t - h)

/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type) ((bytes) / sizeof(type))
#define I2B(index, type) ((index) * sizeof(type))
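/*
 * Worked example (added comment, not in the original): with ntxd = 64,
 * XXD(67, 64) == 67 & 63 == 3, so NEXTTXD(63) wraps to 0 without a modulo.
 * NTXDACTIVE(h, t) relies on the same mask to stay correct after t wraps
 * past h, e.g. h = 62, t = 1 gives TXD(1 - 62) == (-61) & 63 == 3 active
 * descriptors. This is why dma_attach() asserts ISPOWEROF2(ntxd/nrxd).
 */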

#define PCI32ADDR_HIGH 0xc0000000  /* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT 30    /* address[31:30] */

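/*
 * Added note (not in the original source): these constants implement the
 * 32-bit "address extension" (AE) scheme used throughout this file. A
 * physical address with either of bits 31:30 set cannot be programmed
 * directly through the PCI core's 1GB window, so the two high bits are
 * moved into the descriptor/control AE field instead. Illustrative only:
 *
 *   pa = 0x80401000  ->  ae = (pa & PCI32ADDR_HIGH) >> 30 == 2,
 *                        pa &= ~PCI32ADDR_HIGH        == 0x00401000
 */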
/* common prototypes */
static bool _dma_isaddrext (dma_info_t * di);
static bool _dma_alloc (dma_info_t * di, uint direction);
static void _dma_detach (dma_info_t * di);
static void _dma_ddtable_init (dma_info_t * di, uint direction, ulong pa);
static void _dma_rxinit (dma_info_t * di);
static void *_dma_rx (dma_info_t * di);
static void _dma_rxfill (dma_info_t * di);
static void _dma_rxreclaim (dma_info_t * di);
static void _dma_rxenable (dma_info_t * di);
static void *_dma_getnextrxp (dma_info_t * di, bool forceall);

static void _dma_txblock (dma_info_t * di);
static void _dma_txunblock (dma_info_t * di);
static uint _dma_txactive (dma_info_t * di);

static void *_dma_peeknexttxp (dma_info_t * di);
static uintptr _dma_getvar (dma_info_t * di, const char *name);
static void _dma_counterreset (dma_info_t * di);
static void _dma_fifoloopbackenable (dma_info_t * di);

/* ** 32 bit DMA prototypes */
static bool dma32_alloc (dma_info_t * di, uint direction);
static bool dma32_txreset (dma_info_t * di);
static bool dma32_rxreset (dma_info_t * di);
static bool dma32_txsuspendedidle (dma_info_t * di);
static int dma32_txfast (dma_info_t * di, void *p0, bool commit);
static void *dma32_getnexttxp (dma_info_t * di, bool forceall);
static void *dma32_getnextrxp (dma_info_t * di, bool forceall);
static void dma32_txrotate (dma_info_t * di);
static bool dma32_rxidle (dma_info_t * di);
static void dma32_txinit (dma_info_t * di);
static bool dma32_txenabled (dma_info_t * di);
static void dma32_txsuspend (dma_info_t * di);
static void dma32_txresume (dma_info_t * di);
static bool dma32_txsuspended (dma_info_t * di);
static void dma32_txreclaim (dma_info_t * di, bool forceall);
static bool dma32_txstopped (dma_info_t * di);
static bool dma32_rxstopped (dma_info_t * di);
static bool dma32_rxenabled (dma_info_t * di);
static bool _dma32_addrext (osl_t * osh, dma32regs_t * dma32regs);

/* ** 64 bit DMA prototypes and stubs */
#ifdef BCMDMA64
static bool dma64_alloc (dma_info_t * di, uint direction);
static bool dma64_txreset (dma_info_t * di);
static bool dma64_rxreset (dma_info_t * di);
static bool dma64_txsuspendedidle (dma_info_t * di);
static int dma64_txfast (dma_info_t * di, void *p0, bool commit);
static void *dma64_getnexttxp (dma_info_t * di, bool forceall);
static void *dma64_getnextrxp (dma_info_t * di, bool forceall);
static void dma64_txrotate (dma_info_t * di);

static bool dma64_rxidle (dma_info_t * di);
static void dma64_txinit (dma_info_t * di);
static bool dma64_txenabled (dma_info_t * di);
static void dma64_txsuspend (dma_info_t * di);
static void dma64_txresume (dma_info_t * di);
static bool dma64_txsuspended (dma_info_t * di);
static void dma64_txreclaim (dma_info_t * di, bool forceall);
static bool dma64_txstopped (dma_info_t * di);
static bool dma64_rxstopped (dma_info_t * di);
static bool dma64_rxenabled (dma_info_t * di);
static bool _dma64_addrext (osl_t * osh, dma64regs_t * dma64regs);

#else
static bool
dma64_alloc (dma_info_t * di, uint direction)
{
  return FALSE;
}
static bool
dma64_txreset (dma_info_t * di)
{
  return FALSE;
}
static bool
dma64_rxreset (dma_info_t * di)
{
  return FALSE;
}
static bool
dma64_txsuspendedidle (dma_info_t * di)
{
  return FALSE;
}
static int
dma64_txfast (dma_info_t * di, void *p0, bool commit)
{
  return 0;
}
static void *
dma64_getnexttxp (dma_info_t * di, bool forceall)
{
  return NULL;
}
static void *
dma64_getnextrxp (dma_info_t * di, bool forceall)
{
  return NULL;
}
static void
dma64_txrotate (dma_info_t * di)
{
  return;
}

static bool
dma64_rxidle (dma_info_t * di)
{
  return FALSE;
}
static void
dma64_txinit (dma_info_t * di)
{
  return;
}
static bool
dma64_txenabled (dma_info_t * di)
{
  return FALSE;
}
static void
dma64_txsuspend (dma_info_t * di)
{
  return;
}
static void
dma64_txresume (dma_info_t * di)
{
  return;
}
static bool
dma64_txsuspended (dma_info_t * di)
{
  return FALSE;
}
static void
dma64_txreclaim (dma_info_t * di, bool forceall)
{
  return;
}
static bool
dma64_txstopped (dma_info_t * di)
{
  return FALSE;
}
static bool
dma64_rxstopped (dma_info_t * di)
{
  return FALSE;
}
static bool
dma64_rxenabled (dma_info_t * di)
{
  return FALSE;
}
static bool
_dma64_addrext (osl_t * osh, dma64regs_t * dma64regs)
{
  return FALSE;
}

#endif /* BCMDMA64 */

#ifdef BCMDBG
static void dma32_dumpring (dma_info_t * di, struct bcmstrbuf *b,
                            dma32dd_t * ring, uint start, uint end,
                            uint max_num);
static void dma32_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumptx (dma_info_t * di, struct bcmstrbuf *b,
                          bool dumpring);
static void dma32_dumprx (dma_info_t * di, struct bcmstrbuf *b,
                          bool dumpring);

static void dma64_dumpring (dma_info_t * di, struct bcmstrbuf *b,
                            dma64dd_t * ring, uint start, uint end,
                            uint max_num);
static void dma64_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumptx (dma_info_t * di, struct bcmstrbuf *b,
                          bool dumpring);
static void dma64_dumprx (dma_info_t * di, struct bcmstrbuf *b,
                          bool dumpring);
#endif

static di_fcn_t dma64proc = {
  (di_detach_t) _dma_detach,
  (di_txinit_t) dma64_txinit,
  (di_txreset_t) dma64_txreset,
  (di_txenabled_t) dma64_txenabled,
  (di_txsuspend_t) dma64_txsuspend,
  (di_txresume_t) dma64_txresume,
  (di_txsuspended_t) dma64_txsuspended,
  (di_txsuspendedidle_t) dma64_txsuspendedidle,
  (di_txfast_t) dma64_txfast,
  (di_txstopped_t) dma64_txstopped,
  (di_txreclaim_t) dma64_txreclaim,
  (di_getnexttxp_t) dma64_getnexttxp,
  (di_peeknexttxp_t) _dma_peeknexttxp,
  (di_txblock_t) _dma_txblock,
  (di_txunblock_t) _dma_txunblock,
  (di_txactive_t) _dma_txactive,
  (di_txrotate_t) dma64_txrotate,

  (di_rxinit_t) _dma_rxinit,
  (di_rxreset_t) dma64_rxreset,
  (di_rxidle_t) dma64_rxidle,
  (di_rxstopped_t) dma64_rxstopped,
  (di_rxenable_t) _dma_rxenable,
  (di_rxenabled_t) dma64_rxenabled,
  (di_rx_t) _dma_rx,
  (di_rxfill_t) _dma_rxfill,
  (di_rxreclaim_t) _dma_rxreclaim,
  (di_getnextrxp_t) _dma_getnextrxp,

  (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
  (di_getvar_t) _dma_getvar,
  (di_counterreset_t) _dma_counterreset,

#ifdef BCMDBG
  (di_dump_t) dma64_dump,
  (di_dumptx_t) dma64_dumptx,
  (di_dumprx_t) dma64_dumprx,
#else
  NULL,
  NULL,
  NULL,
#endif
  34
};

static di_fcn_t dma32proc = {
  (di_detach_t) _dma_detach,
  (di_txinit_t) dma32_txinit,
  (di_txreset_t) dma32_txreset,
  (di_txenabled_t) dma32_txenabled,
  (di_txsuspend_t) dma32_txsuspend,
  (di_txresume_t) dma32_txresume,
  (di_txsuspended_t) dma32_txsuspended,
  (di_txsuspendedidle_t) dma32_txsuspendedidle,
  (di_txfast_t) dma32_txfast,
  (di_txstopped_t) dma32_txstopped,
  (di_txreclaim_t) dma32_txreclaim,
  (di_getnexttxp_t) dma32_getnexttxp,
  (di_peeknexttxp_t) _dma_peeknexttxp,
  (di_txblock_t) _dma_txblock,
  (di_txunblock_t) _dma_txunblock,
  (di_txactive_t) _dma_txactive,
  (di_txrotate_t) dma32_txrotate,

  (di_rxinit_t) _dma_rxinit,
  (di_rxreset_t) dma32_rxreset,
  (di_rxidle_t) dma32_rxidle,
  (di_rxstopped_t) dma32_rxstopped,
  (di_rxenable_t) _dma_rxenable,
  (di_rxenabled_t) dma32_rxenabled,
  (di_rx_t) _dma_rx,
  (di_rxfill_t) _dma_rxfill,
  (di_rxreclaim_t) _dma_rxreclaim,
  (di_getnextrxp_t) _dma_getnextrxp,

  (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
  (di_getvar_t) _dma_getvar,
  (di_counterreset_t) _dma_counterreset,

#ifdef BCMDBG
  (di_dump_t) dma32_dump,
  (di_dumptx_t) dma32_dumptx,
  (di_dumprx_t) dma32_dumprx,
#else
  NULL,
  NULL,
  NULL,
#endif
  34
};

hnddma_t *
dma_attach (osl_t * osh, char *name, sb_t * sbh, void *dmaregstx,
            void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
            uint nrxpost, uint rxoffset, uint * msg_level)
{
  dma_info_t *di;
  uint size;

  /* allocate private info structure */
  if ((di = MALLOC (osh, sizeof (dma_info_t))) == NULL)
    {
#ifdef BCMDBG
      printf ("dma_attach: out of memory, malloced %d bytes\n",
              MALLOCED (osh));
#endif
      return (NULL);
    }
  bzero ((char *) di, sizeof (dma_info_t));

  di->msg_level = msg_level ? msg_level : &dma_msg_level;

  /* old chips w/o sb are no longer supported */
  ASSERT (sbh != NULL);

  di->dma64 = ((sb_coreflagshi (sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);

#ifndef BCMDMA64
  if (di->dma64)
    {
      DMA_ERROR (("dma_attach: driver doesn't have the capability to support "
                  "64-bit DMA\n"));
      goto fail;
    }
#endif

  /* check arguments */
  ASSERT (ISPOWEROF2 (ntxd));
  ASSERT (ISPOWEROF2 (nrxd));
  if (nrxd == 0)
    ASSERT (dmaregsrx == NULL);
  if (ntxd == 0)
    ASSERT (dmaregstx == NULL);

  /* init dma reg pointer */
  if (di->dma64)
    {
      ASSERT (ntxd <= D64MAXDD);
      ASSERT (nrxd <= D64MAXDD);
      di->d64txregs = (dma64regs_t *) dmaregstx;
      di->d64rxregs = (dma64regs_t *) dmaregsrx;

      di->dma64align = D64RINGALIGN;
      if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
        {
          /* for a smaller dd table, HW relaxes the alignment requirement */
          di->dma64align = D64RINGALIGN / 2;
        }
    }
  else
    {
      ASSERT (ntxd <= D32MAXDD);
      ASSERT (nrxd <= D32MAXDD);
      di->d32txregs = (dma32regs_t *) dmaregstx;
      di->d32rxregs = (dma32regs_t *) dmaregsrx;
    }

  DMA_TRACE (("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d "
              "nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
              name, (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd,
              rxbufsize, nrxpost, rxoffset, dmaregstx, dmaregsrx));

  /* make a private copy of our caller's name */
  strncpy (di->name, name, MAXNAMEL);
  di->name[MAXNAMEL - 1] = '\0';

  di->osh = osh;
  di->sbh = sbh;

  /* save tunables */
  di->ntxd = ntxd;
  di->nrxd = nrxd;

  /* the actual dma size doesn't include the extra headroom */
  if (rxbufsize > BCMEXTRAHDROOM)
    di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
  else
    di->rxbufsize = rxbufsize;

  di->nrxpost = nrxpost;
  di->rxoffset = rxoffset;

  /*
   * figure out the DMA physical address offset for dd and data
   *   for old chips w/o sb, use zero
   *   for new chips w sb,
   *     PCI/PCIE: they map the silicon backplane address to zero-based memory; need offset
   *     Other bus: use zero
   *     SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
   */
  di->ddoffsetlow = 0;
  di->dataoffsetlow = 0;
  /* for pci bus, add offset */
  if (sbh->bustype == PCI_BUS)
    {
      if ((sbh->buscoretype == SB_PCIE) && di->dma64)
        {
          /* pcie with DMA64 */
          di->ddoffsetlow = 0;
          di->ddoffsethigh = SB_PCIE_DMA_H32;
        }
      else
        {
          /* pci(DMA32/DMA64) or pcie with DMA32 */
          di->ddoffsetlow = SB_PCI_DMA;
          di->ddoffsethigh = 0;
        }
      di->dataoffsetlow = di->ddoffsetlow;
      di->dataoffsethigh = di->ddoffsethigh;
    }

#if defined(__mips__) && defined(IL_BIGENDIAN)
  di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif

  di->addrext = _dma_isaddrext (di);

  /* allocate tx packet pointer vector */
  if (ntxd)
    {
      size = ntxd * sizeof (void *);
      if ((di->txp = MALLOC (osh, size)) == NULL)
        {
          DMA_ERROR (("%s: dma_attach: out of tx memory, malloced %d bytes\n",
                      di->name, MALLOCED (osh)));
          goto fail;
        }
      bzero ((char *) di->txp, size);
    }

  /* allocate rx packet pointer vector */
  if (nrxd)
    {
      size = nrxd * sizeof (void *);
      if ((di->rxp = MALLOC (osh, size)) == NULL)
        {
          DMA_ERROR (("%s: dma_attach: out of rx memory, malloced %d bytes\n",
                      di->name, MALLOCED (osh)));
          goto fail;
        }
      bzero ((char *) di->rxp, size);
    }

  /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
  if (ntxd)
    {
      if (!_dma_alloc (di, DMA_TX))
        goto fail;
    }

  /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
  if (nrxd)
    {
      if (!_dma_alloc (di, DMA_RX))
        goto fail;
    }

  if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ)
      && !di->addrext)
    {
      DMA_ERROR (("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
                  di->name, di->txdpa));
      goto fail;
    }
  if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ)
      && !di->addrext)
    {
      DMA_ERROR (("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
                  di->name, di->rxdpa));
      goto fail;
    }

  DMA_TRACE (("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
              "dataoffsethigh 0x%x addrext %d\n",
              di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow,
              di->dataoffsethigh, di->addrext));

  /* allocate tx packet pointer vector and DMA mapping vectors */
  if (ntxd)
    {
      size = ntxd * sizeof (osldma_t **);
      if ((di->txp_dmah = (osldma_t **) MALLOC (osh, size)) == NULL)
        goto fail;
      bzero ((char *) di->txp_dmah, size);
    }
  else
    di->txp_dmah = NULL;

  /* allocate rx packet pointer vector and DMA mapping vectors */
  if (nrxd)
    {
      size = nrxd * sizeof (osldma_t **);
      if ((di->rxp_dmah = (osldma_t **) MALLOC (osh, size)) == NULL)
        goto fail;
      bzero ((char *) di->rxp_dmah, size);
    }
  else
    di->rxp_dmah = NULL;

  /* initialize opsvec of function pointers */
  di->hnddma.di_fn = DMA64_ENAB (di) ? dma64proc : dma32proc;

  return ((hnddma_t *) di);

fail:
  _dma_detach (di);
  return (NULL);
}
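
/*
 * Usage sketch (added comment; the names are illustrative, not from this
 * file): a typical caller attaches one engine per tx/rx fifo pair, e.g.
 *
 *   hnddma_t *dmah = dma_attach (osh, "wl0", sbh,
 *                                dmaregstx, dmaregsrx,
 *                                NTXD, NRXD, PKTBUFSZ, NRXBUFPOST,
 *                                HWRXOFF, &wl_msg_level);
 *
 * and then drives it exclusively through the di_fn function-pointer table
 * that dma_attach() points at dma32proc or dma64proc above. NTXD and NRXD
 * must be powers of 2 to satisfy the descriptor-bumping macros.
 */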

/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd (dma_info_t * di, dma32dd_t * ddring, ulong pa, uint outidx,
              uint32 * flags, uint32 bufcount)
{
  /* dma32 uses a single 32-bit control word to fit both the flags and the buffer count */
  *flags = *flags | (bufcount & CTRL_BC_MASK);

  if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
    {
      W_SM (&ddring[outidx].addr, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].ctrl, BUS_SWAP32 (*flags));
    }
  else
    {
      /* address extension */
      uint32 ae;
      ASSERT (di->addrext);
      ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
      pa &= ~PCI32ADDR_HIGH;

      *flags |= (ae << CTRL_AE_SHIFT);
      W_SM (&ddring[outidx].addr, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].ctrl, BUS_SWAP32 (*flags));
    }
}
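
/*
 * Added note (not in the original): the dma32 descriptor thus packs
 * everything into one control word, e.g. (illustrative values)
 *
 *   ctrl = CTRL_SOF | CTRL_EOF | CTRL_IOC | (len & CTRL_BC_MASK);
 *
 * SOF/EOF bracket a possibly multi-descriptor frame, IOC requests an
 * interrupt on completion, and the low bits carry the buffer byte count,
 * as set up by the callers dma32_txfast() and _dma_rxfill() below.
 */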

static INLINE void
dma64_dd_upd (dma_info_t * di, dma64dd_t * ddring, ulong pa, uint outidx,
              uint32 * flags, uint32 bufcount)
{
  uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

  /* PCI bus with a big (>1G) physical address: use address extension */
  if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
    {
      W_SM (&ddring[outidx].addrlow, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].addrhigh, BUS_SWAP32 (0 + di->dataoffsethigh));
      W_SM (&ddring[outidx].ctrl1, BUS_SWAP32 (*flags));
      W_SM (&ddring[outidx].ctrl2, BUS_SWAP32 (ctrl2));
    }
  else
    {
      /* address extension */
      uint32 ae;
      ASSERT (di->addrext);

      ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
      pa &= ~PCI32ADDR_HIGH;

      ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
      W_SM (&ddring[outidx].addrlow, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].addrhigh, BUS_SWAP32 (0 + di->dataoffsethigh));
      W_SM (&ddring[outidx].ctrl1, BUS_SWAP32 (*flags));
      W_SM (&ddring[outidx].ctrl2, BUS_SWAP32 (ctrl2));
    }
}

static bool
_dma32_addrext (osl_t * osh, dma32regs_t * dma32regs)
{
  uint32 w;

  OR_REG (osh, &dma32regs->control, XC_AE);
  w = R_REG (osh, &dma32regs->control);
  AND_REG (osh, &dma32regs->control, ~XC_AE);
  return ((w & XC_AE) == XC_AE);
}

static bool
_dma_alloc (dma_info_t * di, uint direction)
{
  if (DMA64_ENAB (di))
    {
      return dma64_alloc (di, direction);
    }
  else
    {
      return dma32_alloc (di, direction);
    }
}

/* !! may be called with core in reset */
static void
_dma_detach (dma_info_t * di)
{
  if (di == NULL)
    return;

  DMA_TRACE (("%s: dma_detach\n", di->name));

  /* shouldn't be here if descriptors are unreclaimed */
  ASSERT (di->txin == di->txout);
  ASSERT (di->rxin == di->rxout);

  /* free dma descriptor rings */
  if (DMA64_ENAB (di))
    {
      if (di->txd64)
        DMA_FREE_CONSISTENT (di->osh,
                             ((int8 *) (uintptr) di->txd64 - di->txdalign),
                             di->txdalloc, (di->txdpa - di->txdalign),
                             &di->tx_dmah);
      if (di->rxd64)
        DMA_FREE_CONSISTENT (di->osh,
                             ((int8 *) (uintptr) di->rxd64 - di->rxdalign),
                             di->rxdalloc, (di->rxdpa - di->rxdalign),
                             &di->rx_dmah);
    }
  else
    {
      if (di->txd32)
        DMA_FREE_CONSISTENT (di->osh,
                             ((int8 *) (uintptr) di->txd32 - di->txdalign),
                             di->txdalloc, (di->txdpa - di->txdalign),
                             &di->tx_dmah);
      if (di->rxd32)
        DMA_FREE_CONSISTENT (di->osh,
                             ((int8 *) (uintptr) di->rxd32 - di->rxdalign),
                             di->rxdalloc, (di->rxdpa - di->rxdalign),
                             &di->rx_dmah);
    }

  /* free packet pointer vectors */
  if (di->txp)
    MFREE (di->osh, (void *) di->txp, (di->ntxd * sizeof (void *)));
  if (di->rxp)
    MFREE (di->osh, (void *) di->rxp, (di->nrxd * sizeof (void *)));

  /* free tx packet DMA handles */
  if (di->txp_dmah)
    MFREE (di->osh, (void *) di->txp_dmah, di->ntxd * sizeof (osldma_t **));

  /* free rx packet DMA handles */
  if (di->rxp_dmah)
    MFREE (di->osh, (void *) di->rxp_dmah, di->nrxd * sizeof (osldma_t **));

  /* free our private info structure */
  MFREE (di->osh, (void *) di, sizeof (dma_info_t));
}

/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool
_dma_isaddrext (dma_info_t * di)
{
  if (DMA64_ENAB (di))
    {
      /* DMA64 supports full 32- or 64-bit operation. AE is always valid */

      /* not all tx or rx channels are available */
      if (di->d64txregs != NULL)
        {
          if (!_dma64_addrext (di->osh, di->d64txregs))
            {
              DMA_ERROR (("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n",
                          di->name));
              ASSERT (0);
            }
          return TRUE;
        }
      else if (di->d64rxregs != NULL)
        {
          if (!_dma64_addrext (di->osh, di->d64rxregs))
            {
              DMA_ERROR (("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n",
                          di->name));
              ASSERT (0);
            }
          return TRUE;
        }
      return FALSE;
    }
  else if (di->d32txregs)
    return (_dma32_addrext (di->osh, di->d32txregs));
  else if (di->d32rxregs)
    return (_dma32_addrext (di->osh, di->d32rxregs));
  return FALSE;
}

/* initialize descriptor table base address */
static void
_dma_ddtable_init (dma_info_t * di, uint direction, ulong pa)
{
  if (DMA64_ENAB (di))
    {
      if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
        {
          if (direction == DMA_TX)
            {
              W_REG (di->osh, &di->d64txregs->addrlow, (pa + di->ddoffsetlow));
              W_REG (di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
            }
          else
            {
              W_REG (di->osh, &di->d64rxregs->addrlow, (pa + di->ddoffsetlow));
              W_REG (di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
            }
        }
      else
        {
          /* DMA64 32-bit address extension */
          uint32 ae;
          ASSERT (di->addrext);

          /* shift the high bit(s) from pa to ae */
          ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
          pa &= ~PCI32ADDR_HIGH;

          if (direction == DMA_TX)
            {
              W_REG (di->osh, &di->d64txregs->addrlow, (pa + di->ddoffsetlow));
              W_REG (di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
              SET_REG (di->osh, &di->d64txregs->control, D64_XC_AE,
                       (ae << D64_XC_AE_SHIFT));
            }
          else
            {
              W_REG (di->osh, &di->d64rxregs->addrlow, (pa + di->ddoffsetlow));
              W_REG (di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
              SET_REG (di->osh, &di->d64rxregs->control, D64_RC_AE,
                       (ae << D64_RC_AE_SHIFT));
            }
        }
    }
  else
    {
      if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
        {
          if (direction == DMA_TX)
            W_REG (di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
          else
            W_REG (di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
        }
      else
        {
          /* dma32 address extension */
          uint32 ae;
          ASSERT (di->addrext);

          /* shift the high bit(s) from pa to ae */
          ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
          pa &= ~PCI32ADDR_HIGH;

          if (direction == DMA_TX)
            {
              W_REG (di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
              SET_REG (di->osh, &di->d32txregs->control, XC_AE,
                       ae << XC_AE_SHIFT);
            }
          else
            {
              W_REG (di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
              SET_REG (di->osh, &di->d32rxregs->control, RC_AE,
                       ae << RC_AE_SHIFT);
            }
        }
    }
}

static void
_dma_fifoloopbackenable (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_fifoloopbackenable\n", di->name));
  if (DMA64_ENAB (di))
    OR_REG (di->osh, &di->d64txregs->control, D64_XC_LE);
  else
    OR_REG (di->osh, &di->d32txregs->control, XC_LE);
}

static void
_dma_rxinit (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxinit\n", di->name));

  if (di->nrxd == 0)
    return;

  di->rxin = di->rxout = 0;

  /* clear rx descriptor ring */
  if (DMA64_ENAB (di))
    BZERO_SM ((void *) (uintptr) di->rxd64, (di->nrxd * sizeof (dma64dd_t)));
  else
    BZERO_SM ((void *) (uintptr) di->rxd32, (di->nrxd * sizeof (dma32dd_t)));

  _dma_rxenable (di);
  _dma_ddtable_init (di, DMA_RX, di->rxdpa);
}

static void
_dma_rxenable (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxenable\n", di->name));

  if (DMA64_ENAB (di))
    W_REG (di->osh, &di->d64rxregs->control,
           ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
  else
    W_REG (di->osh, &di->d32rxregs->control,
           ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}

/* !! rx entry routine, returns a pointer to the next frame received,
 * or NULL if there are no more
 */
static void *
_dma_rx (dma_info_t * di)
{
  void *p;
  uint len;
  int skiplen = 0;

  while ((p = _dma_getnextrxp (di, FALSE)))
    {
      /* skip giant packets which span multiple rx descriptors */
      if (skiplen > 0)
        {
          skiplen -= di->rxbufsize;
          if (skiplen < 0)
            skiplen = 0;
          PKTFREE (di->osh, p, FALSE);
          continue;
        }

      len = ltoh16 (*(uint16 *) (PKTDATA (di->osh, p)));
      DMA_TRACE (("%s: dma_rx len %d\n", di->name, len));

      /* bad frame length check */
      if (len > (di->rxbufsize - di->rxoffset))
        {
          DMA_ERROR (("%s: dma_rx: bad frame length (%d)\n", di->name, len));
          if (len > 0)
            skiplen = len - (di->rxbufsize - di->rxoffset);
          PKTFREE (di->osh, p, FALSE);
          di->hnddma.rxgiants++;
          continue;
        }

      /* set actual length */
      PKTSETLEN (di->osh, p, (di->rxoffset + len));

      break;
    }

  return (p);
}
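
/*
 * Added note (not in the original): the engine writes the received frame
 * length as a little-endian uint16 at the very start of the buffer, which
 * is why _dma_rx() reads it with ltoh16(*(uint16 *)PKTDATA(...)) before
 * trimming the packet to rxoffset + len. A frame longer than
 * rxbufsize - rxoffset spills into the following descriptors; those
 * continuation buffers are dropped via the skiplen logic and counted in
 * hnddma.rxgiants.
 */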

/* post receive buffers */
static void
_dma_rxfill (dma_info_t * di)
{
  void *p;
  uint rxin, rxout;
  uint32 flags = 0;
  uint n;
  uint i;
  uint32 pa;
  uint extra_offset = 0;

  /*
   * Determine how many receive buffers we're lacking
   * from the full complement, allocate, initialize,
   * and post them, then update the chip rx lastdscr.
   */

  rxin = di->rxin;
  rxout = di->rxout;

  n = di->nrxpost - NRXDACTIVE (rxin, rxout);

  DMA_TRACE (("%s: dma_rxfill: post %d\n", di->name, n));

  if (di->rxbufsize > BCMEXTRAHDROOM)
    extra_offset = BCMEXTRAHDROOM;

  for (i = 0; i < n; i++)
    {
      /* di->rxbufsize doesn't include the extra headroom, so add it to
       * the size to be allocated
       */
      if ((p = PKTGET (di->osh, di->rxbufsize + extra_offset, FALSE)) == NULL)
        {
          DMA_ERROR (("%s: dma_rxfill: out of rxbufs\n", di->name));
          di->hnddma.rxnobuf++;
          break;
        }
      /* reserve an extra headroom, if applicable */
      if (extra_offset)
        PKTPULL (di->osh, p, extra_offset);

      /* Do a cached write instead of uncached write since DMA_MAP
       * will flush the cache.
       */
      *(uint32 *) (PKTDATA (di->osh, p)) = 0;

      pa = (uint32) DMA_MAP (di->osh, PKTDATA (di->osh, p),
                             di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);

      ASSERT (ISALIGNED (pa, 4));

      /* save the free packet pointer */
      ASSERT (di->rxp[rxout] == NULL);
      di->rxp[rxout] = p;

      /* reset flags for each descriptor */
      flags = 0;
      if (DMA64_ENAB (di))
        {
          if (rxout == (di->nrxd - 1))
            flags = D64_CTRL1_EOT;

          dma64_dd_upd (di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
        }
      else
        {
          if (rxout == (di->nrxd - 1))
            flags = CTRL_EOT;

          dma32_dd_upd (di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
        }
      rxout = NEXTRXD (rxout);
    }

  di->rxout = rxout;

  /* update the chip lastdscr pointer */
  if (DMA64_ENAB (di))
    {
      W_REG (di->osh, &di->d64rxregs->ptr, I2B (rxout, dma64dd_t));
    }
  else
    {
      W_REG (di->osh, &di->d32rxregs->ptr, I2B (rxout, dma32dd_t));
    }
}
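
/*
 * Added note (not in the original): _dma_rxfill() maintains the invariant
 * NRXDACTIVE(rxin, rxout) == nrxpost. For example, with nrxpost = 256 and
 * 13 frames just reclaimed, n == 13 fresh buffers are allocated, mapped,
 * described, and the ring's ptr register advanced so the engine sees
 * them. A PKTGET() failure simply leaves the ring short until the next
 * fill, counted in hnddma.rxnobuf.
 */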

/* like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp (dma_info_t * di)
{
  uint end, i;

  if (di->ntxd == 0)
    return (NULL);

  if (DMA64_ENAB (di))
    {
      end = B2I (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK,
                 dma64dd_t);
    }
  else
    {
      end = B2I (R_REG (di->osh, &di->d32txregs->status) & XS_CD_MASK,
                 dma32dd_t);
    }

  for (i = di->txin; i != end; i = NEXTTXD (i))
    if (di->txp[i])
      return (di->txp[i]);

  return (NULL);
}

static void
_dma_rxreclaim (dma_info_t * di)
{
  void *p;

  /* "unused local" warning suppression for OSLs that
   * define PKTFREE() without using the di->osh arg
   */
  di = di;

  DMA_TRACE (("%s: dma_rxreclaim\n", di->name));

  while ((p = _dma_getnextrxp (di, TRUE)))
    PKTFREE (di->osh, p, FALSE);
}

static void *
_dma_getnextrxp (dma_info_t * di, bool forceall)
{
  if (di->nrxd == 0)
    return (NULL);

  if (DMA64_ENAB (di))
    {
      return dma64_getnextrxp (di, forceall);
    }
  else
    {
      return dma32_getnextrxp (di, forceall);
    }
}

static void
_dma_txblock (dma_info_t * di)
{
  di->hnddma.txavail = 0;
}

static void
_dma_txunblock (dma_info_t * di)
{
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
}

static uint
_dma_txactive (dma_info_t * di)
{
  return (NTXDACTIVE (di->txin, di->txout));
}
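
/*
 * Added note (not in the original): txavail is ntxd - active - 1; one
 * slot is deliberately kept unused so that a full ring (txout one behind
 * txin) can be told apart from an empty one (txout == txin). E.g. with
 * ntxd = 64, txin = 10, txout = 20, txavail == 64 - 10 - 1 == 53.
 */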

static void
_dma_counterreset (dma_info_t * di)
{
  /* reset all software counters */
  di->hnddma.rxgiants = 0;
  di->hnddma.rxnobuf = 0;
  di->hnddma.txnobuf = 0;
}

/* get the address of the variable so the caller can change it later */
static uintptr
_dma_getvar (dma_info_t * di, const char *name)
{
  if (!strcmp (name, "&txavail"))
    return ((uintptr) & (di->hnddma.txavail));
  else
    {
      ASSERT (0);
    }
  return (0);
}
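
/*
 * Usage sketch (added comment; the hook's field name is illustrative,
 * taken only from the cast order in the di_fcn_t tables above): a caller
 * that wants to watch tx availability without an extra call per packet
 * might fetch the pointer once, e.g.
 *
 *   uint *txavail = (uint *) getvar_hook (dmah, "&txavail");
 *
 * "&txavail" is the only variable exported here; anything else asserts.
 */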

void
dma_txpioloopback (osl_t * osh, dma32regs_t * regs)
{
  OR_REG (osh, &regs->control, XC_LE);
}

#ifdef BCMDBG
static void
dma32_dumpring (dma_info_t * di, struct bcmstrbuf *b, dma32dd_t * ring,
                uint start, uint end, uint max_num)
{
  uint i;

  for (i = start; i != end; i = XXD ((i + 1), max_num))
    {
      /* in the format of high->low 8 bytes */
      bcm_bprintf (b, "ring index %d: 0x%x %x\n", i, ring[i].addr,
                   ring[i].ctrl);
    }
}

static void
dma32_dumptx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->ntxd == 0)
    return;

  bcm_bprintf (b, "DMA32: txd32 %p txdpa 0x%lx txp %p txin %d txout %d "
               "txavail %d\n", di->txd32, di->txdpa, di->txp, di->txin,
               di->txout, di->hnddma.txavail);

  bcm_bprintf (b, "xmtcontrol 0x%x xmtaddr 0x%x xmtptr 0x%x xmtstatus 0x%x\n",
               R_REG (di->osh, &di->d32txregs->control),
               R_REG (di->osh, &di->d32txregs->addr),
               R_REG (di->osh, &di->d32txregs->ptr),
               R_REG (di->osh, &di->d32txregs->status));

  if (dumpring && di->txd32)
    dma32_dumpring (di, b, di->txd32, di->txin, di->txout, di->ntxd);
}

static void
dma32_dumprx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->nrxd == 0)
    return;

  bcm_bprintf (b, "DMA32: rxd32 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
               di->rxd32, di->rxdpa, di->rxp, di->rxin, di->rxout);

  bcm_bprintf (b, "rcvcontrol 0x%x rcvaddr 0x%x rcvptr 0x%x rcvstatus 0x%x\n",
               R_REG (di->osh, &di->d32rxregs->control),
               R_REG (di->osh, &di->d32rxregs->addr),
               R_REG (di->osh, &di->d32rxregs->ptr),
               R_REG (di->osh, &di->d32rxregs->status));
  if (di->rxd32 && dumpring)
    dma32_dumpring (di, b, di->rxd32, di->rxin, di->rxout, di->nrxd);
}

static void
dma32_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  dma32_dumptx (di, b, dumpring);
  dma32_dumprx (di, b, dumpring);
}

static void
dma64_dumpring (dma_info_t * di, struct bcmstrbuf *b, dma64dd_t * ring,
                uint start, uint end, uint max_num)
{
  uint i;

  for (i = start; i != end; i = XXD ((i + 1), max_num))
    {
      /* in the format of high->low 16 bytes */
      bcm_bprintf (b, "ring index %d: 0x%x %x %x %x\n",
                   i, ring[i].addrhigh, ring[i].addrlow, ring[i].ctrl2,
                   ring[i].ctrl1);
    }
}

static void
dma64_dumptx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->ntxd == 0)
    return;

  bcm_bprintf (b, "DMA64: txd64 %p txdpa 0x%lx txp %p txin %d txout %d "
               "txavail %d\n", di->txd64, di->txdpa, di->txp, di->txin,
               di->txout, di->hnddma.txavail);

  bcm_bprintf (b, "xmtcontrol 0x%x xmtaddrlow 0x%x xmtaddrhigh 0x%x "
               "xmtptr 0x%x xmtstatus0 0x%x xmtstatus1 0x%x\n",
               R_REG (di->osh, &di->d64txregs->control),
               R_REG (di->osh, &di->d64txregs->addrlow),
               R_REG (di->osh, &di->d64txregs->addrhigh),
               R_REG (di->osh, &di->d64txregs->ptr),
               R_REG (di->osh, &di->d64txregs->status0),
               R_REG (di->osh, &di->d64txregs->status1));

  if (dumpring && di->txd64)
    {
      dma64_dumpring (di, b, di->txd64, di->txin, di->txout, di->ntxd);
    }
}

static void
dma64_dumprx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->nrxd == 0)
    return;

  bcm_bprintf (b, "DMA64: rxd64 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
               di->rxd64, di->rxdpa, di->rxp, di->rxin, di->rxout);

  bcm_bprintf (b, "rcvcontrol 0x%x rcvaddrlow 0x%x rcvaddrhigh 0x%x rcvptr "
               "0x%x rcvstatus0 0x%x rcvstatus1 0x%x\n",
               R_REG (di->osh, &di->d64rxregs->control),
               R_REG (di->osh, &di->d64rxregs->addrlow),
               R_REG (di->osh, &di->d64rxregs->addrhigh),
               R_REG (di->osh, &di->d64rxregs->ptr),
               R_REG (di->osh, &di->d64rxregs->status0),
               R_REG (di->osh, &di->d64rxregs->status1));
  if (di->rxd64 && dumpring)
    {
      dma64_dumpring (di, b, di->rxd64, di->rxin, di->rxout, di->nrxd);
    }
}

static void
dma64_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  dma64_dumptx (di, b, dumpring);
  dma64_dumprx (di, b, dumpring);
}

#endif /* BCMDBG */


/* 32-bit DMA functions */
static void
dma32_txinit (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txinit\n", di->name));

  if (di->ntxd == 0)
    return;

  di->txin = di->txout = 0;
  di->hnddma.txavail = di->ntxd - 1;

  /* clear tx descriptor ring */
  BZERO_SM ((void *) (uintptr) di->txd32, (di->ntxd * sizeof (dma32dd_t)));
  W_REG (di->osh, &di->d32txregs->control, XC_XE);
  _dma_ddtable_init (di, DMA_TX, di->txdpa);
}

static bool
dma32_txenabled (dma_info_t * di)
{
  uint32 xc;

  /* If the chip is dead, it is not enabled :-) */
  xc = R_REG (di->osh, &di->d32txregs->control);
  return ((xc != 0xffffffff) && (xc & XC_XE));
}

static void
dma32_txsuspend (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txsuspend\n", di->name));

  if (di->ntxd == 0)
    return;

  OR_REG (di->osh, &di->d32txregs->control, XC_SE);
}

static void
dma32_txresume (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txresume\n", di->name));

  if (di->ntxd == 0)
    return;

  AND_REG (di->osh, &di->d32txregs->control, ~XC_SE);
}

static bool
dma32_txsuspended (dma_info_t * di)
{
  return (di->ntxd == 0)
    || ((R_REG (di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}

static void
dma32_txreclaim (dma_info_t * di, bool forceall)
{
  void *p;

  DMA_TRACE (("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

  while ((p = dma32_getnexttxp (di, forceall)))
    PKTFREE (di->osh, p, TRUE);
}

static bool
dma32_txstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
          XS_XS_STOPPED);
}

static bool
dma32_rxstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
          RS_RS_STOPPED);
}

static bool
dma32_alloc (dma_info_t * di, uint direction)
{
  uint size;
  uint ddlen;
  void *va;

  ddlen = sizeof (dma32dd_t);

  size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

  if (!ISALIGNED (DMA_CONSISTENT_ALIGN, D32RINGALIGN))
    size += D32RINGALIGN;

  if (direction == DMA_TX)
    {
      if ((va = DMA_ALLOC_CONSISTENT (di->osh, size, &di->txdpa,
                                      &di->tx_dmah)) == NULL)
        {
          DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
                      di->name));
          return FALSE;
        }

      di->txd32 = (dma32dd_t *) ROUNDUP ((uintptr) va, D32RINGALIGN);
      di->txdalign = (uint) ((int8 *) (uintptr) di->txd32 - (int8 *) va);
      di->txdpa += di->txdalign;
      di->txdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->txd32, D32RINGALIGN));
    }
  else
    {
      if ((va = DMA_ALLOC_CONSISTENT (di->osh, size, &di->rxdpa,
                                      &di->rx_dmah)) == NULL)
        {
          DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
                      di->name));
          return FALSE;
        }
      di->rxd32 = (dma32dd_t *) ROUNDUP ((uintptr) va, D32RINGALIGN);
      di->rxdalign = (uint) ((int8 *) (uintptr) di->rxd32 - (int8 *) va);
      di->rxdpa += di->rxdalign;
      di->rxdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->rxd32, D32RINGALIGN));
    }

  return TRUE;
}

static bool
dma32_txreset (dma_info_t * di)
{
  uint32 status;

  if (di->ntxd == 0)
    return TRUE;

  /* suspend tx DMA first */
  W_REG (di->osh, &di->d32txregs->control, XC_SE);
  SPINWAIT (((status = (R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK))
             != XS_XS_DISABLED) &&
            (status != XS_XS_IDLE) && (status != XS_XS_STOPPED), (10000));

  W_REG (di->osh, &di->d32txregs->control, 0);
  SPINWAIT (((status = (R_REG (di->osh,
                               &di->d32txregs->status) & XS_XS_MASK)) !=
             XS_XS_DISABLED), 10000);

  /* wait for the last transaction to complete */
  OSL_DELAY (300);

  return (status == XS_XS_DISABLED);
}

static bool
dma32_rxidle (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxidle\n", di->name));

  if (di->nrxd == 0)
    return TRUE;

  return ((R_REG (di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
          R_REG (di->osh, &di->d32rxregs->ptr));
}

static bool
dma32_rxreset (dma_info_t * di)
{
  uint32 status;

  if (di->nrxd == 0)
    return TRUE;

  W_REG (di->osh, &di->d32rxregs->control, 0);
  SPINWAIT (((status = (R_REG (di->osh,
                               &di->d32rxregs->status) & RS_RS_MASK)) !=
             RS_RS_DISABLED), 10000);

  return (status == RS_RS_DISABLED);
}

static bool
dma32_rxenabled (dma_info_t * di)
{
  uint32 rc;

  rc = R_REG (di->osh, &di->d32rxregs->control);
  return ((rc != 0xffffffff) && (rc & RC_RE));
}

static bool
dma32_txsuspendedidle (dma_info_t * di)
{
  if (di->ntxd == 0)
    return TRUE;

  if (!(R_REG (di->osh, &di->d32txregs->control) & XC_SE))
    return 0;

  if ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
    return 0;

  OSL_DELAY (2);
  return ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
          XS_XS_IDLE);
}

/* !! tx entry routine
 * supports full 32-bit dma engine buffer addressing so
 * dma buffers can cross 4 KB page boundaries.
 */
static int
dma32_txfast (dma_info_t * di, void *p0, bool commit)
{
  void *p, *next;
  uchar *data;
  uint len;
  uint txout;
  uint32 flags = 0;
  uint32 pa;

  DMA_TRACE (("%s: dma_txfast\n", di->name));

  txout = di->txout;

  /*
   * Walk the chain of packet buffers
   * allocating and initializing transmit descriptor entries.
   */
  for (p = p0; p; p = next)
    {
      data = PKTDATA (di->osh, p);
      len = PKTLEN (di->osh, p);
      next = PKTNEXT (di->osh, p);

      /* return nonzero if out of tx descriptors */
      if (NEXTTXD (txout) == di->txin)
        goto outoftxd;

      if (len == 0)
        continue;

      /* get physical address of buffer start */
      pa = (uint32) DMA_MAP (di->osh, data, len, DMA_TX, p,
                             &di->txp_dmah[txout]);

      flags = 0;
      if (p == p0)
        flags |= CTRL_SOF;
      if (next == NULL)
        flags |= (CTRL_IOC | CTRL_EOF);
      if (txout == (di->ntxd - 1))
        flags |= CTRL_EOT;

      dma32_dd_upd (di, di->txd32, pa, txout, &flags, len);
      ASSERT (di->txp[txout] == NULL);

      txout = NEXTTXD (txout);
    }

  /* if last txd eof not set, fix it */
  if (!(flags & CTRL_EOF))
    W_SM (&di->txd32[PREVTXD (txout)].ctrl,
          BUS_SWAP32 (flags | CTRL_IOC | CTRL_EOF));

  /* save the packet */
  di->txp[PREVTXD (txout)] = p0;

  /* bump the tx descriptor index */
  di->txout = txout;

  /* kick the chip */
  if (commit)
    W_REG (di->osh, &di->d32txregs->ptr, I2B (txout, dma32dd_t));

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (0);

outoftxd:
  DMA_ERROR (("%s: dma_txfast: out of txds\n", di->name));
  PKTFREE (di->osh, p0, TRUE);
  di->hnddma.txavail = 0;
  di->hnddma.txnobuf++;
  return (-1);
}
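
/*
 * Added walk-through (not in the original): for a chained packet
 * p0 -> p1 -> p2, dma32_txfast() emits three descriptors: the first
 * carries CTRL_SOF, the last CTRL_EOF | CTRL_IOC, and only a descriptor
 * landing at ring index ntxd - 1 ever carries CTRL_EOT. The packet
 * pointer is stored once, in the last descriptor's txp[] slot, which is
 * the slot dma32_getnexttxp() uses to hand the whole chain back.
 */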

/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'forceall' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
static void *
dma32_getnexttxp (dma_info_t * di, bool forceall)
{
  uint start, end, i;
  void *txp;

  DMA_TRACE (("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

  if (di->ntxd == 0)
    return (NULL);

  txp = NULL;

  start = di->txin;
  if (forceall)
    end = di->txout;
  else
    end = B2I (R_REG (di->osh, &di->d32txregs->status) & XS_CD_MASK,
               dma32dd_t);

  if ((start == 0) && (end > di->txout))
    goto bogus;

  for (i = start; i != end && !txp; i = NEXTTXD (i))
    {
      DMA_UNMAP (di->osh,
                 (BUS_SWAP32 (R_SM (&di->txd32[i].addr)) - di->dataoffsetlow),
                 (BUS_SWAP32 (R_SM (&di->txd32[i].ctrl)) & CTRL_BC_MASK),
                 DMA_TX, di->txp[i], &di->txp_dmah[i]);

      W_SM (&di->txd32[i].addr, 0xdeadbeef);
      txp = di->txp[i];
      di->txp[i] = NULL;
    }

  di->txin = i;

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (txp);

bogus:
  /*
   * DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
   *            start, end, di->txout, forceall));
   */
  return (NULL);
}

static void *
dma32_getnextrxp (dma_info_t * di, bool forceall)
{
  uint i;
  void *rxp;

  /* if forcing, dma engine must be disabled */
  ASSERT (!forceall || !dma32_rxenabled (di));

  i = di->rxin;

  /* return if no packets posted */
  if (i == di->rxout)
    return (NULL);

  /* ignore curr if forceall */
  if (!forceall
      && (i ==
          B2I (R_REG (di->osh, &di->d32rxregs->status) & RS_CD_MASK,
               dma32dd_t)))
    return (NULL);

  /* get the packet pointer that corresponds to the rx descriptor */
  rxp = di->rxp[i];
  ASSERT (rxp);
  di->rxp[i] = NULL;

  /* clear this packet from the descriptor ring */
  DMA_UNMAP (di->osh,
             (BUS_SWAP32 (R_SM (&di->rxd32[i].addr)) - di->dataoffsetlow),
             di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

  W_SM (&di->rxd32[i].addr, 0xdeadbeef);

  di->rxin = NEXTRXD (i);

  return (rxp);
}

/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma32_txrotate (dma_info_t * di)
{
  uint ad;
  uint nactive;
  uint rot;
  uint old, new;
  uint32 w;
  uint first, last;

  ASSERT (dma32_txsuspendedidle (di));

  nactive = _dma_txactive (di);
  ad = B2I (((R_REG (di->osh, &di->d32txregs->status) & XS_AD_MASK) >>
             XS_AD_SHIFT), dma32dd_t);
  rot = TXD (ad - di->txin);

  ASSERT (rot < di->ntxd);

  /* full-ring case is a lot harder - don't worry about this */
  if (rot >= (di->ntxd - nactive))
    {
      DMA_ERROR (("%s: dma_txrotate: ring full - punt\n", di->name));
      return;
    }

  first = di->txin;
  last = PREVTXD (di->txout);

  /* move entries starting at last and moving backwards to first */
  for (old = last; old != PREVTXD (first); old = PREVTXD (old))
    {
      new = TXD (old + rot);

      /*
       * Move the tx dma descriptor.
       * EOT is set only in the last entry in the ring.
       */
      w = BUS_SWAP32 (R_SM (&di->txd32[old].ctrl)) & ~CTRL_EOT;
      if (new == (di->ntxd - 1))
        w |= CTRL_EOT;
      W_SM (&di->txd32[new].ctrl, BUS_SWAP32 (w));
      W_SM (&di->txd32[new].addr, R_SM (&di->txd32[old].addr));

      /* zap the old tx dma descriptor address field */
      W_SM (&di->txd32[old].addr, BUS_SWAP32 (0xdeadbeef));

      /* move the corresponding txp[] entry */
      ASSERT (di->txp[new] == NULL);
      di->txp[new] = di->txp[old];
      di->txp[old] = NULL;
    }

  /* update txin and txout */
  di->txin = ad;
  di->txout = TXD (di->txout + rot);
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  /* kick the chip */
  W_REG (di->osh, &di->d32txregs->ptr, I2B (di->txout, dma32dd_t));
}
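
/*
 * Added example (not in the original): suppose ntxd = 8, txin = 2,
 * txout = 5, and the hardware's active descriptor is ad = 4. Then
 * rot = TXD(4 - 2) = 2, and the three posted descriptors at 2..4 are
 * copied to 4..6 (walking backwards so nothing is overwritten), their
 * old addr fields poisoned with 0xdeadbeef, and txin/txout become 4
 * and 7. This realigns the software ring with where the engine will
 * resume after a suspend.
 */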

/* 64-bit DMA functions */

#ifdef BCMDMA64
static void
dma64_txinit (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txinit\n", di->name));

  if (di->ntxd == 0)
    return;

  di->txin = di->txout = 0;
  di->hnddma.txavail = di->ntxd - 1;

  /* clear tx descriptor ring */
  BZERO_SM ((void *) (uintptr) di->txd64, (di->ntxd * sizeof (dma64dd_t)));
  W_REG (di->osh, &di->d64txregs->control, D64_XC_XE);
  _dma_ddtable_init (di, DMA_TX, di->txdpa);
}

static bool
dma64_txenabled (dma_info_t * di)
{
  uint32 xc;

  /* If the chip is dead, it is not enabled :-) */
  xc = R_REG (di->osh, &di->d64txregs->control);
  return ((xc != 0xffffffff) && (xc & D64_XC_XE));
}

static void
dma64_txsuspend (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txsuspend\n", di->name));

  if (di->ntxd == 0)
    return;

  OR_REG (di->osh, &di->d64txregs->control, D64_XC_SE);
}

static void
dma64_txresume (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txresume\n", di->name));

  if (di->ntxd == 0)
    return;

  AND_REG (di->osh, &di->d64txregs->control, ~D64_XC_SE);
}

static bool
dma64_txsuspended (dma_info_t * di)
{
  return (di->ntxd == 0)
    || ((R_REG (di->osh, &di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
}

static void
dma64_txreclaim (dma_info_t * di, bool forceall)
{
  void *p;

  DMA_TRACE (("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

  while ((p = dma64_getnexttxp (di, forceall)))
    PKTFREE (di->osh, p, TRUE);
}

static bool
dma64_txstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
          D64_XS0_XS_STOPPED);
}

static bool
dma64_rxstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
          D64_RS0_RS_STOPPED);
}

static bool
dma64_alloc (dma_info_t * di, uint direction)
{
  uint size;
  uint ddlen;
  uint32 alignbytes;
  void *va;

  ddlen = sizeof (dma64dd_t);

  size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

  alignbytes = di->dma64align;

  if (!ISALIGNED (DMA_CONSISTENT_ALIGN, alignbytes))
    size += alignbytes;

  if (direction == DMA_TX)
    {
      if ((va = DMA_ALLOC_CONSISTENT (di->osh, size, &di->txdpa,
                                      &di->tx_dmah)) == NULL)
        {
          DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
                      di->name));
          return FALSE;
        }

      di->txd64 = (dma64dd_t *) ROUNDUP ((uintptr) va, alignbytes);
      di->txdalign = (uint) ((int8 *) (uintptr) di->txd64 - (int8 *) va);
      di->txdpa += di->txdalign;
      di->txdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->txd64, alignbytes));
    }
  else
    {
      if ((va = DMA_ALLOC_CONSISTENT (di->osh, size, &di->rxdpa,
                                      &di->rx_dmah)) == NULL)
        {
          DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
                      di->name));
          return FALSE;
        }
      di->rxd64 = (dma64dd_t *) ROUNDUP ((uintptr) va, alignbytes);
      di->rxdalign = (uint) ((int8 *) (uintptr) di->rxd64 - (int8 *) va);
      di->rxdpa += di->rxdalign;
      di->rxdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->rxd64, alignbytes));
    }

  return TRUE;
}

static bool
dma64_txreset (dma_info_t * di)
{
  uint32 status;

  if (di->ntxd == 0)
    return TRUE;

  /* suspend tx DMA first */
  W_REG (di->osh, &di->d64txregs->control, D64_XC_SE);
  SPINWAIT (((status =
              (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
             D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
            && (status != D64_XS0_XS_STOPPED), 10000);

  W_REG (di->osh, &di->d64txregs->control, 0);
  SPINWAIT (((status =
              (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
             D64_XS0_XS_DISABLED), 10000);

  /* wait for the last transaction to complete */
  OSL_DELAY (300);

  return (status == D64_XS0_XS_DISABLED);
}

static bool
dma64_rxidle (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxidle\n", di->name));

  if (di->nrxd == 0)
    return TRUE;

  return ((R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
          R_REG (di->osh, &di->d64rxregs->ptr));
}

static bool
dma64_rxreset (dma_info_t * di)
{
  uint32 status;

  if (di->nrxd == 0)
    return TRUE;

  W_REG (di->osh, &di->d64rxregs->control, 0);
  SPINWAIT (((status =
              (R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
             D64_RS0_RS_DISABLED), 10000);

  return (status == D64_RS0_RS_DISABLED);
}

static bool
dma64_rxenabled (dma_info_t * di)
{
  uint32 rc;

  rc = R_REG (di->osh, &di->d64rxregs->control);
  return ((rc != 0xffffffff) && (rc & D64_RC_RE));
}

static bool
dma64_txsuspendedidle (dma_info_t * di)
{
  if (di->ntxd == 0)
    return TRUE;

  if (!(R_REG (di->osh, &di->d64txregs->control) & D64_XC_SE))
    return 0;

  if ((R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
      D64_XS0_XS_IDLE)
    return 1;

  return 0;
}

/* !! tx entry routine */
static int
dma64_txfast (dma_info_t * di, void *p0, bool commit)
{
  void *p, *next;
  uchar *data;
  uint len;
  uint txout;
  uint32 flags = 0;
  uint32 pa;

  DMA_TRACE (("%s: dma_txfast\n", di->name));

  txout = di->txout;

  /*
   * Walk the chain of packet buffers
   * allocating and initializing transmit descriptor entries.
   */
  for (p = p0; p; p = next)
    {
      data = PKTDATA (di->osh, p);
      len = PKTLEN (di->osh, p);
      next = PKTNEXT (di->osh, p);

      /* return nonzero if out of tx descriptors */
      if (NEXTTXD (txout) == di->txin)
        goto outoftxd;

      if (len == 0)
        continue;

      /* get physical address of buffer start */
      pa = (uint32) DMA_MAP (di->osh, data, len, DMA_TX, p,
                             &di->txp_dmah[txout]);

      flags = 0;
      if (p == p0)
        flags |= D64_CTRL1_SOF;
      if (next == NULL)
        flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
      if (txout == (di->ntxd - 1))
        flags |= D64_CTRL1_EOT;

      dma64_dd_upd (di, di->txd64, pa, txout, &flags, len);
      ASSERT (di->txp[txout] == NULL);

      txout = NEXTTXD (txout);
    }

  /* if last txd eof not set, fix it */
  if (!(flags & D64_CTRL1_EOF))
    W_SM (&di->txd64[PREVTXD (txout)].ctrl1,
          BUS_SWAP32 (flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

  /* save the packet */
  di->txp[PREVTXD (txout)] = p0;

  /* bump the tx descriptor index */
  di->txout = txout;

  /* kick the chip */
  if (commit)
    W_REG (di->osh, &di->d64txregs->ptr, I2B (txout, dma64dd_t));

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (0);

outoftxd:
  DMA_ERROR (("%s: dma_txfast: out of txds\n", di->name));
  PKTFREE (di->osh, p0, TRUE);
  di->hnddma.txavail = 0;
  di->hnddma.txnobuf++;
  return (-1);
}

/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'forceall' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
static void *
dma64_getnexttxp (dma_info_t * di, bool forceall)
{
  uint start, end, i;
  void *txp;

  DMA_TRACE (("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

  if (di->ntxd == 0)
    return (NULL);

  txp = NULL;

  start = di->txin;
  if (forceall)
    end = di->txout;
  else
    end = B2I (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK,
               dma64dd_t);

  if ((start == 0) && (end > di->txout))
    goto bogus;

  for (i = start; i != end && !txp; i = NEXTTXD (i))
    {
      DMA_UNMAP (di->osh,
                 (BUS_SWAP32 (R_SM (&di->txd64[i].addrlow)) -
                  di->dataoffsetlow),
                 (BUS_SWAP32 (R_SM (&di->txd64[i].ctrl2)) &
                  D64_CTRL2_BC_MASK), DMA_TX, di->txp[i], &di->txp_dmah[i]);

      W_SM (&di->txd64[i].addrlow, 0xdeadbeef);
      W_SM (&di->txd64[i].addrhigh, 0xdeadbeef);

      txp = di->txp[i];
      di->txp[i] = NULL;
    }

  di->txin = i;

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (txp);

bogus:
  /*
   * DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
   *            start, end, di->txout, forceall));
   */
  return (NULL);
}

static void *
dma64_getnextrxp (dma_info_t * di, bool forceall)
{
  uint i;
  void *rxp;

  /* if forcing, dma engine must be disabled */
  ASSERT (!forceall || !dma64_rxenabled (di));

  i = di->rxin;

  /* return if no packets posted */
  if (i == di->rxout)
    return (NULL);

  /* ignore curr if forceall */
  if (!forceall &&
      (i ==
       B2I (R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK,
            dma64dd_t)))
    return (NULL);

  /* get the packet pointer that corresponds to the rx descriptor */
  rxp = di->rxp[i];
  ASSERT (rxp);
  di->rxp[i] = NULL;

  /* clear this packet from the descriptor ring */
  DMA_UNMAP (di->osh,
             (BUS_SWAP32 (R_SM (&di->rxd64[i].addrlow)) - di->dataoffsetlow),
             di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

  W_SM (&di->rxd64[i].addrlow, 0xdeadbeef);
  W_SM (&di->rxd64[i].addrhigh, 0xdeadbeef);

  di->rxin = NEXTRXD (i);

  return (rxp);
}

static bool
_dma64_addrext (osl_t * osh, dma64regs_t * dma64regs)
{
  uint32 w;
  OR_REG (osh, &dma64regs->control, D64_XC_AE);
  w = R_REG (osh, &dma64regs->control);
  AND_REG (osh, &dma64regs->control, ~D64_XC_AE);
  return ((w & D64_XC_AE) == D64_XC_AE);
}

/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma64_txrotate (dma_info_t * di)
{
  uint ad;
  uint nactive;
  uint rot;
  uint old, new;
  uint32 w;
  uint first, last;

  ASSERT (dma64_txsuspendedidle (di));

  nactive = _dma_txactive (di);
  ad = B2I ((R_REG (di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK),
            dma64dd_t);
  rot = TXD (ad - di->txin);

  ASSERT (rot < di->ntxd);

  /* full-ring case is a lot harder - don't worry about this */
  if (rot >= (di->ntxd - nactive))
    {
      DMA_ERROR (("%s: dma_txrotate: ring full - punt\n", di->name));
      return;
    }

  first = di->txin;
  last = PREVTXD (di->txout);

  /* move entries starting at last and moving backwards to first */
  for (old = last; old != PREVTXD (first); old = PREVTXD (old))
    {
      new = TXD (old + rot);

      /*
       * Move the tx dma descriptor.
       * EOT is set only in the last entry in the ring.
       */
      w = BUS_SWAP32 (R_SM (&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
      if (new == (di->ntxd - 1))
        w |= D64_CTRL1_EOT;
      W_SM (&di->txd64[new].ctrl1, BUS_SWAP32 (w));

      w = BUS_SWAP32 (R_SM (&di->txd64[old].ctrl2));
      W_SM (&di->txd64[new].ctrl2, BUS_SWAP32 (w));

      W_SM (&di->txd64[new].addrlow, R_SM (&di->txd64[old].addrlow));
      W_SM (&di->txd64[new].addrhigh, R_SM (&di->txd64[old].addrhigh));

      /* zap the old tx dma descriptor address field */
      W_SM (&di->txd64[old].addrlow, BUS_SWAP32 (0xdeadbeef));
      W_SM (&di->txd64[old].addrhigh, BUS_SWAP32 (0xdeadbeef));

      /* move the corresponding txp[] entry */
      ASSERT (di->txp[new] == NULL);
      di->txp[new] = di->txp[old];
      di->txp[old] = NULL;
    }

  /* update txin and txout */
  di->txin = ad;
  di->txout = TXD (di->txout + rot);
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  /* kick the chip */
  W_REG (di->osh, &di->d64txregs->ptr, I2B (di->txout, dma64dd_t));
}

#endif /* BCMDMA64 */

uint
dma_addrwidth (sb_t * sbh, void *dmaregs)
{
  dma32regs_t *dma32regs;
  osl_t *osh;

  osh = sb_osh (sbh);

  if (DMA64_CAP)
    {
      /* DMA engine is 64-bit capable */
      if (((sb_coreflagshi (sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64))
        {
          /* backplane is 64-bit capable */
          if (sb_backplane64 (sbh))
            /* If bus is System Backplane or PCIE then we can access 64 bits */
            if ((BUSTYPE (sbh->bustype) == SB_BUS) ||
                ((BUSTYPE (sbh->bustype) == PCI_BUS) &&
                 sbh->buscoretype == SB_PCIE))
              return (DMADDRWIDTH_64);

          /* DMA64 is always 32-bit capable, AE is always TRUE */
#ifdef BCMDMA64
          ASSERT (_dma64_addrext (osh, (dma64regs_t *) dmaregs));
#endif
          return (DMADDRWIDTH_32);
        }
    }

  /* Start checking for 32-bit / 30-bit addressing */
  dma32regs = (dma32regs_t *) dmaregs;

  /* For System Backplane, PCIE bus, or the addrext feature, 32 bits are ok */
  if ((BUSTYPE (sbh->bustype) == SB_BUS) ||
      ((BUSTYPE (sbh->bustype) == PCI_BUS) && sbh->buscoretype == SB_PCIE) ||
      (_dma32_addrext (osh, dma32regs)))
    return (DMADDRWIDTH_32);

  /* Fallthru */
  return (DMADDRWIDTH_30);
}
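
/*
 * Added note (not in the original): callers can probe the engine before
 * attach and size their buffer pools accordingly, e.g. (illustrative
 * pseudocode, not an API from this file):
 *
 *   if (dma_addrwidth (sbh, dmaregs) == DMADDRWIDTH_30)
 *     ;  // restrict DMA buffer allocation to the low 1GB of memory
 *
 * since DMADDRWIDTH_30 means neither 64-bit operation nor the 32-bit
 * addrext workaround is available, leaving only 30 usable address bits.
 */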