/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright 2006, Broadcom Corporation
 * All Rights Reserved.
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 * $Id: hnddma.c,v 1.11 2006/04/08 07:12:42 honor Exp $
 */

#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include "linux_osl.h"
#include <bcmendian.h>
#include <sbconfig.h>
#include <bcmutils.h>
#include <bcmdevs.h>
#include <sbutils.h>

#include "sbhnddma.h"
#include "hnddma.h"

/* debug/trace */
#define DMA_ERROR(args)
#define DMA_TRACE(args)

/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level = 0;

#define MAXNAMEL	8	/* 8 char names */

#define DI_INFO(dmah)	(dma_info_t *)dmah

/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
					 * which could be const
					 */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* caller's name for diag msgs */

	void *osh;		/* os handle */
	sb_t *sbh;		/* sb handle */

	bool dma64;		/* dma64 enabled */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

	dma32regs_t *d32txregs;	/* 32-bit dma tx engine registers */
	dma32regs_t *d32rxregs;	/* 32-bit dma rx engine registers */
	dma64regs_t *d64txregs;	/* 64-bit dma tx engine registers */
	dma64regs_t *d64rxregs;	/* 64-bit dma rx engine registers */

	uint32 dma64align;	/* either 8K or 4K, depending on the number of descriptors */
	dma32dd_t *txd32;	/* pointer to dma32 tx descriptor ring */
	dma64dd_t *txd64;	/* pointer to dma64 tx descriptor ring */
	uint ntxd;		/* # tx descriptors tunable */
	uint txin;		/* index of next descriptor to reclaim */
	uint txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	osldma_t *tx_dmah;	/* DMA TX descriptor ring handle */
	osldma_t **txp_dmah;	/* DMA TX packet data handle */
	ulong txdpa;		/* physical address of descriptor ring */
	uint txdalign;		/* #bytes added to alloc'd mem to align txd */
	uint txdalloc;		/* #bytes allocated for the ring */

	dma32dd_t *rxd32;	/* pointer to dma32 rx descriptor ring */
	dma64dd_t *rxd64;	/* pointer to dma64 rx descriptor ring */
	uint nrxd;		/* # rx descriptors tunable */
	uint rxin;		/* index of next descriptor to reclaim */
	uint rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t *rx_dmah;	/* DMA RX descriptor ring handle */
	osldma_t **rxp_dmah;	/* DMA RX packet data handle */
	ulong rxdpa;		/* physical address of descriptor ring */
	uint rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	uint rxdalloc;		/* #bytes allocated for the ring */

	/* tunables */
	uint rxbufsize;		/* rx buffer size in bytes, not including the extra headroom */
	uint nrxpost;		/* # rx buffers to keep posted */
	uint rxoffset;		/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/* high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/* high 32 bits */
} dma_info_t;

/* descriptor bumping macros */
#define XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define TXD(x)		XXD((x), di->ntxd)
#define RXD(x)		XXD((x), di->nrxd)
#define NEXTTXD(i)	TXD(i + 1)
#define PREVTXD(i)	TXD(i - 1)
#define NEXTRXD(i)	RXD(i + 1)
#define NTXDACTIVE(h, t)	TXD(t - h)
#define NRXDACTIVE(h, t)	RXD(t - h)
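
/*
 * Worked example of the ring-index arithmetic above (illustrative only,
 * not part of the original source): with di->ntxd == 4, the mask is 3, so
 *   NEXTTXD(3) == ((3 + 1) & 3) == 0        (wraps back to the ring start)
 *   PREVTXD(0) == ((0 - 1) & 3) == 3        (unsigned wraparound)
 *   NTXDACTIVE(2, 1) == ((1 - 2) & 3) == 3  descriptors in flight.
 * Masking only equals the true modulo because ntxd/nrxd are powers of 2,
 * which dma_attach() asserts via ISPOWEROF2().
 */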

/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)	((bytes) / sizeof(type))
#define I2B(index, type)	((index) * sizeof(type))
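
/*
 * Example (illustrative, assuming the usual 8-byte dma32dd_t with two
 * uint32 words, ctrl and addr): a hardware "current descriptor" byte
 * offset of 0x20 read from the status register converts to ring index
 *   B2I(0x20, dma32dd_t) == 0x20 / 8 == 4,
 * and posting descriptor 4 back to the chip uses
 *   I2B(4, dma32dd_t) == 32.
 */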

#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */
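
/*
 * Address-extension example (illustrative): a physical address such as
 * pa == 0xC1234567 has bits [31:30] set, which a 30-bit PCI window cannot
 * express directly.  dma32_dd_upd() then splits it as
 *   ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;  -- ae == 3
 *   pa &= ~PCI32ADDR_HIGH;                               -- pa == 0x01234567
 * and carries ae in the descriptor's CTRL_AE field.
 */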

/* common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, ulong pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static void _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static uintptr _dma_getvar(dma_info_t *di, char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);

/* 32-bit DMA prototypes */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, bool forceall);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, bool forceall);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);
static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);

static di_fcn_t dma32proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma32_txinit,
	(di_txreset_t)dma32_txreset,
	(di_txenabled_t)dma32_txenabled,
	(di_txsuspend_t)dma32_txsuspend,
	(di_txresume_t)dma32_txresume,
	(di_txsuspended_t)dma32_txsuspended,
	(di_txsuspendedidle_t)dma32_txsuspendedidle,
	(di_txfast_t)dma32_txfast,
	(di_txstopped_t)dma32_txstopped,
	(di_txreclaim_t)dma32_txreclaim,
	(di_getnexttxp_t)dma32_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma32_txrotate,

	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma32_rxreset,
	(di_rxidle_t)dma32_rxidle,
	(di_rxstopped_t)dma32_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma32_rxenabled,
	(di_rx_t)_dma_rx,
	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,

	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,

	NULL,
	NULL,
	NULL,
	34
};

hnddma_t *
dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx,
           uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof(dma_info_t))) == NULL) {
		return (NULL);
	}
	bzero((char *)di, sizeof(dma_info_t));

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb are no longer supported */
	ASSERT(sbh != NULL);

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));
	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	ASSERT(ntxd <= D32MAXDD);
	ASSERT(nrxd <= D32MAXDD);
	di->d32txregs = (dma32regs_t *)dmaregstx;
	di->d32rxregs = (dma32regs_t *)dmaregsrx;

	DMA_TRACE(("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d "
	           "rxoffset %d dmaregstx %p dmaregsrx %p\n",
	           name, "DMA32", osh, ntxd, nrxd, rxbufsize,
	           nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our caller's name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';

	di->osh = osh;
	di->sbh = sbh;

	/* save tunables */
	di->ntxd = ntxd;
	di->nrxd = nrxd;

	/* the actual dma size doesn't include the extra headroom */
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
	else
		di->rxbufsize = rxbufsize;

	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *	for old chips w/o sb, use zero
	 *	for new chips w/ sb,
	 *	    PCI/PCIE: they map the silicon backplane address to zero-based memory, need offset
	 *	    Other bus: use zero
	 *	    SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
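	/*
	 * Illustrative example (values assumed, not from this file): on a
	 * PCI host the backplane exposes DMA-visible memory at SB_PCI_DMA,
	 * so a host physical address pa is programmed into the engine as
	 * pa + di->ddoffsetlow (descriptors) or pa + di->dataoffsetlow
	 * (buffers); on SB_BUS both offsets stay zero and pa is used as-is.
	 */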
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sbh->bustype == PCI_BUS) {
		di->ddoffsetlow = SB_PCI_DMA;
		di->ddoffsethigh = 0;
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}

#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif

	di->addrext = _dma_isaddrext(di);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		if ((di->txp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->txp, size);
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		if ((di->rxp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->rxp, size);
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!dma32_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!dma32_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
		DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
		           di->name, di->txdpa));
		goto fail;
	}
	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
		DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
		           di->name, di->rxdpa));
		goto fail;
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
	           "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow,
	           di->dataoffsethigh, di->addrext));

	/* allocate tx packet DMA mapping handle vector */
	if (ntxd) {
		size = ntxd * sizeof(osldma_t **);
		if ((di->txp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
			goto fail;
		bzero((char *)di->txp_dmah, size);
	} else
		di->txp_dmah = NULL;

	/* allocate rx packet DMA mapping handle vector */
	if (nrxd) {
		size = nrxd * sizeof(osldma_t **);
		if ((di->rxp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
			goto fail;
		bzero((char *)di->rxp_dmah, size);
	} else
		di->rxp_dmah = NULL;

	/* initialize opsvec of function pointers */
	di->hnddma.di_fn = dma32proc;

	return ((hnddma_t *)di);

fail:
	_dma_detach(di);
	return (NULL);
}


/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx, uint32 *flags,
             uint32 bufcount)
{
	/* dma32 uses a 32-bit control word to fit both flags and the buffer count */
	*flags = *flags | (bufcount & CTRL_BC_MASK);

	if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	} else {
		/* address extension */
		uint32 ae;
		ASSERT(di->addrext);
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		*flags |= (ae << CTRL_AE_SHIFT);
		W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	}
}
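
/*
 * Packing example for the 32-bit descriptor control word (illustrative):
 * for the last buffer of a frame posted at the end of the ring, the caller
 * passes flags = CTRL_IOC | CTRL_EOF | CTRL_EOT and, say, bufcount = 1522;
 * dma32_dd_upd() ORs (1522 & CTRL_BC_MASK) into the byte-count field, so a
 * single uint32 carries both the control bits and the buffer length.
 */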

static bool
_dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
{
	uint32 w;

	OR_REG(osh, &dma32regs->control, XC_AE);
	w = R_REG(osh, &dma32regs->control);
	AND_REG(osh, &dma32regs->control, ~XC_AE);
	return ((w & XC_AE) == XC_AE);
}

/* !! may be called with core in reset */
static void
_dma_detach(dma_info_t *di)
{
	if (di == NULL)
		return;

	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (di->txd32)
		DMA_FREE_CONSISTENT(di->osh, ((int8 *)di->txd32 - di->txdalign),
		                    di->txdalloc, (di->txdpa - di->txdalign), &di->tx_dmah);
	if (di->rxd32)
		DMA_FREE_CONSISTENT(di->osh, ((int8 *)di->rxd32 - di->rxdalign),
		                    di->rxdalloc, (di->rxdpa - di->rxdalign), &di->rx_dmah);

	/* free packet pointer vectors */
	if (di->txp)
		MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
	if (di->rxp)
		MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));

	/* free tx packet DMA handles */
	if (di->txp_dmah)
		MFREE(di->osh, (void *)di->txp_dmah, di->ntxd * sizeof(osldma_t **));

	/* free rx packet DMA handles */
	if (di->rxp_dmah)
		MFREE(di->osh, (void *)di->rxp_dmah, di->nrxd * sizeof(osldma_t **));

	/* free our private info structure */
	MFREE(di->osh, (void *)di, sizeof(dma_info_t));
}

/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool
_dma_isaddrext(dma_info_t *di)
{
	if (di->d32txregs)
		return (_dma32_addrext(di->osh, di->d32txregs));
	else if (di->d32rxregs)
		return (_dma32_addrext(di->osh, di->d32rxregs));
	return FALSE;
}

/* initialize descriptor table base address */
static void
_dma_ddtable_init(dma_info_t *di, uint direction, ulong pa)
{
	if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX)
			W_REG(di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
		else
			W_REG(di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
	} else {
		/* dma32 address extension */
		uint32 ae;
		ASSERT(di->addrext);

		/* shift the high bit(s) from pa to ae */
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			W_REG(di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
			SET_REG(di->osh, &di->d32txregs->control, XC_AE, ae << XC_AE_SHIFT);
		} else {
			W_REG(di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
			SET_REG(di->osh, &di->d32rxregs->control, RC_AE, ae << RC_AE_SHIFT);
		}
	}
}

static void
_dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
	OR_REG(di->osh, &di->d32txregs->control, XC_LE);
}

static void
_dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	BZERO_SM((void *)di->rxd32, (di->nrxd * sizeof(dma32dd_t)));
	_dma_rxenable(di);
	_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}

static void
_dma_rxenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	W_REG(di->osh, &di->d32rxregs->control, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}

/* !! rx entry routine, returns a pointer to the next frame received,
 * or NULL if there are no more
 */
static void *
_dma_rx(dma_info_t *di)
{
	void *p;
	uint len;
	int skiplen = 0;

	while ((p = _dma_getnextrxp(di, FALSE))) {
		/* skip giant packets which span multiple rx descriptors */
		if (skiplen > 0) {
			skiplen -= di->rxbufsize;
			if (skiplen < 0)
				skiplen = 0;
			PKTFREE(di->osh, p, FALSE);
			continue;
		}

		len = ltoh16(*(uint16 *)(PKTDATA(di->osh, p)));
		DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

		/* bad frame length check */
		if (len > (di->rxbufsize - di->rxoffset)) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
			if (len > 0)
				skiplen = len - (di->rxbufsize - di->rxoffset);
			PKTFREE(di->osh, p, FALSE);
			di->hnddma.rxgiants++;
			continue;
		}

		/* set actual length */
		PKTSETLEN(di->osh, p, (di->rxoffset + len));

		break;
	}

	return (p);
}
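
/*
 * Giant-frame skip example (illustrative numbers): with rxbufsize == 2048,
 * rxoffset == 30 and a reported len of 5000, the frame exceeds
 * 2048 - 30 == 2018 bytes, so skiplen becomes 5000 - 2018 == 2982 and the
 * next two buffers (2982 - 2048 -> 934, then 934 - 2048 -> clamped to 0)
 * are freed before normal reception resumes.
 */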

/* post receive buffers */
static void
_dma_rxfill(dma_info_t *di)
{
	void *p;
	uint rxin, rxout;
	uint32 flags = 0;
	uint n;
	uint i;
	uint32 pa;
	uint extra_offset = 0;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = BCMEXTRAHDROOM;

	for (i = 0; i < n; i++) {
		/* di->rxbufsize doesn't include the extra headroom, so add it
		 * to the size to be allocated
		 */
		if ((p = PKTGET(di->osh, di->rxbufsize + extra_offset,
		                FALSE)) == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
			di->hnddma.rxnobuf++;
			break;
		}
		/* reserve the extra headroom, if applicable */
		if (extra_offset)
			PKTPULL(di->osh, p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(uint32 *)(PKTDATA(di->osh, p)) = 0;

		pa = (uint32)DMA_MAP(di->osh, PKTDATA(di->osh, p),
		                     di->rxbufsize, DMA_RX, p);

		ASSERT(ISALIGNED(pa, 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = CTRL_EOT;
		dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
}
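
/*
 * Posting example (illustrative): with nrxpost == 16, rxin == 5 and
 * rxout == 9, NRXDACTIVE(5, 9) == 4 buffers are already posted, so the
 * loop above allocates and posts n == 16 - 4 == 12 fresh buffers before
 * advancing the chip's lastdscr pointer.
 */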

/* like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	if (di->ntxd == 0)
		return (NULL);

	end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return (di->txp[i]);

	return (NULL);
}

static void
_dma_rxreclaim(dma_info_t *di)
{
	void *p;

	/* "unused local" warning suppression for OSLs that
	 * define PKTFREE() without using the di->osh arg
	 */
	di = di;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, TRUE)))
		PKTFREE(di->osh, p, FALSE);
}

static void *
_dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return (NULL);

	return dma32_getnextrxp(di, forceall);
}

static void
_dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}

static void
_dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint
_dma_txactive(dma_info_t *di)
{
	return (NTXDACTIVE(di->txin, di->txout));
}

static void
_dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}

/* get the address of the var in order to change it later */
static uintptr
_dma_getvar(dma_info_t *di, char *name)
{
	if (!strcmp(name, "&txavail"))
		return ((uintptr)&(di->hnddma.txavail));
	else {
		ASSERT(0);
	}
	return (0);
}

void
dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
{
	OR_REG(osh, &regs->control, XC_LE);
}

/* 32-bit DMA functions */
static void
dma32_txinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void *)di->txd32, (di->ntxd * sizeof(dma32dd_t)));
	W_REG(di->osh, &di->d32txregs->control, XC_XE);
	_dma_ddtable_init(di, DMA_TX, di->txdpa);
}

static bool
dma32_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(di->osh, &di->d32txregs->control);
	return ((xc != 0xffffffff) && (xc & XC_XE));
}

static void
dma32_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}

static void
dma32_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}

static bool
dma32_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}

static void
dma32_txreclaim(dma_info_t *di, bool forceall)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

	while ((p = dma32_getnexttxp(di, forceall)))
		PKTFREE(di->osh, p, TRUE);
}

static bool
dma32_txstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
}

static bool
dma32_rxstopped(dma_info_t *di)
{
	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
}

static bool
dma32_alloc(dma_info_t *di, uint direction)
{
	uint size;
	uint ddlen;
	void *va;

	ddlen = sizeof(dma32dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
		size += D32RINGALIGN;

	if (direction == DMA_TX) {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa, &di->tx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
			           di->name));
			return FALSE;
		}

		di->txd32 = (dma32dd_t *)ROUNDUP((uintptr)va, D32RINGALIGN);
		di->txdalign = (uint)((int8 *)di->txd32 - (int8 *)va);
		di->txdpa += di->txdalign;
		di->txdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->txd32, D32RINGALIGN));
	} else {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa, &di->rx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
			           di->name));
			return FALSE;
		}
		di->rxd32 = (dma32dd_t *)ROUNDUP((uintptr)va, D32RINGALIGN);
		di->rxdalign = (uint)((int8 *)di->rxd32 - (int8 *)va);
		di->rxdpa += di->rxdalign;
		di->rxdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->rxd32, D32RINGALIGN));
	}

	return TRUE;
}

static bool
dma32_txreset(dma_info_t *di)
{
	uint32 status;

	if (di->ntxd == 0)
		return TRUE;

	/* suspend tx DMA first */
	W_REG(di->osh, &di->d32txregs->control, XC_SE);
	SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
	          != XS_XS_DISABLED) &&
	         (status != XS_XS_IDLE) &&
	         (status != XS_XS_STOPPED),
	         (10000));

	W_REG(di->osh, &di->d32txregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
	          &di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED),
	         10000);

	/* wait for the last transaction to complete */
	OSL_DELAY(300);

	return (status == XS_XS_DISABLED);
}

static bool
dma32_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return TRUE;

	return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
	        R_REG(di->osh, &di->d32rxregs->ptr));
}

static bool
dma32_rxreset(dma_info_t *di)
{
	uint32 status;

	if (di->nrxd == 0)
		return TRUE;

	W_REG(di->osh, &di->d32rxregs->control, 0);
	SPINWAIT(((status = (R_REG(di->osh,
	          &di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED),
	         10000);

	return (status == RS_RS_DISABLED);
}

static bool
dma32_rxenabled(dma_info_t *di)
{
	uint32 rc;

	rc = R_REG(di->osh, &di->d32rxregs->control);
	return ((rc != 0xffffffff) && (rc & RC_RE));
}

static bool
dma32_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return TRUE;

	if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
		return FALSE;

	if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
		return FALSE;

	OSL_DELAY(2);
	return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
}

/* !! tx entry routine
 * supports full 32-bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 */
static int
dma32_txfast(dma_info_t *di, void *p0, bool commit)
{
	void *p, *next;
	uchar *data;
	uint len;
	uint txout;
	uint32 flags = 0;
	uint32 pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = PKTDATA(di->osh, p);
		len = PKTLEN(di->osh, p);
		next = PKTNEXT(di->osh, p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		pa = (uint32)DMA_MAP(di->osh, data, len, DMA_TX, p);

		flags = 0;
		if (p == p0)
			flags |= CTRL_SOF;
		if (next == NULL)
			flags |= (CTRL_IOC | CTRL_EOF);
		if (txout == (di->ntxd - 1))
			flags |= CTRL_EOT;

		dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
		ASSERT(di->txp[txout] == NULL);

		txout = NEXTTXD(txout);
	}

	/* if last txd eof not set, fix it */
	if (!(flags & CTRL_EOF))
		W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
	PKTFREE(di->osh, p0, TRUE);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}
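
/*
 * Descriptor flag example for the loop above (illustrative): a two-buffer
 * packet chain posted at ring positions 6 and 7 of an 8-entry ring gets
 *   buffer 0 (p == p0):                   CTRL_SOF
 *   buffer 1 (next == NULL, and last ring
 *   slot since txout == ntxd - 1):        CTRL_IOC | CTRL_EOF | CTRL_EOT
 * so the engine interrupts once per frame rather than once per buffer.
 */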

/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
static void *
dma32_getnexttxp(dma_info_t *di, bool forceall)
{
	uint start, end, i;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

	if (di->ntxd == 0)
		return (NULL);

	txp = NULL;

	start = di->txin;
	if (forceall)
		end = di->txout;
	else
		end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow),
		          (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK),
		          DMA_TX, di->txp[i]);

		W_SM(&di->txd32[i].addr, 0xdeadbeef);
		txp = di->txp[i];
		di->txp[i] = NULL;
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (txp);

bogus:
/*
	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	           start, end, di->txout, forceall));
*/
	return (NULL);
}

static void *
dma32_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i;
	void *rxp;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma32_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return (NULL);

	/* ignore curr if forceall */
	if (!forceall && (i == B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t)))
		return (NULL);

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow),
	          di->rxbufsize, DMA_RX, rxp);

	W_SM(&di->rxd32[i].addr, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return (rxp);
}

/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma32_txrotate(dma_info_t *di)
{
	uint ad;
	uint nactive;
	uint rot;
	uint old, new;
	uint32 w;
	uint first, last;

	ASSERT(dma32_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = B2I(((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t);
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
		if (new == (di->ntxd - 1))
			w |= CTRL_EOT;
		W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
		W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];
		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
}
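
/*
 * Rotation example (illustrative): in an 8-entry ring with txin == 2,
 * txout == 5 (descriptors 2..4 active) and hardware active descriptor
 * ad == 4, rot == TXD(4 - 2) == 2, so entries 4, 3, 2 are copied to
 * 6, 5, 4 (last to first, to avoid overwriting live entries), after which
 * txin becomes 4 and txout becomes 7.
 */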

uint
dma_addrwidth(sb_t *sbh, void *dmaregs)
{
	dma32regs_t *dma32regs;
	osl_t *osh;

	osh = sb_osh(sbh);

	/* Start checking for 32-bit / 30-bit addressing */
	dma32regs = (dma32regs_t *)dmaregs;

	/* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
	if ((BUSTYPE(sbh->bustype) == SB_BUS) ||
	    ((BUSTYPE(sbh->bustype) == PCI_BUS) && sbh->buscoretype == SB_PCIE) ||
	    (_dma32_addrext(osh, dma32regs)))
		return (DMADDRWIDTH_32);

	/* Fallthru */
	return (DMADDRWIDTH_30);
}