/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright 2007, Broadcom Corporation
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 */
#include "linux_osl.h"
#include <bcmendian.h>
/* debug/trace */
#ifdef BCMDBG
#define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args
#define DMA_TRACE(args) if (!(*di->msg_level & 2)); else printf args
#else
#define DMA_ERROR(args)
#define DMA_TRACE(args)
#endif /* BCMDBG */
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level = 0;
#define MAXNAMEL 8 /* 8 char names */

#define DI_INFO(dmah) (dma_info_t *)dmah
typedef struct osl_dmainfo osldma_t;
/* dma engine software state */
typedef struct dma_info
{
  struct hnddma_pub hnddma;   /* exported structure, don't use hnddma_t,
                               * which could be const
                               */
  uint *msg_level;            /* message level pointer */
  char name[MAXNAMEL];        /* caller's name for diag msgs */

  void *osh;                  /* os handle */
  sb_t *sbh;                  /* sb handle */

  bool dma64;                 /* dma64 enabled */
  bool addrext;               /* this dma engine supports DmaExtendedAddrChanges */

  dma32regs_t *d32txregs;     /* 32 bits dma tx engine registers */
  dma32regs_t *d32rxregs;     /* 32 bits dma rx engine registers */
  dma64regs_t *d64txregs;     /* 64 bits dma tx engine registers */
  dma64regs_t *d64rxregs;     /* 64 bits dma rx engine registers */

  uint32 dma64align;          /* either 8k or 4k depends on number of dd */
  dma32dd_t *txd32;           /* pointer to dma32 tx descriptor ring */
  dma64dd_t *txd64;           /* pointer to dma64 tx descriptor ring */
  uint ntxd;                  /* # tx descriptors tunable */
  uint txin;                  /* index of next descriptor to reclaim */
  uint txout;                 /* index of next descriptor to post */
  void **txp;                 /* pointer to parallel array of pointers to packets */
  osldma_t *tx_dmah;          /* DMA TX descriptor ring handle */
  osldma_t **txp_dmah;        /* DMA TX packet data handle */
  ulong txdpa;                /* physical address of descriptor ring */
  uint txdalign;              /* #bytes added to alloc'd mem to align txd */
  uint txdalloc;              /* #bytes allocated for the ring */

  dma32dd_t *rxd32;           /* pointer to dma32 rx descriptor ring */
  dma64dd_t *rxd64;           /* pointer to dma64 rx descriptor ring */
  uint nrxd;                  /* # rx descriptors tunable */
  uint rxin;                  /* index of next descriptor to reclaim */
  uint rxout;                 /* index of next descriptor to post */
  void **rxp;                 /* pointer to parallel array of pointers to packets */
  osldma_t *rx_dmah;          /* DMA RX descriptor ring handle */
  osldma_t **rxp_dmah;        /* DMA RX packet data handle */
  ulong rxdpa;                /* physical address of descriptor ring */
  uint rxdalign;              /* #bytes added to alloc'd mem to align rxd */
  uint rxdalloc;              /* #bytes allocated for the ring */

  uint rxbufsize;             /* rx buffer size in bytes,
                               * not including the extra headroom
                               */
  uint nrxpost;               /* # rx buffers to keep posted */
  uint rxoffset;              /* rxcontrol offset */
  uint ddoffsetlow;           /* add to get dma address of descriptor ring, low 32 bits */
  uint ddoffsethigh;          /*   high 32 bits */
  uint dataoffsetlow;         /* add to get dma address of data buffer, low 32 bits */
  uint dataoffsethigh;        /*   high 32 bits */
} dma_info_t;
#ifdef BCMDMA64
#define DMA64_ENAB(di) ((di)->dma64)
#define DMA64_CAP TRUE
#else
#define DMA64_ENAB(di) (0)
#define DMA64_CAP FALSE
#endif /* BCMDMA64 */
/* descriptor bumping macros */
#define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
#define TXD(x) XXD((x), di->ntxd)
#define RXD(x) XXD((x), di->nrxd)
#define NEXTTXD(i) TXD(i + 1)
#define PREVTXD(i) TXD(i - 1)
#define NEXTRXD(i) RXD(i + 1)
#define NTXDACTIVE(h, t) TXD(t - h)
#define NRXDACTIVE(h, t) RXD(t - h)
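/*
 * Worked example: with ntxd = 8 (a power of 2, as dma_attach() asserts),
 * the masking in XXD() implements ring wraparound without a modulo:
 *
 *   NEXTTXD(7)       -> (7 + 1) & 7 == 0    (wraps to ring start)
 *   NTXDACTIVE(6, 2) -> (2 - 6) & 7 == 4    (4 descriptors in flight)
 *
 * The unsigned subtraction underflows harmlessly because the mask keeps
 * only the low log2(ntxd) bits.
 */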
/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type) ((bytes) / sizeof(type))
#define I2B(index, type) ((index) * sizeof(type))

#define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
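/*
 * Worked example: an engine with address extension (addrext) handles a
 * physical address whose top two bits are set by splitting them off.
 * For pa = 0x9fff0000:
 *
 *   ae = (0x9fff0000 & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;  == 2
 *   pa &= ~PCI32ADDR_HIGH;                                       == 0x1fff0000
 *
 * The ae bits are then programmed into CTRL_AE (dma32) or D64_CTRL2_AE
 * (dma64) in the descriptor, as dma32_dd_upd()/dma64_dd_upd() below do,
 * so the hardware can reassemble the full 32-bit address.
 */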
/* common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, ulong pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static void _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static uintptr _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
/* ** 32 bit DMA prototypes */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, bool forceall);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, bool forceall);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);
static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
/* ** 64 bit DMA prototypes and stubs */
#ifdef BCMDMA64
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma64_getnexttxp(dma_info_t *di, bool forceall);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, bool forceall);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);
#else /* !BCMDMA64 */

/* compiled-out stubs so the shared code still links; the one-line bodies
 * are assumed (they are never reached while DMA64_ENAB() is 0)
 */
static bool dma64_alloc(dma_info_t *di, uint direction) { return FALSE; }
static bool dma64_txreset(dma_info_t *di) { return FALSE; }
static bool dma64_rxreset(dma_info_t *di) { return FALSE; }
static bool dma64_txsuspendedidle(dma_info_t *di) { return FALSE; }
static int dma64_txfast(dma_info_t *di, void *p0, bool commit) { return -1; }
static void *dma64_getnexttxp(dma_info_t *di, bool forceall) { return NULL; }
static void *dma64_getnextrxp(dma_info_t *di, bool forceall) { return NULL; }
static void dma64_txrotate(dma_info_t *di) { return; }
static bool dma64_rxidle(dma_info_t *di) { return FALSE; }
static void dma64_txinit(dma_info_t *di) { return; }
static bool dma64_txenabled(dma_info_t *di) { return FALSE; }
static void dma64_txsuspend(dma_info_t *di) { return; }
static void dma64_txresume(dma_info_t *di) { return; }
static bool dma64_txsuspended(dma_info_t *di) { return FALSE; }
static void dma64_txreclaim(dma_info_t *di, bool forceall) { return; }
static bool dma64_txstopped(dma_info_t *di) { return FALSE; }
static bool dma64_rxstopped(dma_info_t *di) { return FALSE; }
static bool dma64_rxenabled(dma_info_t *di) { return FALSE; }
static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs) { return FALSE; }

#endif /* BCMDMA64 */
static void dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b,
                           dma32dd_t *ring, uint start, uint end,
                           uint max_num);
static void dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);

static void dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b,
                           dma64dd_t *ring, uint start, uint end,
                           uint max_num);
static void dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring);
static di_fcn_t dma64proc = {
  (di_detach_t) _dma_detach,
  (di_txinit_t) dma64_txinit,
  (di_txreset_t) dma64_txreset,
  (di_txenabled_t) dma64_txenabled,
  (di_txsuspend_t) dma64_txsuspend,
  (di_txresume_t) dma64_txresume,
  (di_txsuspended_t) dma64_txsuspended,
  (di_txsuspendedidle_t) dma64_txsuspendedidle,
  (di_txfast_t) dma64_txfast,
  (di_txstopped_t) dma64_txstopped,
  (di_txreclaim_t) dma64_txreclaim,
  (di_getnexttxp_t) dma64_getnexttxp,
  (di_peeknexttxp_t) _dma_peeknexttxp,
  (di_txblock_t) _dma_txblock,
  (di_txunblock_t) _dma_txunblock,
  (di_txactive_t) _dma_txactive,
  (di_txrotate_t) dma64_txrotate,

  (di_rxinit_t) _dma_rxinit,
  (di_rxreset_t) dma64_rxreset,
  (di_rxidle_t) dma64_rxidle,
  (di_rxstopped_t) dma64_rxstopped,
  (di_rxenable_t) _dma_rxenable,
  (di_rxenabled_t) dma64_rxenabled,
  (di_rx_t) _dma_rx,            /* assumed entry at this slot (gap in source) */
  (di_rxfill_t) _dma_rxfill,
  (di_rxreclaim_t) _dma_rxreclaim,
  (di_getnextrxp_t) _dma_getnextrxp,

  (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
  (di_getvar_t) _dma_getvar,
  (di_counterreset_t) _dma_counterreset,

  (di_dump_t) dma64_dump,
  (di_dumptx_t) dma64_dumptx,
  (di_dumprx_t) dma64_dumprx,
};
static di_fcn_t dma32proc = {
  (di_detach_t) _dma_detach,
  (di_txinit_t) dma32_txinit,
  (di_txreset_t) dma32_txreset,
  (di_txenabled_t) dma32_txenabled,
  (di_txsuspend_t) dma32_txsuspend,
  (di_txresume_t) dma32_txresume,
  (di_txsuspended_t) dma32_txsuspended,
  (di_txsuspendedidle_t) dma32_txsuspendedidle,
  (di_txfast_t) dma32_txfast,
  (di_txstopped_t) dma32_txstopped,
  (di_txreclaim_t) dma32_txreclaim,
  (di_getnexttxp_t) dma32_getnexttxp,
  (di_peeknexttxp_t) _dma_peeknexttxp,
  (di_txblock_t) _dma_txblock,
  (di_txunblock_t) _dma_txunblock,
  (di_txactive_t) _dma_txactive,
  (di_txrotate_t) dma32_txrotate,

  (di_rxinit_t) _dma_rxinit,
  (di_rxreset_t) dma32_rxreset,
  (di_rxidle_t) dma32_rxidle,
  (di_rxstopped_t) dma32_rxstopped,
  (di_rxenable_t) _dma_rxenable,
  (di_rxenabled_t) dma32_rxenabled,
  (di_rx_t) _dma_rx,            /* assumed entry at this slot (gap in source) */
  (di_rxfill_t) _dma_rxfill,
  (di_rxreclaim_t) _dma_rxreclaim,
  (di_getnextrxp_t) _dma_getnextrxp,

  (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
  (di_getvar_t) _dma_getvar,
  (di_counterreset_t) _dma_counterreset,

  (di_dump_t) dma32_dump,
  (di_dumptx_t) dma32_dumptx,
  (di_dumprx_t) dma32_dumprx,
};
hnddma_t *
dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx,
           void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
           uint nrxpost, uint rxoffset, uint *msg_level)
{
  dma_info_t *di;
  uint size;

  /* allocate private info structure */
  if ((di = MALLOC(osh, sizeof(dma_info_t))) == NULL) {
    printf("dma_attach: out of memory, malloced %d bytes\n", MALLOCED(osh));
    return (NULL);
  }
  bzero((char *) di, sizeof(dma_info_t));

  di->msg_level = msg_level ? msg_level : &dma_msg_level;

  /* old chips w/o sb are no longer supported */
  ASSERT(sbh != NULL);

  di->dma64 = ((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);

  /* check chip capability against driver capability */
  if (di->dma64 && !DMA64_CAP) {
    DMA_ERROR(("dma_attach: driver doesn't have the capability to support "
               "64 bits DMA\n"));
    goto fail;
  }

  /* check arguments */
  ASSERT(ISPOWEROF2(ntxd));
  ASSERT(ISPOWEROF2(nrxd));
  if (nrxd == 0)
    ASSERT(dmaregsrx == NULL);
  if (ntxd == 0)
    ASSERT(dmaregstx == NULL);

  /* init dma reg pointer */
  if (di->dma64) {
    ASSERT(ntxd <= D64MAXDD);
    ASSERT(nrxd <= D64MAXDD);
    di->d64txregs = (dma64regs_t *) dmaregstx;
    di->d64rxregs = (dma64regs_t *) dmaregsrx;

    di->dma64align = D64RINGALIGN;
    if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
      /* for a smaller dd table, HW relaxes the alignment requirement */
      di->dma64align = D64RINGALIGN / 2;
    }
  } else {
    ASSERT(ntxd <= D32MAXDD);
    ASSERT(nrxd <= D32MAXDD);
    di->d32txregs = (dma32regs_t *) dmaregstx;
    di->d32rxregs = (dma32regs_t *) dmaregsrx;
  }

  DMA_TRACE(("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d "
             "nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
             name, (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd,
             rxbufsize, nrxpost, rxoffset, dmaregstx, dmaregsrx));

  /* make a private copy of our caller's name */
  strncpy(di->name, name, MAXNAMEL);
  di->name[MAXNAMEL - 1] = '\0';

  di->osh = osh;
  di->sbh = sbh;

  /* save tunables */
  di->ntxd = ntxd;
  di->nrxd = nrxd;

  /* the actual dma size doesn't include the extra headroom */
  if (rxbufsize > BCMEXTRAHDROOM)
    di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
  else
    di->rxbufsize = rxbufsize;

  di->nrxpost = nrxpost;
  di->rxoffset = rxoffset;

  /*
   * figure out the DMA physical address offset for dd and data
   *   for old chips w/o sb, use zero
   *   for new chips w sb,
   *     PCI/PCIE: they map silicon backplane address to zero based memory, need offset
   *     Other bus: use zero
   *     SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
   */
  di->ddoffsetlow = 0;
  di->dataoffsetlow = 0;
  /* for pci bus, add offset */
  if (sbh->bustype == PCI_BUS) {
    if ((sbh->buscoretype == SB_PCIE) && di->dma64) {
      /* pcie with DMA64 */
      di->ddoffsetlow = 0;
      di->ddoffsethigh = SB_PCIE_DMA_H32;
    } else {
      /* pci(DMA32/DMA64) or pcie with DMA32 */
      di->ddoffsetlow = SB_PCI_DMA;
      di->ddoffsethigh = 0;
    }
    di->dataoffsetlow = di->ddoffsetlow;
    di->dataoffsethigh = di->ddoffsethigh;
  }
#if defined(__mips__) && defined(IL_BIGENDIAN)
  di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif

  di->addrext = _dma_isaddrext(di);

  /* allocate tx packet pointer vector */
  if (ntxd) {
    size = ntxd * sizeof(void *);
    if ((di->txp = MALLOC(osh, size)) == NULL) {
      DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
                 di->name, MALLOCED(osh)));
      goto fail;
    }
    bzero((char *) di->txp, size);
  }

  /* allocate rx packet pointer vector */
  if (nrxd) {
    size = nrxd * sizeof(void *);
    if ((di->rxp = MALLOC(osh, size)) == NULL) {
      DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
                 di->name, MALLOCED(osh)));
      goto fail;
    }
    bzero((char *) di->rxp, size);
  }

  /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
  if (ntxd) {
    if (!_dma_alloc(di, DMA_TX))
      goto fail;
  }

  /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
  if (nrxd) {
    if (!_dma_alloc(di, DMA_RX))
      goto fail;
  }

  if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ)
      && !di->addrext) {
    DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
               di->name, di->txdpa));
    goto fail;
  }
  if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ)
      && !di->addrext) {
    DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
               di->name, di->rxdpa));
    goto fail;
  }

  DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
             "dataoffsethigh 0x%x addrext %d\n", di->ddoffsetlow,
             di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
             di->addrext));

  /* allocate tx packet pointer vector and DMA mapping vectors */
  if (ntxd) {
    size = ntxd * sizeof(osldma_t **);
    if ((di->txp_dmah = (osldma_t **) MALLOC(osh, size)) == NULL)
      goto fail;
    bzero((char *) di->txp_dmah, size);
  }

  /* allocate rx packet pointer vector and DMA mapping vectors */
  if (nrxd) {
    size = nrxd * sizeof(osldma_t **);
    if ((di->rxp_dmah = (osldma_t **) MALLOC(osh, size)) == NULL)
      goto fail;
    bzero((char *) di->rxp_dmah, size);
  }

  /* initialize opsvec of function pointers */
  di->hnddma.di_fn = DMA64_ENAB(di) ? dma64proc : dma32proc;

  return ((hnddma_t *) di);

fail:
  _dma_detach(di);
  return (NULL);
}
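/*
 * Illustrative sketch (not part of this file): a typical caller attaches
 * once and then drives the engine through the returned opsvec.  The
 * register pointers and the "wl0"/rxoffset values below are hypothetical
 * placeholders, and dma_txinit()/dma_rxinit()/dma_rxfill() are assumed to
 * be the hnddma.h dispatch wrappers around di_fn:
 *
 *   hnddma_t *dmah;
 *
 *   dmah = dma_attach(osh, "wl0", sbh, txregs, rxregs,
 *                     256, 256, 2048, 256, 30, &wl_msg_level);
 *   if (dmah == NULL)
 *     return;
 *   dma_txinit(dmah);
 *   dma_rxinit(dmah);
 *   dma_rxfill(dmah);
 */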
/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx,
             uint32 *flags, uint32 bufcount)
{
  /* dma32 uses 32 bits control to fit both flags and bufcounter */
  *flags = *flags | (bufcount & CTRL_BC_MASK);

  if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
    W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
    W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
  } else {
    /* address extension */
    uint32 ae;
    ASSERT(di->addrext);
    ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
    pa &= ~PCI32ADDR_HIGH;

    *flags |= (ae << CTRL_AE_SHIFT);
    W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
    W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
  }
}
static INLINE void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, ulong pa, uint outidx,
             uint32 *flags, uint32 bufcount)
{
  uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

  /* PCI bus with big(>1G) physical address, use address extension */
  if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
    W_SM(&ddring[outidx].addrlow, BUS_SWAP32(pa + di->dataoffsetlow));
    W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
    W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
    W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
  } else {
    /* address extension */
    uint32 ae;
    ASSERT(di->addrext);

    ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
    pa &= ~PCI32ADDR_HIGH;

    ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
    W_SM(&ddring[outidx].addrlow, BUS_SWAP32(pa + di->dataoffsetlow));
    W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
    W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
    W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
  }
}
static bool
_dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
{
  uint32 w;

  OR_REG(osh, &dma32regs->control, XC_AE);
  w = R_REG(osh, &dma32regs->control);
  AND_REG(osh, &dma32regs->control, ~XC_AE);
  return ((w & XC_AE) == XC_AE);
}
static bool
_dma_alloc(dma_info_t *di, uint direction)
{
  if (DMA64_ENAB(di))
    return dma64_alloc(di, direction);
  else
    return dma32_alloc(di, direction);
}
/* !! may be called with core in reset */
static void
_dma_detach(dma_info_t *di)
{
  if (di == NULL)
    return;

  DMA_TRACE(("%s: dma_detach\n", di->name));

  /* shouldn't be here if descriptors are unreclaimed */
  ASSERT(di->txin == di->txout);
  ASSERT(di->rxin == di->rxout);

  /* free dma descriptor rings */
  if (DMA64_ENAB(di)) {
    if (di->txd64)
      DMA_FREE_CONSISTENT(di->osh,
                          ((int8 *) (uintptr) di->txd64 - di->txdalign),
                          di->txdalloc, (di->txdpa - di->txdalign),
                          &di->tx_dmah);
    if (di->rxd64)
      DMA_FREE_CONSISTENT(di->osh,
                          ((int8 *) (uintptr) di->rxd64 - di->rxdalign),
                          di->rxdalloc, (di->rxdpa - di->rxdalign),
                          &di->rx_dmah);
  } else {
    if (di->txd32)
      DMA_FREE_CONSISTENT(di->osh,
                          ((int8 *) (uintptr) di->txd32 - di->txdalign),
                          di->txdalloc, (di->txdpa - di->txdalign),
                          &di->tx_dmah);
    if (di->rxd32)
      DMA_FREE_CONSISTENT(di->osh,
                          ((int8 *) (uintptr) di->rxd32 - di->rxdalign),
                          di->rxdalloc, (di->rxdpa - di->rxdalign),
                          &di->rx_dmah);
  }

  /* free packet pointer vectors */
  if (di->txp)
    MFREE(di->osh, (void *) di->txp, (di->ntxd * sizeof(void *)));
  if (di->rxp)
    MFREE(di->osh, (void *) di->rxp, (di->nrxd * sizeof(void *)));

  /* free tx packet DMA handles */
  if (di->txp_dmah)
    MFREE(di->osh, (void *) di->txp_dmah, di->ntxd * sizeof(osldma_t **));

  /* free rx packet DMA handles */
  if (di->rxp_dmah)
    MFREE(di->osh, (void *) di->rxp_dmah, di->nrxd * sizeof(osldma_t **));

  /* free our private info structure */
  MFREE(di->osh, (void *) di, sizeof(dma_info_t));
}
766 _dma_isaddrext (dma_info_t
* di
)
770 /* DMA64 supports full 32 bits or 64 bits. AE is always valid */
772 /* not all tx or rx channel are available */
773 if (di
->d64txregs
!= NULL
)
775 if (!_dma64_addrext (di
->osh
, di
->d64txregs
))
777 DMA_ERROR (("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di
->name
));
782 else if (di
->d64rxregs
!= NULL
)
784 if (!_dma64_addrext (di
->osh
, di
->d64rxregs
))
786 DMA_ERROR (("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di
->name
));
793 else if (di
->d32txregs
)
794 return (_dma32_addrext (di
->osh
, di
->d32txregs
));
795 else if (di
->d32rxregs
)
796 return (_dma32_addrext (di
->osh
, di
->d32rxregs
));
/* initialize descriptor table base address */
static void
_dma_ddtable_init(dma_info_t *di, uint direction, ulong pa)
{
  if (DMA64_ENAB(di)) {
    if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
      if (direction == DMA_TX) {
        W_REG(di->osh, &di->d64txregs->addrlow, (pa + di->ddoffsetlow));
        W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
      } else {
        W_REG(di->osh, &di->d64rxregs->addrlow, (pa + di->ddoffsetlow));
        W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
      }
    } else {
      /* DMA64 32bits address extension */
      uint32 ae;
      ASSERT(di->addrext);

      /* shift the high bit(s) from pa to ae */
      ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
      pa &= ~PCI32ADDR_HIGH;

      if (direction == DMA_TX) {
        W_REG(di->osh, &di->d64txregs->addrlow, (pa + di->ddoffsetlow));
        W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
        SET_REG(di->osh, &di->d64txregs->control, D64_XC_AE,
                (ae << D64_XC_AE_SHIFT));
      } else {
        W_REG(di->osh, &di->d64rxregs->addrlow, (pa + di->ddoffsetlow));
        W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
        SET_REG(di->osh, &di->d64rxregs->control, D64_RC_AE,
                (ae << D64_RC_AE_SHIFT));
      }
    }
  } else {
    if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
      if (direction == DMA_TX)
        W_REG(di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
      else
        W_REG(di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
    } else {
      /* dma32 address extension */
      uint32 ae;
      ASSERT(di->addrext);

      /* shift the high bit(s) from pa to ae */
      ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
      pa &= ~PCI32ADDR_HIGH;

      if (direction == DMA_TX) {
        W_REG(di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
        SET_REG(di->osh, &di->d32txregs->control, XC_AE,
                ae << XC_AE_SHIFT);
      } else {
        W_REG(di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
        SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
                ae << RC_AE_SHIFT);
      }
    }
  }
}
static void
_dma_fifoloopbackenable(dma_info_t *di)
{
  DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
  if (DMA64_ENAB(di))
    OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
  else
    OR_REG(di->osh, &di->d32txregs->control, XC_LE);
}
static void
_dma_rxinit(dma_info_t *di)
{
  DMA_TRACE(("%s: dma_rxinit\n", di->name));

  if (di->nrxd == 0)
    return;

  di->rxin = di->rxout = 0;

  /* clear rx descriptor ring */
  if (DMA64_ENAB(di))
    BZERO_SM((void *) (uintptr) di->rxd64, (di->nrxd * sizeof(dma64dd_t)));
  else
    BZERO_SM((void *) (uintptr) di->rxd32, (di->nrxd * sizeof(dma32dd_t)));

  _dma_rxenable(di);
  _dma_ddtable_init(di, DMA_RX, di->rxdpa);
}
static void
_dma_rxenable(dma_info_t *di)
{
  DMA_TRACE(("%s: dma_rxenable\n", di->name));

  if (DMA64_ENAB(di))
    W_REG(di->osh, &di->d64rxregs->control,
          ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
  else
    W_REG(di->osh, &di->d32rxregs->control,
          ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}
/* !! rx entry routine, returns a pointer to the next frame received,
 * or NULL if there are no more
 */
static void *
_dma_rx(dma_info_t *di)
{
  void *p;
  uint len;
  int skiplen = 0;

  while ((p = _dma_getnextrxp(di, FALSE))) {
    /* skip giant packets which span multiple rx descriptors */
    if (skiplen > 0) {
      skiplen -= di->rxbufsize;
      if (skiplen < 0)
        skiplen = 0;
      PKTFREE(di->osh, p, FALSE);
      continue;
    }

    len = ltoh16(*(uint16 *) (PKTDATA(di->osh, p)));
    DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

    /* bad frame length check */
    if (len > (di->rxbufsize - di->rxoffset)) {
      DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
      if (len > 0)
        skiplen = len - (di->rxbufsize - di->rxoffset);
      PKTFREE(di->osh, p, FALSE);
      di->hnddma.rxgiants++;
      continue;
    }

    /* set actual length */
    PKTSETLEN(di->osh, p, (di->rxoffset + len));

    break;
  }

  return (p);
}
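/*
 * Illustrative sketch: the chip writes a small receive header at the start
 * of each buffer; its first little-endian uint16 is the frame length, which
 * is why _dma_rx() reads it with ltoh16() before trimming the packet with
 * PKTSETLEN().  A hypothetical caller strips the di->rxoffset header bytes
 * itself:
 *
 *   void *p;
 *   while ((p = dma_rx(dmah)) != NULL) {
 *     PKTPULL(osh, p, rxoffset);     (same rxoffset passed to dma_attach)
 *     sendup(p);                     (hypothetical upper-layer hook)
 *   }
 *
 * dma_rx() is assumed to be the hnddma.h dispatch wrapper around di_fn.
 */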
/* post receive buffers */
static void
_dma_rxfill(dma_info_t *di)
{
  void *p;
  uint rxin, rxout;
  uint32 flags = 0;
  uint n;
  uint i;
  uint32 pa;
  uint extra_offset = 0;

  /*
   * Determine how many receive buffers we're lacking
   * from the full complement, allocate, initialize,
   * and post them, then update the chip rx lastdscr.
   */

  rxin = di->rxin;
  rxout = di->rxout;

  n = di->nrxpost - NRXDACTIVE(rxin, rxout);

  DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

  if (di->rxbufsize > BCMEXTRAHDROOM)
    extra_offset = BCMEXTRAHDROOM;

  for (i = 0; i < n; i++) {
    /* di->rxbufsize doesn't include the extra headroom, so add it to the
     * size to be allocated
     */
    if ((p = PKTGET(di->osh, di->rxbufsize + extra_offset, FALSE)) == NULL) {
      DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
      di->hnddma.rxnobuf++;
      break;
    }

    /* reserve an extra headroom, if applicable */
    if (extra_offset)
      PKTPULL(di->osh, p, extra_offset);

    /* Do a cached write instead of uncached write since DMA_MAP
     * will flush the cache.
     */
    *(uint32 *) (PKTDATA(di->osh, p)) = 0;

    pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->osh, p),
                          di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);

    ASSERT(ISALIGNED(pa, 4));

    /* save the free packet pointer */
    ASSERT(di->rxp[rxout] == NULL);
    di->rxp[rxout] = p;

    /* reset flags for each descriptor */
    flags = 0;
    if (DMA64_ENAB(di)) {
      if (rxout == (di->nrxd - 1))
        flags = D64_CTRL1_EOT;

      dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
    } else {
      if (rxout == (di->nrxd - 1))
        flags = CTRL_EOT;

      dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
    }
    rxout = NEXTRXD(rxout);
  }

  di->rxout = rxout;

  /* update the chip lastdscr pointer */
  if (DMA64_ENAB(di))
    W_REG(di->osh, &di->d64rxregs->ptr, I2B(rxout, dma64dd_t));
  else
    W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
}
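/*
 * Worked example: with nrxpost = 256 and NRXDACTIVE(rxin, rxout) = 200
 * buffers still posted, the loop above allocates n = 256 - 200 = 56 fresh
 * buffers.  Only the descriptor at index nrxd - 1 gets the EOT flag, so
 * the engine wraps back to descriptor 0 after it.
 */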
/* like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp(dma_info_t *di)
{
  uint end, i;

  if (di->ntxd == 0)
    return (NULL);

  if (DMA64_ENAB(di))
    end =
      B2I(R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK,
          dma64dd_t);
  else
    end =
      B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

  for (i = di->txin; i != end; i = NEXTTXD(i))
    if (di->txp[i])
      return (di->txp[i]);

  return (NULL);
}
static void
_dma_rxreclaim(dma_info_t *di)
{
  void *p;

  /* "unused local" warning suppression for OSLs that
   * define PKTFREE() without using the di->osh arg
   */
  di = di;

  DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

  while ((p = _dma_getnextrxp(di, TRUE)))
    PKTFREE(di->osh, p, FALSE);
}
static void *
_dma_getnextrxp(dma_info_t *di, bool forceall)
{
  if (di->nrxd == 0)
    return (NULL);

  if (DMA64_ENAB(di))
    return dma64_getnextrxp(di, forceall);
  else
    return dma32_getnextrxp(di, forceall);
}
static void
_dma_txblock(dma_info_t *di)
{
  di->hnddma.txavail = 0;
}

static void
_dma_txunblock(dma_info_t *di)
{
  di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint
_dma_txactive(dma_info_t *di)
{
  return (NTXDACTIVE(di->txin, di->txout));
}
static void
_dma_counterreset(dma_info_t *di)
{
  /* reset all software counters */
  di->hnddma.rxgiants = 0;
  di->hnddma.rxnobuf = 0;
  di->hnddma.txnobuf = 0;
}
/* get the address of the var in order to change later */
static uintptr
_dma_getvar(dma_info_t *di, const char *name)
{
  if (!strcmp(name, "&txavail"))
    return ((uintptr) & (di->hnddma.txavail));
  else {
    ASSERT(0);
  }
  return (0);
}
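/*
 * Illustrative sketch: the returned pointer lets a caller poll a live
 * counter without another dispatch call.  Only "&txavail" is recognized:
 *
 *   uint *txavail = (uint *) dma_getvar(dmah, "&txavail");
 *   if (*txavail == 0)
 *     ...hold back the upper layer's queue until tx reclaim runs...
 *
 * dma_getvar() is assumed to be the hnddma.h dispatch wrapper around the
 * getvar entry of the di_fn opsvec.
 */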
void
dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
{
  OR_REG(osh, &regs->control, XC_LE);
}
static void
dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring,
               uint start, uint end, uint max_num)
{
  uint i;

  for (i = start; i != end; i = XXD((i + 1), max_num)) {
    /* in the format of high->low 8 bytes */
    bcm_bprintf(b, "ring index %d: 0x%x %x\n", i, ring[i].addr,
                ring[i].ctrl);
  }
}
static void
dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
  bcm_bprintf(b, "DMA32: txd32 %p txdpa 0x%lx txp %p txin %d txout %d "
              "txavail %d\n", di->txd32, di->txdpa, di->txp, di->txin,
              di->txout, di->hnddma.txavail);

  bcm_bprintf(b, "xmtcontrol 0x%x xmtaddr 0x%x xmtptr 0x%x xmtstatus 0x%x\n",
              R_REG(di->osh, &di->d32txregs->control),
              R_REG(di->osh, &di->d32txregs->addr),
              R_REG(di->osh, &di->d32txregs->ptr),
              R_REG(di->osh, &di->d32txregs->status));

  if (dumpring && di->txd32)
    dma32_dumpring(di, b, di->txd32, di->txin, di->txout, di->ntxd);
}
static void
dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
  bcm_bprintf(b, "DMA32: rxd32 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
              di->rxd32, di->rxdpa, di->rxp, di->rxin, di->rxout);

  bcm_bprintf(b, "rcvcontrol 0x%x rcvaddr 0x%x rcvptr 0x%x rcvstatus 0x%x\n",
              R_REG(di->osh, &di->d32rxregs->control),
              R_REG(di->osh, &di->d32rxregs->addr),
              R_REG(di->osh, &di->d32rxregs->ptr),
              R_REG(di->osh, &di->d32rxregs->status));
  if (di->rxd32 && dumpring)
    dma32_dumpring(di, b, di->rxd32, di->rxin, di->rxout, di->nrxd);
}
static void
dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
  dma32_dumptx(di, b, dumpring);
  dma32_dumprx(di, b, dumpring);
}
static void
dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring,
               uint start, uint end, uint max_num)
{
  uint i;

  for (i = start; i != end; i = XXD((i + 1), max_num)) {
    /* in the format of high->low 16 bytes */
    bcm_bprintf(b, "ring index %d: 0x%x %x %x %x\n",
                i, ring[i].addrhigh, ring[i].addrlow, ring[i].ctrl2,
                ring[i].ctrl1);
  }
}
static void
dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
  bcm_bprintf(b, "DMA64: txd64 %p txdpa 0x%lx txp %p txin %d txout %d "
              "txavail %d\n", di->txd64, di->txdpa, di->txp, di->txin,
              di->txout, di->hnddma.txavail);

  bcm_bprintf(b, "xmtcontrol 0x%x xmtaddrlow 0x%x xmtaddrhigh 0x%x "
              "xmtptr 0x%x xmtstatus0 0x%x xmtstatus1 0x%x\n",
              R_REG(di->osh, &di->d64txregs->control),
              R_REG(di->osh, &di->d64txregs->addrlow),
              R_REG(di->osh, &di->d64txregs->addrhigh),
              R_REG(di->osh, &di->d64txregs->ptr),
              R_REG(di->osh, &di->d64txregs->status0),
              R_REG(di->osh, &di->d64txregs->status1));

  if (dumpring && di->txd64)
    dma64_dumpring(di, b, di->txd64, di->txin, di->txout, di->ntxd);
}
static void
dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
  bcm_bprintf(b, "DMA64: rxd64 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
              di->rxd64, di->rxdpa, di->rxp, di->rxin, di->rxout);

  bcm_bprintf(b, "rcvcontrol 0x%x rcvaddrlow 0x%x rcvaddrhigh 0x%x rcvptr "
              "0x%x rcvstatus0 0x%x rcvstatus1 0x%x\n",
              R_REG(di->osh, &di->d64rxregs->control),
              R_REG(di->osh, &di->d64rxregs->addrlow),
              R_REG(di->osh, &di->d64rxregs->addrhigh),
              R_REG(di->osh, &di->d64rxregs->ptr),
              R_REG(di->osh, &di->d64rxregs->status0),
              R_REG(di->osh, &di->d64rxregs->status1));
  if (di->rxd64 && dumpring)
    dma64_dumpring(di, b, di->rxd64, di->rxin, di->rxout, di->nrxd);
}
static void
dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring)
{
  dma64_dumptx(di, b, dumpring);
  dma64_dumprx(di, b, dumpring);
}
1304 dma32_txinit (dma_info_t
* di
)
1306 DMA_TRACE (("%s: dma_txinit\n", di
->name
));
1311 di
->txin
= di
->txout
= 0;
1312 di
->hnddma
.txavail
= di
->ntxd
- 1;
1314 /* clear tx descriptor ring */
1315 BZERO_SM ((void *) (uintptr
) di
->txd32
, (di
->ntxd
* sizeof (dma32dd_t
)));
1316 W_REG (di
->osh
, &di
->d32txregs
->control
, XC_XE
);
1317 _dma_ddtable_init (di
, DMA_TX
, di
->txdpa
);
static bool
dma32_txenabled(dma_info_t *di)
{
  uint32 xc;

  /* If the chip is dead, it is not enabled :-) */
  xc = R_REG(di->osh, &di->d32txregs->control);
  return ((xc != 0xffffffff) && (xc & XC_XE));
}
static void
dma32_txsuspend(dma_info_t *di)
{
  DMA_TRACE(("%s: dma_txsuspend\n", di->name));

  if (di->ntxd == 0)
    return;

  OR_REG(di->osh, &di->d32txregs->control, XC_SE);
}
static void
dma32_txresume(dma_info_t *di)
{
  DMA_TRACE(("%s: dma_txresume\n", di->name));

  if (di->ntxd == 0)
    return;

  AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
}
static bool
dma32_txsuspended(dma_info_t *di)
{
  return (di->ntxd == 0)
    || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}
static void
dma32_txreclaim(dma_info_t *di, bool forceall)
{
  void *p;

  DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

  while ((p = dma32_getnexttxp(di, forceall)))
    PKTFREE(di->osh, p, TRUE);
}
static bool
dma32_txstopped(dma_info_t *di)
{
  return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
          XS_XS_STOPPED);
}
static bool
dma32_rxstopped(dma_info_t *di)
{
  return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
          RS_RS_STOPPED);
}
static bool
dma32_alloc(dma_info_t *di, uint direction)
{
  uint size;
  uint ddlen;
  void *va;

  ddlen = sizeof(dma32dd_t);

  size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

  if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
    size += D32RINGALIGN;

  if (direction == DMA_TX) {
    if ((va =
         DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa,
                              &di->tx_dmah)) == NULL) {
      DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
                 di->name));
      return (FALSE);
    }

    di->txd32 = (dma32dd_t *) ROUNDUP((uintptr) va, D32RINGALIGN);
    di->txdalign = (uint) ((int8 *) (uintptr) di->txd32 - (int8 *) va);
    di->txdpa += di->txdalign;
    di->txdalloc = size;
    ASSERT(ISALIGNED((uintptr) di->txd32, D32RINGALIGN));
  } else {
    if ((va =
         DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa,
                              &di->rx_dmah)) == NULL) {
      DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
                 di->name));
      return (FALSE);
    }
    di->rxd32 = (dma32dd_t *) ROUNDUP((uintptr) va, D32RINGALIGN);
    di->rxdalign = (uint) ((int8 *) (uintptr) di->rxd32 - (int8 *) va);
    di->rxdpa += di->rxdalign;
    di->rxdalloc = size;
    ASSERT(ISALIGNED((uintptr) di->rxd32, D32RINGALIGN));
  }

  return (TRUE);
}
static bool
dma32_txreset(dma_info_t *di)
{
  uint32 status;

  if (di->ntxd == 0)
    return (TRUE);

  /* suspend tx DMA first */
  W_REG(di->osh, &di->d32txregs->control, XC_SE);
  SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
            != XS_XS_DISABLED) &&
           (status != XS_XS_IDLE) && (status != XS_XS_STOPPED), (10000));

  W_REG(di->osh, &di->d32txregs->control, 0);
  SPINWAIT(((status = (R_REG(di->osh,
                             &di->d32txregs->status) & XS_XS_MASK)) !=
            XS_XS_DISABLED), 10000);

  /* wait for the last transaction to complete */
  OSL_DELAY(300);

  return (status == XS_XS_DISABLED);
}
static bool
dma32_rxidle(dma_info_t *di)
{
  DMA_TRACE(("%s: dma_rxidle\n", di->name));

  if (di->nrxd == 0)
    return (TRUE);

  return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
          R_REG(di->osh, &di->d32rxregs->ptr));
}
static bool
dma32_rxreset(dma_info_t *di)
{
  uint32 status;

  if (di->nrxd == 0)
    return (TRUE);

  W_REG(di->osh, &di->d32rxregs->control, 0);
  SPINWAIT(((status = (R_REG(di->osh,
                             &di->d32rxregs->status) & RS_RS_MASK)) !=
            RS_RS_DISABLED), 10000);

  return (status == RS_RS_DISABLED);
}
static bool
dma32_rxenabled(dma_info_t *di)
{
  uint32 rc;

  rc = R_REG(di->osh, &di->d32rxregs->control);
  return ((rc != 0xffffffff) && (rc & RC_RE));
}
static bool
dma32_txsuspendedidle(dma_info_t *di)
{
  if (di->ntxd == 0)
    return (TRUE);

  if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
    return 0;

  if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
    return 0;

  OSL_DELAY(2);
  return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
          XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 */
static int
dma32_txfast(dma_info_t *di, void *p0, bool commit)
{
  void *p, *next;
  uchar *data;
  uint len;
  uint txout;
  uint32 flags = 0;
  uint32 pa;

  DMA_TRACE(("%s: dma_txfast\n", di->name));

  txout = di->txout;

  /*
   * Walk the chain of packet buffers
   * allocating and initializing transmit descriptor entries.
   */
  for (p = p0; p; p = next) {
    data = PKTDATA(di->osh, p);
    len = PKTLEN(di->osh, p);
    next = PKTNEXT(di->osh, p);

    /* return nonzero if out of tx descriptors */
    if (NEXTTXD(txout) == di->txin)
      goto outoftxd;

    if (len == 0)
      continue;

    /* get physical address of buffer start */
    pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p,
                          &di->txp_dmah[txout]);

    flags = 0;
    if (p == p0)
      flags |= CTRL_SOF;
    if (next == NULL)
      flags |= (CTRL_IOC | CTRL_EOF);
    if (txout == (di->ntxd - 1))
      flags |= CTRL_EOT;

    dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
    ASSERT(di->txp[txout] == NULL);

    txout = NEXTTXD(txout);
  }

  /* if last txd eof not set, fix it */
  if (!(flags & CTRL_EOF))
    W_SM(&di->txd32[PREVTXD(txout)].ctrl,
         BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));

  /* save the packet */
  di->txp[PREVTXD(txout)] = p0;

  /* bump the tx descriptor index */
  di->txout = txout;

  /* kick the chip */
  if (commit)
    W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

  return (0);

outoftxd:
  DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
  PKTFREE(di->osh, p0, TRUE);
  di->hnddma.txavail = 0;
  di->hnddma.txnobuf++;
  return (-1);
}
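/*
 * Illustrative sketch: a transmit path checks the flow-control counter,
 * posts a packet chain with commit = TRUE so the chip's ptr register is
 * bumped, and frees completed packets later (e.g. from a tx-complete
 * interrupt).  dma_txfast()/dma_getnexttxp() are assumed to be the
 * hnddma.h dispatch wrappers around di_fn:
 *
 *   if (di->hnddma.txavail == 0)
 *     ...back-pressure the caller, dma_txfast() would drop the packet...
 *   else if (dma_txfast(dmah, p, TRUE) != 0)
 *     ...out of descriptors; p has already been freed, count the error...
 *
 *   while ((p = dma_getnexttxp(dmah, FALSE)) != NULL)
 *     PKTFREE(osh, p, TRUE);
 */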
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
static void *
dma32_getnexttxp(dma_info_t *di, bool forceall)
{
  uint start, end, i;
  void *txp;

  DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

  if (di->ntxd == 0)
    return (NULL);

  txp = NULL;

  start = di->txin;
  if (forceall)
    end = di->txout;
  else
    end =
      B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

  if ((start == 0) && (end > di->txout))
    goto bogus;

  for (i = start; i != end && !txp; i = NEXTTXD(i)) {
    DMA_UNMAP(di->osh,
              (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow),
              (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK),
              DMA_TX, di->txp[i], &di->txp_dmah[i]);

    W_SM(&di->txd32[i].addr, 0xdeadbeef);
    txp = di->txp[i];
    di->txp[i] = NULL;
  }

  di->txin = i;

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

  return (txp);

bogus:
  DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
             start, end, di->txout, forceall));
  return (NULL);
}
static void *
dma32_getnextrxp(dma_info_t *di, bool forceall)
{
  uint i;
  void *rxp;

  /* if forcing, dma engine must be disabled */
  ASSERT(!forceall || !dma32_rxenabled(di));

  i = di->rxin;

  /* return if no packets posted */
  if (i == di->rxout)
    return (NULL);

  /* ignore curr if forceall */
  if (!forceall &&
      (i ==
       B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t)))
    return (NULL);

  /* get the packet pointer that corresponds to the rx descriptor */
  rxp = di->rxp[i];
  ASSERT(rxp);
  di->rxp[i] = NULL;

  /* clear this packet from the descriptor ring */
  DMA_UNMAP(di->osh,
            (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow),
            di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

  W_SM(&di->rxd32[i].addr, 0xdeadbeef);

  di->rxin = NEXTRXD(i);

  return (rxp);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma32_txrotate(dma_info_t *di)
{
  uint ad;
  uint nactive;
  uint rot;
  uint old, new;
  uint32 w;
  uint first, last;

  ASSERT(dma32_txsuspendedidle(di));

  nactive = _dma_txactive(di);
  ad =
    B2I(((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >>
         XS_AD_SHIFT), dma32dd_t);
  rot = TXD(ad - di->txin);

  ASSERT(rot < di->ntxd);

  /* full-ring case is a lot harder - don't worry about this */
  if (rot >= (di->ntxd - nactive)) {
    DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
    return;
  }

  first = di->txin;
  last = PREVTXD(di->txout);

  /* move entries starting at last and moving backwards to first */
  for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
    new = TXD(old + rot);

    /*
     * Move the tx dma descriptor.
     * EOT is set only in the last entry in the ring.
     */
    w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
    if (new == (di->ntxd - 1))
      w |= CTRL_EOT;
    W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
    W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));

    /* zap the old tx dma descriptor address field */
    W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));

    /* move the corresponding txp[] entry */
    ASSERT(di->txp[new] == NULL);
    di->txp[new] = di->txp[old];
    di->txp[old] = NULL;
  }

  /* update txin and txout */
  di->txin = ad;
  di->txout = TXD(di->txout + rot);
  di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

  /* kick the chip */
  W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
}
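/*
 * Worked example: suppose ntxd = 8, txin = 2, txout = 6 and the hardware
 * ActiveDescriptor is 4.  Then nactive = 4 and rot = TXD(4 - 2) = 2, so
 * rot < ntxd - nactive holds.  The posted descriptors 5,4,3,2 are copied
 * (last first, so nothing is overwritten before it moves) to 7,6,5,4,
 * after which txin = 4 and txout = TXD(6 + 2) = 0: the same packets, in
 * the same order, now start at the engine's active descriptor.
 */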
/* 64 bits DMA functions */

#ifdef BCMDMA64
static void
dma64_txinit(dma_info_t *di)
{
  DMA_TRACE(("%s: dma_txinit\n", di->name));

  if (di->ntxd == 0)
    return;

  di->txin = di->txout = 0;
  di->hnddma.txavail = di->ntxd - 1;

  /* clear tx descriptor ring */
  BZERO_SM((void *) (uintptr) di->txd64, (di->ntxd * sizeof(dma64dd_t)));
  W_REG(di->osh, &di->d64txregs->control, D64_XC_XE);
  _dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool
dma64_txenabled(dma_info_t *di)
{
  uint32 xc;

  /* If the chip is dead, it is not enabled :-) */
  xc = R_REG(di->osh, &di->d64txregs->control);
  return ((xc != 0xffffffff) && (xc & D64_XC_XE));
}
static void
dma64_txsuspend(dma_info_t *di)
{
  DMA_TRACE(("%s: dma_txsuspend\n", di->name));

  if (di->ntxd == 0)
    return;

  OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
}
static void
dma64_txresume(dma_info_t *di)
{
  DMA_TRACE(("%s: dma_txresume\n", di->name));

  if (di->ntxd == 0)
    return;

  AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
}
static bool
dma64_txsuspended(dma_info_t *di)
{
  return (di->ntxd == 0)
    || ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
}
static void
dma64_txreclaim(dma_info_t *di, bool forceall)
{
  void *p;

  DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

  while ((p = dma64_getnexttxp(di, forceall)))
    PKTFREE(di->osh, p, TRUE);
}
static bool
dma64_txstopped(dma_info_t *di)
{
  return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
          D64_XS0_XS_STOPPED);
}
static bool
dma64_rxstopped(dma_info_t *di)
{
  return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
          D64_RS0_RS_STOPPED);
}
static bool
dma64_alloc(dma_info_t *di, uint direction)
{
  uint size;
  uint ddlen;
  uint32 alignbytes;
  void *va;

  ddlen = sizeof(dma64dd_t);

  size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

  alignbytes = di->dma64align;

  if (!ISALIGNED(DMA_CONSISTENT_ALIGN, alignbytes))
    size += alignbytes;

  if (direction == DMA_TX) {
    if ((va =
         DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa,
                              &di->tx_dmah)) == NULL) {
      DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
                 di->name));
      return (FALSE);
    }

    di->txd64 = (dma64dd_t *) ROUNDUP((uintptr) va, alignbytes);
    di->txdalign = (uint) ((int8 *) (uintptr) di->txd64 - (int8 *) va);
    di->txdpa += di->txdalign;
    di->txdalloc = size;
    ASSERT(ISALIGNED((uintptr) di->txd64, alignbytes));
  } else {
    if ((va =
         DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa,
                              &di->rx_dmah)) == NULL) {
      DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
                 di->name));
      return (FALSE);
    }
    di->rxd64 = (dma64dd_t *) ROUNDUP((uintptr) va, alignbytes);
    di->rxdalign = (uint) ((int8 *) (uintptr) di->rxd64 - (int8 *) va);
    di->rxdpa += di->rxdalign;
    di->rxdalloc = size;
    ASSERT(ISALIGNED((uintptr) di->rxd64, alignbytes));
  }

  return (TRUE);
}
static bool
dma64_txreset(dma_info_t *di)
{
  uint32 status;

  if (di->ntxd == 0)
    return (TRUE);

  /* suspend tx DMA first */
  W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
  SPINWAIT(((status =
             (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
            D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
           && (status != D64_XS0_XS_STOPPED), 10000);

  W_REG(di->osh, &di->d64txregs->control, 0);
  SPINWAIT(((status =
             (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
            D64_XS0_XS_DISABLED), 10000);

  /* wait for the last transaction to complete */
  OSL_DELAY(300);

  return (status == D64_XS0_XS_DISABLED);
}
static bool
dma64_rxidle(dma_info_t *di)
{
  DMA_TRACE(("%s: dma_rxidle\n", di->name));

  if (di->nrxd == 0)
    return (TRUE);

  return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
          R_REG(di->osh, &di->d64rxregs->ptr));
}
static bool
dma64_rxreset(dma_info_t *di)
{
  uint32 status;

  if (di->nrxd == 0)
    return (TRUE);

  W_REG(di->osh, &di->d64rxregs->control, 0);
  SPINWAIT(((status =
             (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
            D64_RS0_RS_DISABLED), 10000);

  return (status == D64_RS0_RS_DISABLED);
}
static bool
dma64_rxenabled(dma_info_t *di)
{
  uint32 rc;

  rc = R_REG(di->osh, &di->d64rxregs->control);
  return ((rc != 0xffffffff) && (rc & D64_RC_RE));
}
static bool
dma64_txsuspendedidle(dma_info_t *di)
{
  if (di->ntxd == 0)
    return (TRUE);

  if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
    return 0;

  if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
      D64_XS0_XS_IDLE)
    return 1;

  return 0;
}
/* !! tx entry routine */
static int
dma64_txfast(dma_info_t *di, void *p0, bool commit)
{
  void *p, *next;
  uchar *data;
  uint len;
  uint txout;
  uint32 flags = 0;
  uint32 pa;

  DMA_TRACE(("%s: dma_txfast\n", di->name));

  txout = di->txout;

  /*
   * Walk the chain of packet buffers
   * allocating and initializing transmit descriptor entries.
   */
  for (p = p0; p; p = next) {
    data = PKTDATA(di->osh, p);
    len = PKTLEN(di->osh, p);
    next = PKTNEXT(di->osh, p);

    /* return nonzero if out of tx descriptors */
    if (NEXTTXD(txout) == di->txin)
      goto outoftxd;

    if (len == 0)
      continue;

    /* get physical address of buffer start */
    pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p,
                          &di->txp_dmah[txout]);

    flags = 0;
    if (p == p0)
      flags |= D64_CTRL1_SOF;
    if (next == NULL)
      flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
    if (txout == (di->ntxd - 1))
      flags |= D64_CTRL1_EOT;

    dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
    ASSERT(di->txp[txout] == NULL);

    txout = NEXTTXD(txout);
  }

  /* if last txd eof not set, fix it */
  if (!(flags & D64_CTRL1_EOF))
    W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
         BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

  /* save the packet */
  di->txp[PREVTXD(txout)] = p0;

  /* bump the tx descriptor index */
  di->txout = txout;

  /* kick the chip */
  if (commit)
    W_REG(di->osh, &di->d64txregs->ptr, I2B(txout, dma64dd_t));

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

  return (0);

outoftxd:
  DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
  PKTFREE(di->osh, p0, TRUE);
  di->hnddma.txavail = 0;
  di->hnddma.txnobuf++;
  return (-1);
}
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
static void *
dma64_getnexttxp(dma_info_t *di, bool forceall)
{
  uint start, end, i;
  void *txp;

  DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

  if (di->ntxd == 0)
    return (NULL);

  txp = NULL;

  start = di->txin;
  if (forceall)
    end = di->txout;
  else
    end =
      B2I(R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK,
          dma64dd_t);

  if ((start == 0) && (end > di->txout))
    goto bogus;

  for (i = start; i != end && !txp; i = NEXTTXD(i)) {
    DMA_UNMAP(di->osh,
              (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
               di->dataoffsetlow),
              (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
               D64_CTRL2_BC_MASK), DMA_TX, di->txp[i], &di->txp_dmah[i]);

    W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
    W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

    txp = di->txp[i];
    di->txp[i] = NULL;
  }

  di->txin = i;

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

  return (txp);

bogus:
  DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
             start, end, di->txout, forceall));
  return (NULL);
}
static void *
dma64_getnextrxp(dma_info_t *di, bool forceall)
{
  uint i;
  void *rxp;

  /* if forcing, dma engine must be disabled */
  ASSERT(!forceall || !dma64_rxenabled(di));

  i = di->rxin;

  /* return if no packets posted */
  if (i == di->rxout)
    return (NULL);

  /* ignore curr if forceall */
  if (!forceall &&
      (i ==
       B2I(R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK,
           dma64dd_t)))
    return (NULL);

  /* get the packet pointer that corresponds to the rx descriptor */
  rxp = di->rxp[i];
  ASSERT(rxp);
  di->rxp[i] = NULL;

  /* clear this packet from the descriptor ring */
  DMA_UNMAP(di->osh,
            (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow),
            di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

  W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
  W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

  di->rxin = NEXTRXD(i);

  return (rxp);
}
static bool
_dma64_addrext(osl_t *osh, dma64regs_t *dma64regs)
{
  uint32 w;

  OR_REG(osh, &dma64regs->control, D64_XC_AE);
  w = R_REG(osh, &dma64regs->control);
  AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
  return ((w & D64_XC_AE) == D64_XC_AE);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma64_txrotate(dma_info_t *di)
{
  uint ad;
  uint nactive;
  uint rot;
  uint old, new;
  uint32 w;
  uint first, last;

  ASSERT(dma64_txsuspendedidle(di));

  nactive = _dma_txactive(di);
  ad =
    B2I((R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK),
        dma64dd_t);
  rot = TXD(ad - di->txin);

  ASSERT(rot < di->ntxd);

  /* full-ring case is a lot harder - don't worry about this */
  if (rot >= (di->ntxd - nactive)) {
    DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
    return;
  }

  first = di->txin;
  last = PREVTXD(di->txout);

  /* move entries starting at last and moving backwards to first */
  for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
    new = TXD(old + rot);

    /*
     * Move the tx dma descriptor.
     * EOT is set only in the last entry in the ring.
     */
    w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
    if (new == (di->ntxd - 1))
      w |= D64_CTRL1_EOT;
    W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

    w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
    W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

    W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
    W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

    /* zap the old tx dma descriptor address field */
    W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
    W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

    /* move the corresponding txp[] entry */
    ASSERT(di->txp[new] == NULL);
    di->txp[new] = di->txp[old];
    di->txp[old] = NULL;
  }

  /* update txin and txout */
  di->txin = ad;
  di->txout = TXD(di->txout + rot);
  di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

  /* kick the chip */
  W_REG(di->osh, &di->d64txregs->ptr, I2B(di->txout, dma64dd_t));
}
#endif /* BCMDMA64 */
uint
dma_addrwidth(sb_t *sbh, void *dmaregs)
{
  dma32regs_t *dma32regs;
  osl_t *osh;

  osh = sb_osh(sbh);

  if (DMA64_CAP) {
    /* DMA engine is 64-bit capable */
    if (((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64)) {
      /* backplane is 64-bit capable */
      if (sb_backplane64(sbh))
        /* If bus is System Backplane or PCIE then we can access 64-bits */
        if ((BUSTYPE(sbh->bustype) == SB_BUS) ||
            ((BUSTYPE(sbh->bustype) == PCI_BUS) &&
             sbh->buscoretype == SB_PCIE))
          return (DMADDRWIDTH_64);

      /* DMA64 is always 32 bits capable, AE is always TRUE */
      ASSERT(_dma64_addrext(osh, (dma64regs_t *) dmaregs));

      return (DMADDRWIDTH_32);
    }
  }

  /* Start checking for 32-bit / 30-bit addressing */
  dma32regs = (dma32regs_t *) dmaregs;

  /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
  if ((BUSTYPE(sbh->bustype) == SB_BUS) ||
      ((BUSTYPE(sbh->bustype) == PCI_BUS) && sbh->buscoretype == SB_PCIE) ||
      (_dma32_addrext(osh, dma32regs)))
    return (DMADDRWIDTH_32);

  return (DMADDRWIDTH_30);
}
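/*
 * Illustrative sketch: a caller uses the reported width to decide whether
 * buffers must be bounced below the 1 GB line:
 *
 *   switch (dma_addrwidth(sbh, dmaregs)) {
 *   case DMADDRWIDTH_64:  any 64-bit physical address is usable
 *   case DMADDRWIDTH_32:  any 32-bit physical address is usable
 *   case DMADDRWIDTH_30:  only addresses with the PCI32ADDR_HIGH bits
 *                         clear (the low 1 GB) are usable
 *   }
 */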