/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright 2007, Broadcom Corporation
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 */
#include "linux_osl.h"
#include <bcmendian.h>
/* debug/trace */
#ifdef BCMDBG
#define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args
#define DMA_TRACE(args) if (!(*di->msg_level & 2)); else printf args
#else
#define DMA_ERROR(args)
#define DMA_TRACE(args)
#endif /* BCMDBG */
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level = 0;

#define MAXNAMEL	8	/* 8 char names */

#define DI_INFO(dmah)	(dma_info_t *)dmah

typedef struct osl_dmainfo osldma_t;
/* dma engine software state */
typedef struct dma_info
{
  struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
				 * which could be const */
  uint *msg_level;		/* message level pointer */
  char name[MAXNAMEL];		/* callers name for diag msgs */

  void *osh;			/* os handle */
  sb_t *sbh;			/* sb handle */

  bool dma64;			/* dma64 enabled */
  bool addrext;			/* this dma engine supports DmaExtendedAddrChanges */

  dma32regs_t *d32txregs;	/* 32 bits dma tx engine registers */
  dma32regs_t *d32rxregs;	/* 32 bits dma rx engine registers */
  dma64regs_t *d64txregs;	/* 64 bits dma tx engine registers */
  dma64regs_t *d64rxregs;	/* 64 bits dma rx engine registers */

  uint32 dma64align;		/* either 8k or 4k depends on number of dd */
  dma32dd_t *txd32;		/* pointer to dma32 tx descriptor ring */
  dma64dd_t *txd64;		/* pointer to dma64 tx descriptor ring */
  uint ntxd;			/* # tx descriptors tunable */
  uint txin;			/* index of next descriptor to reclaim */
  uint txout;			/* index of next descriptor to post */
  void **txp;			/* pointer to parallel array of pointers to packets */
  osldma_t *tx_dmah;		/* DMA TX descriptor ring handle */
  osldma_t **txp_dmah;		/* DMA TX packet data handle */
  ulong txdpa;			/* physical address of descriptor ring */
  uint txdalign;		/* #bytes added to alloc'd mem to align txd */
  uint txdalloc;		/* #bytes allocated for the ring */

  dma32dd_t *rxd32;		/* pointer to dma32 rx descriptor ring */
  dma64dd_t *rxd64;		/* pointer to dma64 rx descriptor ring */
  uint nrxd;			/* # rx descriptors tunable */
  uint rxin;			/* index of next descriptor to reclaim */
  uint rxout;			/* index of next descriptor to post */
  void **rxp;			/* pointer to parallel array of pointers to packets */
  osldma_t *rx_dmah;		/* DMA RX descriptor ring handle */
  osldma_t **rxp_dmah;		/* DMA RX packet data handle */
  ulong rxdpa;			/* physical address of descriptor ring */
  uint rxdalign;		/* #bytes added to alloc'd mem to align rxd */
  uint rxdalloc;		/* #bytes allocated for the ring */

  uint rxbufsize;		/* rx buffer size in bytes,
				 * not including the extra headroom */
  uint nrxpost;			/* # rx buffers to keep posted */
  uint rxoffset;		/* rxcontrol offset */
  uint ddoffsetlow;		/* add to get dma address of descriptor ring, low 32 bits */
  uint ddoffsethigh;		/* high 32 bits */
  uint dataoffsetlow;		/* add to get dma address of data buffer, low 32 bits */
  uint dataoffsethigh;		/* high 32 bits */
} dma_info_t;
#ifdef BCMDMA64
#define DMA64_ENAB(di)	((di)->dma64)
#define DMA64_CAP	TRUE
#else
#define DMA64_ENAB(di)	(0)
#define DMA64_CAP	FALSE
#endif /* BCMDMA64 */
/* descriptor bumping macros */
#define XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define TXD(x)		XXD((x), di->ntxd)
#define RXD(x)		XXD((x), di->nrxd)
#define NEXTTXD(i)	TXD(i + 1)
#define PREVTXD(i)	TXD(i - 1)
#define NEXTRXD(i)	RXD(i + 1)
#define NTXDACTIVE(h, t) TXD(t - h)
#define NRXDACTIVE(h, t) RXD(t - h)
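/* Worked example of the modulo-free ring arithmetic above: with
 * di->ntxd == 64, txin == 60 and txout == 2 (the ring has wrapped),
 * NTXDACTIVE(60, 2) == TXD(2 - 60) == (-58 & 63) == 6, i.e. six
 * descriptors are in flight. This masking trick is why ntxd and nrxd
 * must be powers of 2.
 */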
/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)	((bytes) / sizeof(type))
#define I2B(index, type)	((index) * sizeof(type))
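/* Example: a dma32 descriptor (dma32dd_t: ctrl + addr) is 8 bytes and a
 * dma64 descriptor (dma64dd_t: ctrl1, ctrl2, addrlow, addrhigh) is 16
 * bytes, so a chip "current descriptor" byte offset of 40 corresponds to
 * B2I(40, dma32dd_t) == index 5, and posting index 5 on the 64-bit engine
 * writes I2B(5, dma64dd_t) == byte offset 80 into its ptr register.
 */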
#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */
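/* With 30-bit PCI addressing, a physical address such as 0x90001000 is
 * split for the descriptor: address[31:30] == (0x90001000 & PCI32ADDR_HIGH)
 * >> 30 == 0x2 becomes the AE (address extension) field, and the remaining
 * low 30 bits (0x90001000 & ~PCI32ADDR_HIGH == 0x10001000) go in the
 * address word; see dma32_dd_upd()/dma64_dd_upd() below.
 */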
/* common prototypes */
static bool _dma_isaddrext (dma_info_t * di);
static bool _dma_alloc (dma_info_t * di, uint direction);
static void _dma_detach (dma_info_t * di);
static void _dma_ddtable_init (dma_info_t * di, uint direction, ulong pa);
static void _dma_rxinit (dma_info_t * di);
static void *_dma_rx (dma_info_t * di);
static void _dma_rxfill (dma_info_t * di);
static void _dma_rxreclaim (dma_info_t * di);
static void _dma_rxenable (dma_info_t * di);
static void *_dma_getnextrxp (dma_info_t * di, bool forceall);

static void _dma_txblock (dma_info_t * di);
static void _dma_txunblock (dma_info_t * di);
static uint _dma_txactive (dma_info_t * di);

static void *_dma_peeknexttxp (dma_info_t * di);
static uintptr _dma_getvar (dma_info_t * di, const char *name);
static void _dma_counterreset (dma_info_t * di);
static void _dma_fifoloopbackenable (dma_info_t * di);
/* ** 32 bit DMA prototypes */
static bool dma32_alloc (dma_info_t * di, uint direction);
static bool dma32_txreset (dma_info_t * di);
static bool dma32_rxreset (dma_info_t * di);
static bool dma32_txsuspendedidle (dma_info_t * di);
static int dma32_txfast (dma_info_t * di, void *p0, bool commit);
static void *dma32_getnexttxp (dma_info_t * di, bool forceall);
static void *dma32_getnextrxp (dma_info_t * di, bool forceall);
static void dma32_txrotate (dma_info_t * di);
static bool dma32_rxidle (dma_info_t * di);
static void dma32_txinit (dma_info_t * di);
static bool dma32_txenabled (dma_info_t * di);
static void dma32_txsuspend (dma_info_t * di);
static void dma32_txresume (dma_info_t * di);
static bool dma32_txsuspended (dma_info_t * di);
static void dma32_txreclaim (dma_info_t * di, bool forceall);
static bool dma32_txstopped (dma_info_t * di);
static bool dma32_rxstopped (dma_info_t * di);
static bool dma32_rxenabled (dma_info_t * di);
static bool _dma32_addrext (osl_t * osh, dma32regs_t * dma32regs);
/* ** 64 bit DMA prototypes and stubs */
#ifdef BCMDMA64
static bool dma64_alloc (dma_info_t * di, uint direction);
static bool dma64_txreset (dma_info_t * di);
static bool dma64_rxreset (dma_info_t * di);
static bool dma64_txsuspendedidle (dma_info_t * di);
static int dma64_txfast (dma_info_t * di, void *p0, bool commit);
static void *dma64_getnexttxp (dma_info_t * di, bool forceall);
static void *dma64_getnextrxp (dma_info_t * di, bool forceall);
static void dma64_txrotate (dma_info_t * di);

static bool dma64_rxidle (dma_info_t * di);
static void dma64_txinit (dma_info_t * di);
static bool dma64_txenabled (dma_info_t * di);
static void dma64_txsuspend (dma_info_t * di);
static void dma64_txresume (dma_info_t * di);
static bool dma64_txsuspended (dma_info_t * di);
static void dma64_txreclaim (dma_info_t * di, bool forceall);
static bool dma64_txstopped (dma_info_t * di);
static bool dma64_rxstopped (dma_info_t * di);
static bool dma64_rxenabled (dma_info_t * di);
static bool _dma64_addrext (osl_t * osh, dma64regs_t * dma64regs);
#else

/* Stub bodies (reconstructed no-ops) compiled when 64-bit DMA support
 * (BCMDMA64) is not configured; the DMA64_ENAB(di) == (0) dispatch above
 * guarantees they are never reached.
 */
static bool dma64_alloc (dma_info_t * di, uint direction) { ASSERT (0); return FALSE; }
static bool dma64_txreset (dma_info_t * di) { ASSERT (0); return FALSE; }
static bool dma64_rxreset (dma_info_t * di) { ASSERT (0); return FALSE; }
static bool dma64_txsuspendedidle (dma_info_t * di) { ASSERT (0); return FALSE; }
static int dma64_txfast (dma_info_t * di, void *p0, bool commit) { ASSERT (0); return -1; }
static void *dma64_getnexttxp (dma_info_t * di, bool forceall) { ASSERT (0); return NULL; }
static void *dma64_getnextrxp (dma_info_t * di, bool forceall) { ASSERT (0); return NULL; }
static void dma64_txrotate (dma_info_t * di) { ASSERT (0); }
static bool dma64_rxidle (dma_info_t * di) { ASSERT (0); return FALSE; }
static void dma64_txinit (dma_info_t * di) { ASSERT (0); }
static bool dma64_txenabled (dma_info_t * di) { ASSERT (0); return FALSE; }
static void dma64_txsuspend (dma_info_t * di) { ASSERT (0); }
static void dma64_txresume (dma_info_t * di) { ASSERT (0); }
static bool dma64_txsuspended (dma_info_t * di) { ASSERT (0); return FALSE; }
static void dma64_txreclaim (dma_info_t * di, bool forceall) { ASSERT (0); }
static bool dma64_txstopped (dma_info_t * di) { ASSERT (0); return FALSE; }
static bool dma64_rxstopped (dma_info_t * di) { ASSERT (0); return FALSE; }
static bool dma64_rxenabled (dma_info_t * di) { ASSERT (0); return FALSE; }
static bool _dma64_addrext (osl_t * osh, dma64regs_t * dma64regs) { ASSERT (0); return FALSE; }

#endif /* BCMDMA64 */
static void dma32_dumpring (dma_info_t * di, struct bcmstrbuf *b,
			    dma32dd_t * ring, uint start, uint end,
			    uint max_num);
static void dma32_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring);
static void dma32_dumptx (dma_info_t * di, struct bcmstrbuf *b,
			  bool dumpring);
static void dma32_dumprx (dma_info_t * di, struct bcmstrbuf *b,
			  bool dumpring);

static void dma64_dumpring (dma_info_t * di, struct bcmstrbuf *b,
			    dma64dd_t * ring, uint start, uint end,
			    uint max_num);
static void dma64_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring);
static void dma64_dumptx (dma_info_t * di, struct bcmstrbuf *b,
			  bool dumpring);
static void dma64_dumprx (dma_info_t * di, struct bcmstrbuf *b,
			  bool dumpring);
static di_fcn_t dma64proc = {
  (di_detach_t) _dma_detach,
  (di_txinit_t) dma64_txinit,
  (di_txreset_t) dma64_txreset,
  (di_txenabled_t) dma64_txenabled,
  (di_txsuspend_t) dma64_txsuspend,
  (di_txresume_t) dma64_txresume,
  (di_txsuspended_t) dma64_txsuspended,
  (di_txsuspendedidle_t) dma64_txsuspendedidle,
  (di_txfast_t) dma64_txfast,
  (di_txstopped_t) dma64_txstopped,
  (di_txreclaim_t) dma64_txreclaim,
  (di_getnexttxp_t) dma64_getnexttxp,
  (di_peeknexttxp_t) _dma_peeknexttxp,
  (di_txblock_t) _dma_txblock,
  (di_txunblock_t) _dma_txunblock,
  (di_txactive_t) _dma_txactive,
  (di_txrotate_t) dma64_txrotate,

  (di_rxinit_t) _dma_rxinit,
  (di_rxreset_t) dma64_rxreset,
  (di_rxidle_t) dma64_rxidle,
  (di_rxstopped_t) dma64_rxstopped,
  (di_rxenable_t) _dma_rxenable,
  (di_rxenabled_t) dma64_rxenabled,

  (di_rxfill_t) _dma_rxfill,
  (di_rxreclaim_t) _dma_rxreclaim,
  (di_getnextrxp_t) _dma_getnextrxp,

  (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
  (di_getvar_t) _dma_getvar,
  (di_counterreset_t) _dma_counterreset,

  (di_dump_t) dma64_dump,
  (di_dumptx_t) dma64_dumptx,
  (di_dumprx_t) dma64_dumprx,
};
static di_fcn_t dma32proc = {
  (di_detach_t) _dma_detach,
  (di_txinit_t) dma32_txinit,
  (di_txreset_t) dma32_txreset,
  (di_txenabled_t) dma32_txenabled,
  (di_txsuspend_t) dma32_txsuspend,
  (di_txresume_t) dma32_txresume,
  (di_txsuspended_t) dma32_txsuspended,
  (di_txsuspendedidle_t) dma32_txsuspendedidle,
  (di_txfast_t) dma32_txfast,
  (di_txstopped_t) dma32_txstopped,
  (di_txreclaim_t) dma32_txreclaim,
  (di_getnexttxp_t) dma32_getnexttxp,
  (di_peeknexttxp_t) _dma_peeknexttxp,
  (di_txblock_t) _dma_txblock,
  (di_txunblock_t) _dma_txunblock,
  (di_txactive_t) _dma_txactive,
  (di_txrotate_t) dma32_txrotate,

  (di_rxinit_t) _dma_rxinit,
  (di_rxreset_t) dma32_rxreset,
  (di_rxidle_t) dma32_rxidle,
  (di_rxstopped_t) dma32_rxstopped,
  (di_rxenable_t) _dma_rxenable,
  (di_rxenabled_t) dma32_rxenabled,

  (di_rxfill_t) _dma_rxfill,
  (di_rxreclaim_t) _dma_rxreclaim,
  (di_getnextrxp_t) _dma_getnextrxp,

  (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
  (di_getvar_t) _dma_getvar,
  (di_counterreset_t) _dma_counterreset,

  (di_dump_t) dma32_dump,
  (di_dumptx_t) dma32_dumptx,
  (di_dumprx_t) dma32_dumprx,
};
hnddma_t *
dma_attach (osl_t * osh, char *name, sb_t * sbh, void *dmaregstx,
	    void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
	    uint nrxpost, uint rxoffset, uint * msg_level)
{
  dma_info_t *di;
  uint size;

  /* allocate private info structure */
  if ((di = MALLOC (osh, sizeof (dma_info_t))) == NULL)
    {
      printf ("dma_attach: out of memory, malloced %d bytes\n",
	      MALLOCED (osh));
      return (NULL);
    }
  bzero ((char *) di, sizeof (dma_info_t));

  di->msg_level = msg_level ? msg_level : &dma_msg_level;

  /* old chips w/o sb is no longer supported */
  ASSERT (sbh != NULL);

  di->dma64 = ((sb_coreflagshi (sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);

#ifndef BCMDMA64
  if (di->dma64)
    {
      DMA_ERROR (("dma_attach: driver doesn't have the capability to support "
		  "64 bits DMA\n"));
      goto fail;
    }
#endif

  /* check arguments */
  ASSERT (ISPOWEROF2 (ntxd));
  ASSERT (ISPOWEROF2 (nrxd));
  if (nrxd == 0)
    ASSERT (dmaregsrx == NULL);
  if (ntxd == 0)
    ASSERT (dmaregstx == NULL);

  /* init dma reg pointer */
  if (di->dma64)
    {
      ASSERT (ntxd <= D64MAXDD);
      ASSERT (nrxd <= D64MAXDD);
      di->d64txregs = (dma64regs_t *) dmaregstx;
      di->d64rxregs = (dma64regs_t *) dmaregsrx;

      di->dma64align = D64RINGALIGN;
      if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2))
	{
	  /* for smaller dd table, HW relax the alignment requirement */
	  di->dma64align = D64RINGALIGN / 2;
	}
    }
  else
    {
      ASSERT (ntxd <= D32MAXDD);
      ASSERT (nrxd <= D32MAXDD);
      di->d32txregs = (dma32regs_t *) dmaregstx;
      di->d32rxregs = (dma32regs_t *) dmaregsrx;
    }

  DMA_TRACE (("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d " "rxoffset %d dmaregstx %p dmaregsrx %p\n", name, (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd, rxbufsize, nrxpost, rxoffset, dmaregstx, dmaregsrx));

  /* make a private copy of our callers name */
  strncpy (di->name, name, MAXNAMEL);
  di->name[MAXNAMEL - 1] = '\0';

  di->osh = osh;
  di->sbh = sbh;

  /* save tunables */
  di->ntxd = ntxd;
  di->nrxd = nrxd;

  /* the actual dma size doesn't include the extra headroom */
  if (rxbufsize > BCMEXTRAHDROOM)
    di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
  else
    di->rxbufsize = rxbufsize;

  di->nrxpost = nrxpost;
  di->rxoffset = rxoffset;

  /*
   * figure out the DMA physical address offset for dd and data
   *   for old chips w/o sb, use zero
   *   for new chips w sb,
   *     PCI/PCIE: they map silicon backplace address to zero based memory, need offset
   *     Other bus: use zero
   *     SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
   */
  di->ddoffsetlow = 0;
  di->dataoffsetlow = 0;
  /* for pci bus, add offset */
  if (sbh->bustype == PCI_BUS)
    {
      if ((sbh->buscoretype == SB_PCIE) && di->dma64)
	{
	  /* pcie with DMA64 */
	  di->ddoffsetlow = 0;
	  di->ddoffsethigh = SB_PCIE_DMA_H32;
	}
      else
	{
	  /* pci(DMA32/DMA64) or pcie with DMA32 */
	  di->ddoffsetlow = SB_PCI_DMA;
	  di->ddoffsethigh = 0;
	}
      di->dataoffsetlow = di->ddoffsetlow;
      di->dataoffsethigh = di->ddoffsethigh;
    }

#if defined(__mips__) && defined(IL_BIGENDIAN)
  /* use sdram swapped region for data buffers but not dma descriptors */
  di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif

  di->addrext = _dma_isaddrext (di);

  /* allocate tx packet pointer vector */
  if (ntxd)
    {
      size = ntxd * sizeof (void *);
      if ((di->txp = MALLOC (osh, size)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: out of tx memory, malloced %d bytes\n",
		      di->name, MALLOCED (osh)));
	  goto fail;
	}
      bzero ((char *) di->txp, size);
    }

  /* allocate rx packet pointer vector */
  if (nrxd)
    {
      size = nrxd * sizeof (void *);
      if ((di->rxp = MALLOC (osh, size)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: out of rx memory, malloced %d bytes\n",
		      di->name, MALLOCED (osh)));
	  goto fail;
	}
      bzero ((char *) di->rxp, size);
    }

  /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
  if (ntxd)
    {
      if (!_dma_alloc (di, DMA_TX))
	goto fail;
    }

  /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
  if (nrxd)
    {
      if (!_dma_alloc (di, DMA_RX))
	goto fail;
    }

  if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ)
      && !di->addrext)
    {
      DMA_ERROR (("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
		  di->name, di->txdpa));
      goto fail;
    }
  if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ)
      && !di->addrext)
    {
      DMA_ERROR (("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
		  di->name, di->rxdpa));
      goto fail;
    }

  DMA_TRACE (("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));

  /* allocate tx packet pointer vector and DMA mapping vectors */
  if (ntxd)
    {
      size = ntxd * sizeof (osldma_t **);
      if ((di->txp_dmah = (osldma_t **) MALLOC (osh, size)) == NULL)
	goto fail;
      bzero ((char *) di->txp_dmah, size);
    }

  /* allocate rx packet pointer vector and DMA mapping vectors */
  if (nrxd)
    {
      size = nrxd * sizeof (osldma_t **);
      if ((di->rxp_dmah = (osldma_t **) MALLOC (osh, size)) == NULL)
	goto fail;
      bzero ((char *) di->rxp_dmah, size);
    }

  /* initialize opsvec of function pointers */
  di->hnddma.di_fn = DMA64_ENAB (di) ? dma64proc : dma32proc;

  return ((hnddma_t *) di);

fail:
  _dma_detach (di);
  return (NULL);
}
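#if 0
/* Illustrative (not compiled) sketch of a typical attach sequence from a
 * driver's probe path. The name string, descriptor counts, buffer size and
 * register block pointers are hypothetical placeholder values chosen for
 * the example, not values mandated by this module; note ntxd/nrxd must be
 * powers of 2 and nrxpost must fit in the rx ring.
 */
static hnddma_t *
example_attach (osl_t * osh, sb_t * sbh, void *txregs, void *rxregs)
{
  /* 256 tx / 512 rx descriptors, 2KB rx buffers, keep 256 rx buffers
   * posted, no rx status offset, default message level
   */
  return dma_attach (osh, "example", sbh, txregs, rxregs,
		     256, 512, 2048, 256, 0, NULL);
}
#endif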
/* init the tx or rx descriptor */
static void
dma32_dd_upd (dma_info_t * di, dma32dd_t * ddring, ulong pa, uint outidx,
	      uint32 * flags, uint32 bufcount)
{
  /* dma32 uses 32 bits control to fit both flags and bufcounter */
  *flags = *flags | (bufcount & CTRL_BC_MASK);

  if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
    {
      W_SM (&ddring[outidx].addr, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].ctrl, BUS_SWAP32 (*flags));
    }
  else
    {
      /* address extension */
      uint32 ae;
      ASSERT (di->addrext);
      ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
      pa &= ~PCI32ADDR_HIGH;

      *flags |= (ae << CTRL_AE_SHIFT);
      W_SM (&ddring[outidx].addr, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].ctrl, BUS_SWAP32 (*flags));
    }
}
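/* In the 32-bit descriptor the flags and the buffer byte count share one
 * ctrl word (flags | (bufcount & CTRL_BC_MASK)); in the 64-bit variant
 * below the flags live in ctrl1, the byte count in ctrl2, and the AE bits
 * accordingly move from ctrl to ctrl2.
 */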
static void
dma64_dd_upd (dma_info_t * di, dma64dd_t * ddring, ulong pa, uint outidx,
	      uint32 * flags, uint32 bufcount)
{
  uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

  /* PCI bus with big(>1G) physical address, use address extension */
  if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
    {
      W_SM (&ddring[outidx].addrlow, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].addrhigh, BUS_SWAP32 (0 + di->dataoffsethigh));
      W_SM (&ddring[outidx].ctrl1, BUS_SWAP32 (*flags));
      W_SM (&ddring[outidx].ctrl2, BUS_SWAP32 (ctrl2));
    }
  else
    {
      /* address extension */
      uint32 ae;
      ASSERT (di->addrext);

      ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
      pa &= ~PCI32ADDR_HIGH;

      ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
      W_SM (&ddring[outidx].addrlow, BUS_SWAP32 (pa + di->dataoffsetlow));
      W_SM (&ddring[outidx].addrhigh, BUS_SWAP32 (0 + di->dataoffsethigh));
      W_SM (&ddring[outidx].ctrl1, BUS_SWAP32 (*flags));
      W_SM (&ddring[outidx].ctrl2, BUS_SWAP32 (ctrl2));
    }
}
static bool
_dma32_addrext (osl_t * osh, dma32regs_t * dma32regs)
{
  uint32 w;

  OR_REG (osh, &dma32regs->control, XC_AE);
  w = R_REG (osh, &dma32regs->control);
  AND_REG (osh, &dma32regs->control, ~XC_AE);
  return ((w & XC_AE) == XC_AE);
}
static bool
_dma_alloc (dma_info_t * di, uint direction)
{
  if (DMA64_ENAB (di))
    {
      return dma64_alloc (di, direction);
    }
  else
    {
      return dma32_alloc (di, direction);
    }
}
/* !! may be called with core in reset */
static void
_dma_detach (dma_info_t * di)
{
  if (di == NULL)
    return;

  DMA_TRACE (("%s: dma_detach\n", di->name));

  /* shouldn't be here if descriptors are unreclaimed */
  ASSERT (di->txin == di->txout);
  ASSERT (di->rxin == di->rxout);

  /* free dma descriptor rings */
  if (DMA64_ENAB (di))
    {
      if (di->txd64)
	DMA_FREE_CONSISTENT (di->osh,
			     ((int8 *) (uintptr) di->txd64 - di->txdalign),
			     di->txdalloc, (di->txdpa - di->txdalign),
			     &di->tx_dmah);
      if (di->rxd64)
	DMA_FREE_CONSISTENT (di->osh,
			     ((int8 *) (uintptr) di->rxd64 - di->rxdalign),
			     di->rxdalloc, (di->rxdpa - di->rxdalign),
			     &di->rx_dmah);
    }
  else
    {
      if (di->txd32)
	DMA_FREE_CONSISTENT (di->osh,
			     ((int8 *) (uintptr) di->txd32 - di->txdalign),
			     di->txdalloc, (di->txdpa - di->txdalign),
			     &di->tx_dmah);
      if (di->rxd32)
	DMA_FREE_CONSISTENT (di->osh,
			     ((int8 *) (uintptr) di->rxd32 - di->rxdalign),
			     di->rxdalloc, (di->rxdpa - di->rxdalign),
			     &di->rx_dmah);
    }

  /* free packet pointer vectors */
  if (di->txp)
    MFREE (di->osh, (void *) di->txp, (di->ntxd * sizeof (void *)));
  if (di->rxp)
    MFREE (di->osh, (void *) di->rxp, (di->nrxd * sizeof (void *)));

  /* free tx packet DMA handles */
  if (di->txp_dmah)
    MFREE (di->osh, (void *) di->txp_dmah, di->ntxd * sizeof (osldma_t **));

  /* free rx packet DMA handles */
  if (di->rxp_dmah)
    MFREE (di->osh, (void *) di->rxp_dmah, di->nrxd * sizeof (osldma_t **));

  /* free our private info structure */
  MFREE (di->osh, (void *) di, sizeof (dma_info_t));
}
/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool
_dma_isaddrext (dma_info_t * di)
{
  if (DMA64_ENAB (di))
    {
      /* DMA64 supports full 32 bits or 64 bits. AE is always valid */

      /* not all tx or rx channel are available */
      if (di->d64txregs != NULL)
	{
	  if (!_dma64_addrext (di->osh, di->d64txregs))
	    {
	      DMA_ERROR (("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name));
	      ASSERT (0);
	    }
	  return TRUE;
	}
      else if (di->d64rxregs != NULL)
	{
	  if (!_dma64_addrext (di->osh, di->d64rxregs))
	    {
	      DMA_ERROR (("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name));
	      ASSERT (0);
	    }
	  return TRUE;
	}
      return FALSE;
    }
  else if (di->d32txregs)
    return (_dma32_addrext (di->osh, di->d32txregs));
  else if (di->d32rxregs)
    return (_dma32_addrext (di->osh, di->d32rxregs));

  return FALSE;
}
/* initialize descriptor table base address */
static void
_dma_ddtable_init (dma_info_t * di, uint direction, ulong pa)
{
  if (DMA64_ENAB (di))
    {
      if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
	{
	  if (direction == DMA_TX)
	    {
	      W_REG (di->osh, &di->d64txregs->addrlow,
		     (pa + di->ddoffsetlow));
	      W_REG (di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
	    }
	  else
	    {
	      W_REG (di->osh, &di->d64rxregs->addrlow,
		     (pa + di->ddoffsetlow));
	      W_REG (di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
	    }
	}
      else
	{
	  /* DMA64 32bits address extension */
	  uint32 ae;
	  ASSERT (di->addrext);

	  /* shift the high bit(s) from pa to ae */
	  ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
	  pa &= ~PCI32ADDR_HIGH;

	  if (direction == DMA_TX)
	    {
	      W_REG (di->osh, &di->d64txregs->addrlow,
		     (pa + di->ddoffsetlow));
	      W_REG (di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
	      SET_REG (di->osh, &di->d64txregs->control, D64_XC_AE,
		       (ae << D64_XC_AE_SHIFT));
	    }
	  else
	    {
	      W_REG (di->osh, &di->d64rxregs->addrlow,
		     (pa + di->ddoffsetlow));
	      W_REG (di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
	      SET_REG (di->osh, &di->d64rxregs->control, D64_RC_AE,
		       (ae << D64_RC_AE_SHIFT));
	    }
	}
    }
  else
    {
      if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH))
	{
	  if (direction == DMA_TX)
	    W_REG (di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
	  else
	    W_REG (di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
	}
      else
	{
	  /* dma32 address extension */
	  uint32 ae;
	  ASSERT (di->addrext);

	  /* shift the high bit(s) from pa to ae */
	  ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
	  pa &= ~PCI32ADDR_HIGH;

	  if (direction == DMA_TX)
	    {
	      W_REG (di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
	      SET_REG (di->osh, &di->d32txregs->control, XC_AE,
		       (ae << XC_AE_SHIFT));
	    }
	  else
	    {
	      W_REG (di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
	      SET_REG (di->osh, &di->d32rxregs->control, RC_AE,
		       (ae << RC_AE_SHIFT));
	    }
	}
    }
}
static void
_dma_fifoloopbackenable (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_fifoloopbackenable\n", di->name));
  if (DMA64_ENAB (di))
    OR_REG (di->osh, &di->d64txregs->control, D64_XC_LE);
  else
    OR_REG (di->osh, &di->d32txregs->control, XC_LE);
}
static void
_dma_rxinit (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxinit\n", di->name));

  if (di->nrxd == 0)
    return;

  di->rxin = di->rxout = 0;

  /* clear rx descriptor ring */
  if (DMA64_ENAB (di))
    BZERO_SM ((void *) (uintptr) di->rxd64, (di->nrxd * sizeof (dma64dd_t)));
  else
    BZERO_SM ((void *) (uintptr) di->rxd32, (di->nrxd * sizeof (dma32dd_t)));

  _dma_rxenable (di);
  _dma_ddtable_init (di, DMA_RX, di->rxdpa);
}
static void
_dma_rxenable (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxenable\n", di->name));

  if (DMA64_ENAB (di))
    W_REG (di->osh, &di->d64rxregs->control,
	   ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
  else
    W_REG (di->osh, &di->d32rxregs->control,
	   ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}
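/* RX buffer layout note: the engine deposits rxoffset bytes of hardware
 * receive header ahead of the frame data in each posted buffer; the first
 * uint16 of that header is the frame length, which _dma_rx() below reads
 * via PKTDATA(). The RO field programmed above tells the chip how much
 * header room to leave.
 */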
/* !! rx entry routine, returns a pointer to the next frame received,
 * or NULL if there are no more
 */
static void *
_dma_rx (dma_info_t * di)
{
  void *p;
  uint len;
  int skiplen = 0;

  while ((p = _dma_getnextrxp (di, FALSE)))
    {
      /* skip giant packets which span multiple rx descriptors */
      if (skiplen > 0)
	{
	  skiplen -= di->rxbufsize;
	  if (skiplen < 0)
	    skiplen = 0;
	  PKTFREE (di->osh, p, FALSE);
	  continue;
	}

      len = ltoh16 (*(uint16 *) (PKTDATA (di->osh, p)));
      DMA_TRACE (("%s: dma_rx len %d\n", di->name, len));

      /* bad frame length check */
      if (len > (di->rxbufsize - di->rxoffset))
	{
	  DMA_ERROR (("%s: dma_rx: bad frame length (%d)\n", di->name, len));
	  if (len > 0)
	    skiplen = len - (di->rxbufsize - di->rxoffset);
	  PKTFREE (di->osh, p, FALSE);
	  di->hnddma.rxgiants++;
	  continue;
	}

      /* set actual length */
      PKTSETLEN (di->osh, p, (di->rxoffset + len));

      break;
    }

  return (p);
}
/* post receive buffers */
static void
_dma_rxfill (dma_info_t * di)
{
  void *p;
  uint rxin, rxout;
  uint32 flags = 0;
  uint n;
  uint i;
  uint32 pa;
  uint extra_offset = 0;

  /*
   * Determine how many receive buffers we're lacking
   * from the full complement, allocate, initialize,
   * and post them, then update the chip rx lastdscr.
   */

  rxin = di->rxin;
  rxout = di->rxout;

  n = di->nrxpost - NRXDACTIVE (rxin, rxout);

  DMA_TRACE (("%s: dma_rxfill: post %d\n", di->name, n));

  if (di->rxbufsize > BCMEXTRAHDROOM)
    extra_offset = BCMEXTRAHDROOM;

  for (i = 0; i < n; i++)
    {
      /* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
         size to be allocated
       */
      if ((p = PKTGET (di->osh, di->rxbufsize + extra_offset,
		       FALSE)) == NULL)
	{
	  DMA_ERROR (("%s: dma_rxfill: out of rxbufs\n", di->name));
	  di->hnddma.rxnobuf++;
	  break;
	}

      /* reserve an extra headroom, if applicable */
      if (extra_offset)
	PKTPULL (di->osh, p, extra_offset);

      /* Do a cached write instead of uncached write since DMA_MAP
       * will flush the cache.
       */
      *(uint32 *) (PKTDATA (di->osh, p)) = 0;

      pa = (uint32) DMA_MAP (di->osh, PKTDATA (di->osh, p),
			     di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);

      ASSERT (ISALIGNED (pa, 4));

      /* save the free packet pointer */
      ASSERT (di->rxp[rxout] == NULL);
      di->rxp[rxout] = p;

      /* reset flags for each descriptor */
      flags = 0;
      if (DMA64_ENAB (di))
	{
	  if (rxout == (di->nrxd - 1))
	    flags = D64_CTRL1_EOT;

	  dma64_dd_upd (di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
	}
      else
	{
	  if (rxout == (di->nrxd - 1))
	    flags = CTRL_EOT;

	  dma32_dd_upd (di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
	}
      rxout = NEXTRXD (rxout);
    }

  di->rxout = rxout;

  /* update the chip lastdscr pointer */
  if (DMA64_ENAB (di))
    {
      W_REG (di->osh, &di->d64rxregs->ptr, I2B (rxout, dma64dd_t));
    }
  else
    {
      W_REG (di->osh, &di->d32rxregs->ptr, I2B (rxout, dma32dd_t));
    }
}
/* like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp (dma_info_t * di)
{
  uint end, i;

  if (DMA64_ENAB (di))
    {
      end =
	B2I (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK,
	     dma64dd_t);
    }
  else
    {
      end =
	B2I (R_REG (di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
    }

  for (i = di->txin; i != end; i = NEXTTXD (i))
    if (di->txp[i])
      return (di->txp[i]);

  return (NULL);
}
static void
_dma_rxreclaim (dma_info_t * di)
{
  void *p;

  /* "unused local" warning suppression for OSLs that
   * define PKTFREE() without using the di->osh arg
   */
  di = di;

  DMA_TRACE (("%s: dma_rxreclaim\n", di->name));

  while ((p = _dma_getnextrxp (di, TRUE)))
    PKTFREE (di->osh, p, FALSE);
}
static void *
_dma_getnextrxp (dma_info_t * di, bool forceall)
{
  if (di->nrxd == 0)
    return (NULL);

  if (DMA64_ENAB (di))
    {
      return dma64_getnextrxp (di, forceall);
    }
  else
    {
      return dma32_getnextrxp (di, forceall);
    }
}
static void
_dma_txblock (dma_info_t * di)
{
  di->hnddma.txavail = 0;
}

static void
_dma_txunblock (dma_info_t * di)
{
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
}

static uint
_dma_txactive (dma_info_t * di)
{
  return (NTXDACTIVE (di->txin, di->txout));
}
static void
_dma_counterreset (dma_info_t * di)
{
  /* reset all software counters */
  di->hnddma.rxgiants = 0;
  di->hnddma.rxnobuf = 0;
  di->hnddma.txnobuf = 0;
}
/* get the address of the var in order to change later */
static uintptr
_dma_getvar (dma_info_t * di, const char *name)
{
  if (!strcmp (name, "&txavail"))
    return ((uintptr) & (di->hnddma.txavail));
  else
    {
      ASSERT (0);
    }
  return (0);
}
void
dma_txpioloopback (osl_t * osh, dma32regs_t * regs)
{
  OR_REG (osh, &regs->control, XC_LE);
}
static void
dma32_dumpring (dma_info_t * di, struct bcmstrbuf *b, dma32dd_t * ring,
		uint start, uint end, uint max_num)
{
  uint i;

  for (i = start; i != end; i = XXD ((i + 1), max_num))
    {
      /* in the format of high->low 8 bytes */
      bcm_bprintf (b, "ring index %d: 0x%x %x\n", i, ring[i].addr,
		   ring[i].ctrl);
    }
}

static void
dma32_dumptx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->ntxd == 0)
    return;

  bcm_bprintf (b, "DMA32: txd32 %p txdpa 0x%lx txp %p txin %d txout %d "
	       "txavail %d\n", di->txd32, di->txdpa, di->txp, di->txin,
	       di->txout, di->hnddma.txavail);

  bcm_bprintf (b, "xmtcontrol 0x%x xmtaddr 0x%x xmtptr 0x%x xmtstatus 0x%x\n",
	       R_REG (di->osh, &di->d32txregs->control),
	       R_REG (di->osh, &di->d32txregs->addr),
	       R_REG (di->osh, &di->d32txregs->ptr),
	       R_REG (di->osh, &di->d32txregs->status));

  if (dumpring && di->txd32)
    dma32_dumpring (di, b, di->txd32, di->txin, di->txout, di->ntxd);
}

static void
dma32_dumprx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->nrxd == 0)
    return;

  bcm_bprintf (b, "DMA32: rxd32 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
	       di->rxd32, di->rxdpa, di->rxp, di->rxin, di->rxout);

  bcm_bprintf (b, "rcvcontrol 0x%x rcvaddr 0x%x rcvptr 0x%x rcvstatus 0x%x\n",
	       R_REG (di->osh, &di->d32rxregs->control),
	       R_REG (di->osh, &di->d32rxregs->addr),
	       R_REG (di->osh, &di->d32rxregs->ptr),
	       R_REG (di->osh, &di->d32rxregs->status));
  if (di->rxd32 && dumpring)
    dma32_dumpring (di, b, di->rxd32, di->rxin, di->rxout, di->nrxd);
}

static void
dma32_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  dma32_dumptx (di, b, dumpring);
  dma32_dumprx (di, b, dumpring);
}
static void
dma64_dumpring (dma_info_t * di, struct bcmstrbuf *b, dma64dd_t * ring,
		uint start, uint end, uint max_num)
{
  uint i;

  for (i = start; i != end; i = XXD ((i + 1), max_num))
    {
      /* in the format of high->low 16 bytes */
      bcm_bprintf (b, "ring index %d: 0x%x %x %x %x\n",
		   i, ring[i].addrhigh, ring[i].addrlow, ring[i].ctrl2,
		   ring[i].ctrl1);
    }
}

static void
dma64_dumptx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->ntxd == 0)
    return;

  bcm_bprintf (b, "DMA64: txd64 %p txdpa 0x%lx txp %p txin %d txout %d "
	       "txavail %d\n", di->txd64, di->txdpa, di->txp, di->txin,
	       di->txout, di->hnddma.txavail);

  bcm_bprintf (b, "xmtcontrol 0x%x xmtaddrlow 0x%x xmtaddrhigh 0x%x "
	       "xmtptr 0x%x xmtstatus0 0x%x xmtstatus1 0x%x\n",
	       R_REG (di->osh, &di->d64txregs->control),
	       R_REG (di->osh, &di->d64txregs->addrlow),
	       R_REG (di->osh, &di->d64txregs->addrhigh),
	       R_REG (di->osh, &di->d64txregs->ptr),
	       R_REG (di->osh, &di->d64txregs->status0),
	       R_REG (di->osh, &di->d64txregs->status1));

  if (dumpring && di->txd64)
    {
      dma64_dumpring (di, b, di->txd64, di->txin, di->txout, di->ntxd);
    }
}

static void
dma64_dumprx (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  if (di->nrxd == 0)
    return;

  bcm_bprintf (b, "DMA64: rxd64 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n",
	       di->rxd64, di->rxdpa, di->rxp, di->rxin, di->rxout);

  bcm_bprintf (b, "rcvcontrol 0x%x rcvaddrlow 0x%x rcvaddrhigh 0x%x rcvptr "
	       "0x%x rcvstatus0 0x%x rcvstatus1 0x%x\n",
	       R_REG (di->osh, &di->d64rxregs->control),
	       R_REG (di->osh, &di->d64rxregs->addrlow),
	       R_REG (di->osh, &di->d64rxregs->addrhigh),
	       R_REG (di->osh, &di->d64rxregs->ptr),
	       R_REG (di->osh, &di->d64rxregs->status0),
	       R_REG (di->osh, &di->d64rxregs->status1));
  if (di->rxd64 && dumpring)
    {
      dma64_dumpring (di, b, di->rxd64, di->rxin, di->rxout, di->nrxd);
    }
}

static void
dma64_dump (dma_info_t * di, struct bcmstrbuf *b, bool dumpring)
{
  dma64_dumptx (di, b, dumpring);
  dma64_dumprx (di, b, dumpring);
}
/* 32 bits DMA functions */
static void
dma32_txinit (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txinit\n", di->name));

  if (di->ntxd == 0)
    return;

  di->txin = di->txout = 0;
  di->hnddma.txavail = di->ntxd - 1;

  /* clear tx descriptor ring */
  BZERO_SM ((void *) (uintptr) di->txd32, (di->ntxd * sizeof (dma32dd_t)));
  W_REG (di->osh, &di->d32txregs->control, XC_XE);
  _dma_ddtable_init (di, DMA_TX, di->txdpa);
}
static bool
dma32_txenabled (dma_info_t * di)
{
  uint32 xc;

  /* If the chip is dead, it is not enabled :-) */
  xc = R_REG (di->osh, &di->d32txregs->control);
  return ((xc != 0xffffffff) && (xc & XC_XE));
}
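/* A control register value of 0xffffffff is the classic all-ones pattern a
 * PCI read returns when the device has been removed or the bus transaction
 * aborts, so it is treated here as "chip is dead" rather than as a real
 * register state; dma32_rxenabled() and the dma64 variants apply the same
 * check.
 */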
static void
dma32_txsuspend (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txsuspend\n", di->name));

  if (di->ntxd == 0)
    return;

  OR_REG (di->osh, &di->d32txregs->control, XC_SE);
}

static void
dma32_txresume (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txresume\n", di->name));

  if (di->ntxd == 0)
    return;

  AND_REG (di->osh, &di->d32txregs->control, ~XC_SE);
}

static bool
dma32_txsuspended (dma_info_t * di)
{
  return (di->ntxd == 0)
    || ((R_REG (di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
}

static void
dma32_txreclaim (dma_info_t * di, bool forceall)
{
  void *p;

  DMA_TRACE (("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

  while ((p = dma32_getnexttxp (di, forceall)))
    PKTFREE (di->osh, p, TRUE);
}

static bool
dma32_txstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
	  XS_XS_STOPPED);
}

static bool
dma32_rxstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
	  RS_RS_STOPPED);
}
static bool
dma32_alloc (dma_info_t * di, uint direction)
{
  uint size;
  uint ddlen;
  void *va;

  ddlen = sizeof (dma32dd_t);

  size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

  if (!ISALIGNED (DMA_CONSISTENT_ALIGN, D32RINGALIGN))
    size += D32RINGALIGN;

  if (direction == DMA_TX)
    {
      if ((va =
	   DMA_ALLOC_CONSISTENT (di->osh, size, &di->txdpa,
				 &di->tx_dmah)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
		      di->name));
	  return FALSE;
	}

      di->txd32 = (dma32dd_t *) ROUNDUP ((uintptr) va, D32RINGALIGN);
      di->txdalign = (uint) ((int8 *) (uintptr) di->txd32 - (int8 *) va);
      di->txdpa += di->txdalign;
      di->txdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->txd32, D32RINGALIGN));
    }
  else
    {
      if ((va =
	   DMA_ALLOC_CONSISTENT (di->osh, size, &di->rxdpa,
				 &di->rx_dmah)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
		      di->name));
	  return FALSE;
	}
      di->rxd32 = (dma32dd_t *) ROUNDUP ((uintptr) va, D32RINGALIGN);
      di->rxdalign = (uint) ((int8 *) (uintptr) di->rxd32 - (int8 *) va);
      di->rxdpa += di->rxdalign;
      di->rxdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->rxd32, D32RINGALIGN));
    }

  return TRUE;
}
static bool
dma32_txreset (dma_info_t * di)
{
  uint32 status;

  if (di->ntxd == 0)
    return TRUE;

  /* suspend tx DMA first */
  W_REG (di->osh, &di->d32txregs->control, XC_SE);
  SPINWAIT (((status = (R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK))
	     != XS_XS_DISABLED) &&
	    (status != XS_XS_IDLE) && (status != XS_XS_STOPPED), (10000));

  W_REG (di->osh, &di->d32txregs->control, 0);
  SPINWAIT (((status = (R_REG (di->osh,
			       &di->d32txregs->status) & XS_XS_MASK)) !=
	     XS_XS_DISABLED), 10000);

  /* wait for the last transaction to complete */
  OSL_DELAY (300);

  return (status == XS_XS_DISABLED);
}

static bool
dma32_rxidle (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxidle\n", di->name));

  if (di->nrxd == 0)
    return TRUE;

  return ((R_REG (di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
	  R_REG (di->osh, &di->d32rxregs->ptr));
}

static bool
dma32_rxreset (dma_info_t * di)
{
  uint32 status;

  if (di->nrxd == 0)
    return TRUE;

  W_REG (di->osh, &di->d32rxregs->control, 0);
  SPINWAIT (((status = (R_REG (di->osh,
			       &di->d32rxregs->status) & RS_RS_MASK)) !=
	     RS_RS_DISABLED), 10000);

  return (status == RS_RS_DISABLED);
}

static bool
dma32_rxenabled (dma_info_t * di)
{
  uint32 rc;

  rc = R_REG (di->osh, &di->d32rxregs->control);
  return ((rc != 0xffffffff) && (rc & RC_RE));
}

static bool
dma32_txsuspendedidle (dma_info_t * di)
{
  if (di->ntxd == 0)
    return TRUE;

  if (!(R_REG (di->osh, &di->d32txregs->control) & XC_SE))
    return 0;

  if ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
    return 0;

  OSL_DELAY (2);
  return ((R_REG (di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
	  XS_XS_IDLE);
}
/* !! tx entry routine
 * supports full 32bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 */
static int
dma32_txfast (dma_info_t * di, void *p0, bool commit)
{
  void *p, *next;
  uchar *data;
  uint len;
  uint txout;
  uint32 flags = 0;
  uint32 pa;

  DMA_TRACE (("%s: dma_txfast\n", di->name));

  txout = di->txout;

  /*
   * Walk the chain of packet buffers
   * allocating and initializing transmit descriptor entries.
   */
  for (p = p0; p; p = next)
    {
      data = PKTDATA (di->osh, p);
      len = PKTLEN (di->osh, p);
      next = PKTNEXT (di->osh, p);

      /* return nonzero if out of tx descriptors */
      if (NEXTTXD (txout) == di->txin)
	goto outoftxd;

      if (len == 0)
	continue;

      /* get physical address of buffer start */
      pa =
	(uint32) DMA_MAP (di->osh, data, len, DMA_TX, p,
			  &di->txp_dmah[txout]);

      flags = 0;
      if (p == p0)
	flags |= CTRL_SOF;
      if (next == NULL)
	flags |= (CTRL_IOC | CTRL_EOF);
      if (txout == (di->ntxd - 1))
	flags |= CTRL_EOT;

      dma32_dd_upd (di, di->txd32, pa, txout, &flags, len);
      ASSERT (di->txp[txout] == NULL);

      txout = NEXTTXD (txout);
    }

  /* if last txd eof not set, fix it */
  if (!(flags & CTRL_EOF))
    W_SM (&di->txd32[PREVTXD (txout)].ctrl,
	  BUS_SWAP32 (flags | CTRL_IOC | CTRL_EOF));

  /* save the packet */
  di->txp[PREVTXD (txout)] = p0;

  /* bump the tx descriptor index */
  di->txout = txout;

  /* kick the chip */
  if (commit)
    W_REG (di->osh, &di->d32txregs->ptr, I2B (txout, dma32dd_t));

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;
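  /* The "- 1" above keeps one descriptor permanently unused: if all ntxd
   * entries could be posted, txin == txout would be ambiguous between a
   * completely full and a completely empty ring, which is also why the
   * loop above refuses to post once NEXTTXD(txout) == di->txin.
   */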
  return (0);

outoftxd:
  DMA_ERROR (("%s: dma_txfast: out of txds\n", di->name));
  PKTFREE (di->osh, p0, TRUE);
  di->hnddma.txavail = 0;
  di->hnddma.txnobuf++;
  return (-1);
}
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
static void *
dma32_getnexttxp (dma_info_t * di, bool forceall)
{
  uint start, end, i;
  void *txp;

  DMA_TRACE (("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

  if (di->ntxd == 0)
    return (NULL);

  txp = NULL;

  start = di->txin;
  if (forceall)
    end = di->txout;
  else
    end =
      B2I (R_REG (di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);

  if ((start == 0) && (end > di->txout))
    goto bogus;

  for (i = start; i != end && !txp; i = NEXTTXD (i))
    {
      DMA_UNMAP (di->osh,
		 (BUS_SWAP32 (R_SM (&di->txd32[i].addr)) - di->dataoffsetlow),
		 (BUS_SWAP32 (R_SM (&di->txd32[i].ctrl)) & CTRL_BC_MASK),
		 DMA_TX, di->txp[i], &di->txp_dmah[i]);

      W_SM (&di->txd32[i].addr, 0xdeadbeef);
      txp = di->txp[i];
      di->txp[i] = NULL;
    }

  di->txin = i;

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (txp);

bogus:
  DMA_ERROR (("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
  return (NULL);
}
static void *
dma32_getnextrxp (dma_info_t * di, bool forceall)
{
  uint i, curr;
  void *rxp;

  /* if forcing, dma engine must be disabled */
  ASSERT (!forceall || !dma32_rxenabled (di));

  if (di->nrxd == 0)
    return (NULL);

  i = di->rxin;

  /* return if no packets posted */
  if (i == di->rxout)
    return (NULL);

  /* ignore curr if forceall */
  curr =
    B2I (R_REG (di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
  if (!forceall && (i == curr))
    return (NULL);

  /* get the packet pointer that corresponds to the rx descriptor */
  rxp = di->rxp[i];
  ASSERT (rxp);
  di->rxp[i] = NULL;

  /* clear this packet from the descriptor ring */
  DMA_UNMAP (di->osh,
	     (BUS_SWAP32 (R_SM (&di->rxd32[i].addr)) - di->dataoffsetlow),
	     di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

  W_SM (&di->rxd32[i].addr, 0xdeadbeef);

  di->rxin = NEXTRXD (i);

  return (rxp);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma32_txrotate (dma_info_t * di)
{
  uint ad;
  uint nactive;
  uint rot;
  uint old, new;
  uint32 w;
  uint first, last;

  ASSERT (dma32_txsuspendedidle (di));

  nactive = _dma_txactive (di);
  ad =
    B2I (((R_REG (di->osh, &di->d32txregs->status) & XS_AD_MASK) >>
	  XS_AD_SHIFT), dma32dd_t);
  rot = TXD (ad - di->txin);

  ASSERT (rot < di->ntxd);

  /* full-ring case is a lot harder - don't worry about this */
  if (rot >= (di->ntxd - nactive))
    {
      DMA_ERROR (("%s: dma_txrotate: ring full - punt\n", di->name));
      return;
    }

  first = di->txin;
  last = PREVTXD (di->txout);

  /* move entries starting at last and moving backwards to first */
  for (old = last; old != PREVTXD (first); old = PREVTXD (old))
    {
      new = TXD (old + rot);

      /*
       * Move the tx dma descriptor.
       * EOT is set only in the last entry in the ring.
       */
      w = BUS_SWAP32 (R_SM (&di->txd32[old].ctrl)) & ~CTRL_EOT;
      if (new == (di->ntxd - 1))
	w |= CTRL_EOT;
      W_SM (&di->txd32[new].ctrl, BUS_SWAP32 (w));
      W_SM (&di->txd32[new].addr, R_SM (&di->txd32[old].addr));

      /* zap the old tx dma descriptor address field */
      W_SM (&di->txd32[old].addr, BUS_SWAP32 (0xdeadbeef));

      /* move the corresponding txp[] entry */
      ASSERT (di->txp[new] == NULL);
      di->txp[new] = di->txp[old];
      di->txp[old] = NULL;
    }

  /* update txin and txout */
  di->txin = ad;
  di->txout = TXD (di->txout + rot);
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  /* kick the chip */
  W_REG (di->osh, &di->d32txregs->ptr, I2B (di->txout, dma32dd_t));
}
/* 64 bits DMA functions */

#ifdef BCMDMA64

static void
dma64_txinit (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txinit\n", di->name));

  if (di->ntxd == 0)
    return;

  di->txin = di->txout = 0;
  di->hnddma.txavail = di->ntxd - 1;

  /* clear tx descriptor ring */
  BZERO_SM ((void *) (uintptr) di->txd64, (di->ntxd * sizeof (dma64dd_t)));
  W_REG (di->osh, &di->d64txregs->control, D64_XC_XE);
  _dma_ddtable_init (di, DMA_TX, di->txdpa);
}

static bool
dma64_txenabled (dma_info_t * di)
{
  uint32 xc;

  /* If the chip is dead, it is not enabled :-) */
  xc = R_REG (di->osh, &di->d64txregs->control);
  return ((xc != 0xffffffff) && (xc & D64_XC_XE));
}

static void
dma64_txsuspend (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txsuspend\n", di->name));

  if (di->ntxd == 0)
    return;

  OR_REG (di->osh, &di->d64txregs->control, D64_XC_SE);
}

static void
dma64_txresume (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_txresume\n", di->name));

  if (di->ntxd == 0)
    return;

  AND_REG (di->osh, &di->d64txregs->control, ~D64_XC_SE);
}

static bool
dma64_txsuspended (dma_info_t * di)
{
  return (di->ntxd == 0)
    || ((R_REG (di->osh, &di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
}

static void
dma64_txreclaim (dma_info_t * di, bool forceall)
{
  void *p;

  DMA_TRACE (("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

  while ((p = dma64_getnexttxp (di, forceall)))
    PKTFREE (di->osh, p, TRUE);
}

static bool
dma64_txstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	  D64_XS0_XS_STOPPED);
}

static bool
dma64_rxstopped (dma_info_t * di)
{
  return ((R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
	  D64_RS0_RS_STOPPED);
}
static bool
dma64_alloc (dma_info_t * di, uint direction)
{
  uint size;
  uint ddlen;
  uint32 alignbytes;
  void *va;

  ddlen = sizeof (dma64dd_t);

  size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);

  alignbytes = di->dma64align;

  if (!ISALIGNED (DMA_CONSISTENT_ALIGN, alignbytes))
    size += alignbytes;

  if (direction == DMA_TX)
    {
      if ((va =
	   DMA_ALLOC_CONSISTENT (di->osh, size, &di->txdpa,
				 &di->tx_dmah)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
		      di->name));
	  return FALSE;
	}

      di->txd64 = (dma64dd_t *) ROUNDUP ((uintptr) va, alignbytes);
      di->txdalign = (uint) ((int8 *) (uintptr) di->txd64 - (int8 *) va);
      di->txdpa += di->txdalign;
      di->txdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->txd64, alignbytes));
    }
  else
    {
      if ((va =
	   DMA_ALLOC_CONSISTENT (di->osh, size, &di->rxdpa,
				 &di->rx_dmah)) == NULL)
	{
	  DMA_ERROR (("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
		      di->name));
	  return FALSE;
	}
      di->rxd64 = (dma64dd_t *) ROUNDUP ((uintptr) va, alignbytes);
      di->rxdalign = (uint) ((int8 *) (uintptr) di->rxd64 - (int8 *) va);
      di->rxdpa += di->rxdalign;
      di->rxdalloc = size;
      ASSERT (ISALIGNED ((uintptr) di->rxd64, alignbytes));
    }

  return TRUE;
}
static bool
dma64_txreset (dma_info_t * di)
{
  uint32 status;

  if (di->ntxd == 0)
    return TRUE;

  /* suspend tx DMA first */
  W_REG (di->osh, &di->d64txregs->control, D64_XC_SE);
  SPINWAIT (((status =
	      (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	     D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
	    && (status != D64_XS0_XS_STOPPED), 10000);

  W_REG (di->osh, &di->d64txregs->control, 0);
  SPINWAIT (((status =
	      (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	     D64_XS0_XS_DISABLED), 10000);

  /* wait for the last transaction to complete */
  OSL_DELAY (300);

  return (status == D64_XS0_XS_DISABLED);
}

static bool
dma64_rxidle (dma_info_t * di)
{
  DMA_TRACE (("%s: dma_rxidle\n", di->name));

  if (di->nrxd == 0)
    return TRUE;

  return ((R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
	  R_REG (di->osh, &di->d64rxregs->ptr));
}

static bool
dma64_rxreset (dma_info_t * di)
{
  uint32 status;

  if (di->nrxd == 0)
    return TRUE;

  W_REG (di->osh, &di->d64rxregs->control, 0);
  SPINWAIT (((status =
	      (R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
	     D64_RS0_RS_DISABLED), 10000);

  return (status == D64_RS0_RS_DISABLED);
}

static bool
dma64_rxenabled (dma_info_t * di)
{
  uint32 rc;

  rc = R_REG (di->osh, &di->d64rxregs->control);
  return ((rc != 0xffffffff) && (rc & D64_RC_RE));
}

static bool
dma64_txsuspendedidle (dma_info_t * di)
{
  if (di->ntxd == 0)
    return TRUE;

  if (!(R_REG (di->osh, &di->d64txregs->control) & D64_XC_SE))
    return 0;

  if ((R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
      D64_XS0_XS_IDLE)
    return 1;

  return 0;
}
/* !! tx entry routine */
static int
dma64_txfast (dma_info_t * di, void *p0, bool commit)
{
  void *p, *next;
  uchar *data;
  uint len;
  uint txout;
  uint32 flags = 0;
  uint32 pa;

  DMA_TRACE (("%s: dma_txfast\n", di->name));

  txout = di->txout;

  /*
   * Walk the chain of packet buffers
   * allocating and initializing transmit descriptor entries.
   */
  for (p = p0; p; p = next)
    {
      data = PKTDATA (di->osh, p);
      len = PKTLEN (di->osh, p);
      next = PKTNEXT (di->osh, p);

      /* return nonzero if out of tx descriptors */
      if (NEXTTXD (txout) == di->txin)
	goto outoftxd;

      if (len == 0)
	continue;

      /* get physical address of buffer start */
      pa =
	(uint32) DMA_MAP (di->osh, data, len, DMA_TX, p,
			  &di->txp_dmah[txout]);

      flags = 0;
      if (p == p0)
	flags |= D64_CTRL1_SOF;
      if (next == NULL)
	flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
      if (txout == (di->ntxd - 1))
	flags |= D64_CTRL1_EOT;

      dma64_dd_upd (di, di->txd64, pa, txout, &flags, len);
      ASSERT (di->txp[txout] == NULL);

      txout = NEXTTXD (txout);
    }

  /* if last txd eof not set, fix it */
  if (!(flags & D64_CTRL1_EOF))
    W_SM (&di->txd64[PREVTXD (txout)].ctrl1,
	  BUS_SWAP32 (flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

  /* save the packet */
  di->txp[PREVTXD (txout)] = p0;

  /* bump the tx descriptor index */
  di->txout = txout;

  /* kick the chip */
  if (commit)
    W_REG (di->osh, &di->d64txregs->ptr, I2B (txout, dma64dd_t));

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (0);

outoftxd:
  DMA_ERROR (("%s: dma_txfast: out of txds\n", di->name));
  PKTFREE (di->osh, p0, TRUE);
  di->hnddma.txavail = 0;
  di->hnddma.txnobuf++;
  return (-1);
}
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
static void *
dma64_getnexttxp (dma_info_t * di, bool forceall)
{
  uint start, end, i;
  void *txp;

  DMA_TRACE (("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

  if (di->ntxd == 0)
    return (NULL);

  txp = NULL;

  start = di->txin;
  if (forceall)
    end = di->txout;
  else
    end =
      B2I (R_REG (di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK,
	   dma64dd_t);

  if ((start == 0) && (end > di->txout))
    goto bogus;

  for (i = start; i != end && !txp; i = NEXTTXD (i))
    {
      DMA_UNMAP (di->osh,
		 (BUS_SWAP32 (R_SM (&di->txd64[i].addrlow)) -
		  di->dataoffsetlow),
		 (BUS_SWAP32 (R_SM (&di->txd64[i].ctrl2)) &
		  D64_CTRL2_BC_MASK), DMA_TX, di->txp[i], &di->txp_dmah[i]);

      W_SM (&di->txd64[i].addrlow, 0xdeadbeef);
      W_SM (&di->txd64[i].addrhigh, 0xdeadbeef);

      txp = di->txp[i];
      di->txp[i] = NULL;
    }

  di->txin = i;

  /* tx flow control */
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  return (txp);

bogus:
  DMA_ERROR (("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
  return (NULL);
}
static void *
dma64_getnextrxp (dma_info_t * di, bool forceall)
{
  uint i, curr;
  void *rxp;

  /* if forcing, dma engine must be disabled */
  ASSERT (!forceall || !dma64_rxenabled (di));

  if (di->nrxd == 0)
    return (NULL);

  i = di->rxin;

  /* return if no packets posted */
  if (i == di->rxout)
    return (NULL);

  /* ignore curr if forceall */
  curr =
    B2I (R_REG (di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK,
	 dma64dd_t);
  if (!forceall && (i == curr))
    return (NULL);

  /* get the packet pointer that corresponds to the rx descriptor */
  rxp = di->rxp[i];
  ASSERT (rxp);
  di->rxp[i] = NULL;

  /* clear this packet from the descriptor ring */
  DMA_UNMAP (di->osh,
	     (BUS_SWAP32 (R_SM (&di->rxd64[i].addrlow)) - di->dataoffsetlow),
	     di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);

  W_SM (&di->rxd64[i].addrlow, 0xdeadbeef);
  W_SM (&di->rxd64[i].addrhigh, 0xdeadbeef);

  di->rxin = NEXTRXD (i);

  return (rxp);
}
static bool
_dma64_addrext (osl_t * osh, dma64regs_t * dma64regs)
{
  uint32 w;

  OR_REG (osh, &dma64regs->control, D64_XC_AE);
  w = R_REG (osh, &dma64regs->control);
  AND_REG (osh, &dma64regs->control, ~D64_XC_AE);
  return ((w & D64_XC_AE) == D64_XC_AE);
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma64_txrotate (dma_info_t * di)
{
  uint ad;
  uint nactive;
  uint rot;
  uint old, new;
  uint32 w;
  uint first, last;

  ASSERT (dma64_txsuspendedidle (di));

  nactive = _dma_txactive (di);
  ad =
    B2I ((R_REG (di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK),
	 dma64dd_t);
  rot = TXD (ad - di->txin);

  ASSERT (rot < di->ntxd);

  /* full-ring case is a lot harder - don't worry about this */
  if (rot >= (di->ntxd - nactive))
    {
      DMA_ERROR (("%s: dma_txrotate: ring full - punt\n", di->name));
      return;
    }

  first = di->txin;
  last = PREVTXD (di->txout);

  /* move entries starting at last and moving backwards to first */
  for (old = last; old != PREVTXD (first); old = PREVTXD (old))
    {
      new = TXD (old + rot);

      /*
       * Move the tx dma descriptor.
       * EOT is set only in the last entry in the ring.
       */
      w = BUS_SWAP32 (R_SM (&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
      if (new == (di->ntxd - 1))
	w |= D64_CTRL1_EOT;
      W_SM (&di->txd64[new].ctrl1, BUS_SWAP32 (w));

      w = BUS_SWAP32 (R_SM (&di->txd64[old].ctrl2));
      W_SM (&di->txd64[new].ctrl2, BUS_SWAP32 (w));

      W_SM (&di->txd64[new].addrlow, R_SM (&di->txd64[old].addrlow));
      W_SM (&di->txd64[new].addrhigh, R_SM (&di->txd64[old].addrhigh));

      /* zap the old tx dma descriptor address field */
      W_SM (&di->txd64[old].addrlow, BUS_SWAP32 (0xdeadbeef));
      W_SM (&di->txd64[old].addrhigh, BUS_SWAP32 (0xdeadbeef));

      /* move the corresponding txp[] entry */
      ASSERT (di->txp[new] == NULL);
      di->txp[new] = di->txp[old];
      di->txp[old] = NULL;
    }

  /* update txin and txout */
  di->txin = ad;
  di->txout = TXD (di->txout + rot);
  di->hnddma.txavail = di->ntxd - NTXDACTIVE (di->txin, di->txout) - 1;

  /* kick the chip */
  W_REG (di->osh, &di->d64txregs->ptr, I2B (di->txout, dma64dd_t));
}

#endif /* BCMDMA64 */
uint
dma_addrwidth (sb_t * sbh, void *dmaregs)
{
  dma32regs_t *dma32regs;
  osl_t *osh;

  /* NOTE: the OS handle initialization here is reconstructed; it is
   * assumed to come from sb_osh(sbh), which the register probes below
   * require.
   */
  osh = sb_osh (sbh);

  if (DMA64_CAP)
    {
      /* DMA engine is 64-bit capable */
      if (((sb_coreflagshi (sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64))
	{
	  /* backplane are 64 bits capable */
	  if (sb_backplane64 (sbh))
	    /* If bus is System Backplane or PCIE then we can access 64-bits */
	    if ((BUSTYPE (sbh->bustype) == SB_BUS) ||
		((BUSTYPE (sbh->bustype) == PCI_BUS) &&
		 sbh->buscoretype == SB_PCIE))
	      return (DMADDRWIDTH_64);

	  /* DMA64 is always 32 bits capable, AE is always TRUE */
	  ASSERT (_dma64_addrext (osh, (dma64regs_t *) dmaregs));

	  return (DMADDRWIDTH_32);
	}
    }

  /* Start checking for 32-bit / 30-bit addressing */
  dma32regs = (dma32regs_t *) dmaregs;

  /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
  if ((BUSTYPE (sbh->bustype) == SB_BUS) ||
      ((BUSTYPE (sbh->bustype) == PCI_BUS) && sbh->buscoretype == SB_PCIE) ||
      (_dma32_addrext (osh, dma32regs)))
    return (DMADDRWIDTH_32);

  /* fall back to 30-bit addressing */
  return (DMADDRWIDTH_30);
}
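/* Summary of the probe above: DMADDRWIDTH_64 requires a 64-bit engine, a
 * 64-bit backplane and a bus (SB or PCIE) that can carry it;
 * DMADDRWIDTH_32 requires either such a bus or the addrext feature;
 * anything else falls back to DMADDRWIDTH_30, i.e. 30-bit addressing with
 * the top two address bits carried in the descriptor AE field.
 */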