/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>

/* 32bit DMA ops. */
static
struct bcm43xx_dmadesc_generic * op32_idx2desc(struct bcm43xx_dmaring *ring,
					       int slot,
					       struct bcm43xx_dmadesc_meta **meta)
{
	struct bcm43xx_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct bcm43xx_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct bcm43xx_dmaring *ring,
				 struct bcm43xx_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct bcm43xx_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	assert(slot >= 0 && slot < ring->nr_slots);

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	      & BCM43xx_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= BCM43xx_DMA32_DCTL_IRQ;
	ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
	       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

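/* Illustrative sketch, not part of the original driver: the address split
 * done by op32_fill_descriptor() above, assuming SSB_DMA_TRANSLATION_MASK
 * is 0xC0000000 and SSB_DMA_TRANSLATION_SHIFT is 30 (the real values come
 * from the SSB headers).  For a hypothetical buffer at dmaaddr 0x81234567:
 *
 *	addr    = 0x81234567 & ~0xC0000000;          results in 0x01234567
 *	addrext = (0x81234567 & 0xC0000000) >> 30;   results in 0x2
 *	addr   |= ssb_dma_translation(ring->dev->dev);
 *
 * The controller is programmed with the low address bits plus the host
 * bridge translation, while addrext travels in the descriptor control word.
 */
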
static void op32_poke_tx(struct bcm43xx_dmaring *ring, int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_TXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
}

static void op32_tx_suspend(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
			  | BCM43xx_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
			  & ~BCM43xx_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct bcm43xx_dmaring *ring)
{
	u32 val;

	val = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
	val &= BCM43xx_DMA32_RXDPTR;

	return (val / sizeof(struct bcm43xx_dmadesc32));
}

static void op32_set_current_rxslot(struct bcm43xx_dmaring *ring,
				    int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
}

static const struct bcm43xx_dma_ops dma32_ops = {
	.idx2desc		= op32_idx2desc,
	.fill_descriptor	= op32_fill_descriptor,
	.poke_tx		= op32_poke_tx,
	.tx_suspend		= op32_tx_suspend,
	.tx_resume		= op32_tx_resume,
	.get_current_rxslot	= op32_get_current_rxslot,
	.set_current_rxslot	= op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct bcm43xx_dmadesc_generic * op64_idx2desc(struct bcm43xx_dmaring *ring,
					       int slot,
					       struct bcm43xx_dmadesc_meta **meta)
{
	struct bcm43xx_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct bcm43xx_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct bcm43xx_dmaring *ring,
				 struct bcm43xx_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct bcm43xx_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	assert(slot >= 0 && slot < ring->nr_slots);

	addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= ssb_dma_translation(ring->dev->dev);
	if (slot == ring->nr_slots - 1)
		ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
		& BCM43xx_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
		& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct bcm43xx_dmaring *ring, int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_TXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
}

static void op64_tx_suspend(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
			  | BCM43xx_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
			  & ~BCM43xx_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct bcm43xx_dmaring *ring)
{
	u32 val;

	val = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
	val &= BCM43xx_DMA64_RXSTATDPTR;

	return (val / sizeof(struct bcm43xx_dmadesc64));
}

static void op64_set_current_rxslot(struct bcm43xx_dmaring *ring,
				    int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
}

static const struct bcm43xx_dma_ops dma64_ops = {
	.idx2desc		= op64_idx2desc,
	.fill_descriptor	= op64_fill_descriptor,
	.poke_tx		= op64_poke_tx,
	.tx_suspend		= op64_tx_suspend,
	.tx_resume		= op64_tx_resume,
	.get_current_rxslot	= op64_get_current_rxslot,
	.set_current_rxslot	= op64_set_current_rxslot,
};

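/* Illustrative note (not part of the original file): dma32_ops and dma64_ops
 * expose the same bcm43xx_dma_ops interface, so the generic ring code below
 * does not test ring->dma64 when touching descriptors; it simply calls
 * through the ops pointer, for example:
 *
 *	desc = ring->ops->idx2desc(ring, slot, &meta);
 *	ring->ops->fill_descriptor(ring, desc, dmaaddr, len, 0, 1, 1);
 *
 * Only the register-level differences between the 32bit and 64bit DMA
 * engines live inside the op32_*() and op64_*() implementations.
 */
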
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
static void update_max_used_slots(struct bcm43xx_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (bcm43xx_debug(ring->dev, BCM43xx_DBG_DMAVERBOSE)) {
		dprintk(KERN_DEBUG PFX
			"max_used_slots increased to %d on %s ring %d\n",
			ring->max_used_slots,
			ring->tx ? "TX" : "RX",
			ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct bcm43xx_dmaring *ring,
			   int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(ring->tx);
	assert(!ring->stopped);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to bcm43xx-ring mapping */
static struct bcm43xx_dmaring * priority_to_txring(struct bcm43xx_wldev *dev,
						   int queue_priority)
{
	struct bcm43xx_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		assert(0);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm43xx-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct bcm43xx_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
	return 0;

	return idx_to_prio[ring->index];
}

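/* Illustrative summary (not from the original source): taken together,
 * priority_to_txring() and the idx_to_prio[] table describe the intended
 * mapping between mac80211 queue priorities and the six TX rings:
 *
 *	priority 0 (highest)  <->  tx_ring3
 *	priority 1            <->  tx_ring2
 *	priority 2            <->  tx_ring1
 *	priority 3            <->  tx_ring0
 *	priority 4            <->  tx_ring4
 *	priority 5            <->  tx_ring5
 *
 * With the FIXME short-circuits above, all traffic is currently funneled
 * through tx_ring1 and reported back to mac80211 as queue 0.
 */
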
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
	static const u16 map64[] = {
		BCM43xx_MMIO_DMA64_BASE0,
		BCM43xx_MMIO_DMA64_BASE1,
		BCM43xx_MMIO_DMA64_BASE2,
		BCM43xx_MMIO_DMA64_BASE3,
		BCM43xx_MMIO_DMA64_BASE4,
		BCM43xx_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		BCM43xx_MMIO_DMA32_BASE0,
		BCM43xx_MMIO_DMA32_BASE1,
		BCM43xx_MMIO_DMA32_BASE2,
		BCM43xx_MMIO_DMA32_BASE3,
		BCM43xx_MMIO_DMA32_BASE4,
		BCM43xx_MMIO_DMA32_BASE5,
	};

	if (dma64bit) {
		assert(controller_idx >= 0 &&
		       controller_idx < ARRAY_SIZE(map64));
		return map64[controller_idx];
	}
	assert(controller_idx >= 0 &&
	       controller_idx < ARRAY_SIZE(map32));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len,
					 DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len,
					 DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len,
				 DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len,
				 DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	dma_sync_single_for_cpu(ring->dev->dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	dma_sync_single_for_device(ring->dev->dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase) {
		printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_wldev *dev,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
	bcm43xx_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
		value = bcm43xx_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_RXSTAT;
			if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_RXSTATE;
			if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "ERROR: DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_wldev *dev,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
	bcm43xx_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "ERROR: DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
			       struct bcm43xx_dmadesc_generic *desc,
			       struct bcm43xx_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct bcm43xx_rxhdr_fw4 *rxhdr;
	struct bcm43xx_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	assert(!ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (dma_mapping_error(dmaaddr)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (dma_mapping_error(dmaaddr)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct bcm43xx_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct bcm43xx_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			printk(KERN_ERR PFX "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = BCM43xx_DMA64_TXENABLE;
			value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
				 & BCM43xx_DMA64_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
					  ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = BCM43xx_DMA32_TXENABLE;
			value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
				 & BCM43xx_DMA32_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
					  (ringbase & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
			value |= BCM43xx_DMA64_RXENABLE;
			value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
				 & BCM43xx_DMA64_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
					  ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
			value |= BCM43xx_DMA32_RXENABLE;
			value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
				 & BCM43xx_DMA32_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
					  (ringbase & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		bcm43xx_dmacontroller_tx_reset(ring->dev, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
	} else {
		bcm43xx_dmacontroller_rx_reset(ring->dev, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			assert(ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta, 0);
	}
}

static u64 supported_dma_mask(struct bcm43xx_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = bcm43xx_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = bcm43xx_dmacontroller_base(0, 0);
	bcm43xx_write32(dev,
			mmio_base + BCM43xx_DMA32_TXCTL,
			BCM43xx_DMA32_TXADDREXT_MASK);
	tmp = bcm43xx_read32(dev,
			     mmio_base + BCM43xx_DMA32_TXCTL);
	if (tmp & BCM43xx_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}

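/* Illustrative example (not part of the original driver) of how the mask
 * probed above is consumed by bcm43xx_dma_init() below: the caller selects
 * the descriptor format from the mask and then tells the SSB bus about it,
 * roughly like this:
 *
 *	dmamask = supported_dma_mask(dev);
 *	dma64 = (dmamask == DMA_64BIT_MASK);
 *	err = ssb_dma_set_mask(dev->dev, dmamask);
 *
 * A 64bit-capable core uses the dma64_ops descriptor layout, a core that
 * only supports the address-extension scheme stays on the 32bit layout,
 * and anything else is restricted to the 30bit mask.
 */
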
/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_wldev *dev,
					       int controller_index,
					       int for_tx,
					       int dma64)
{
	struct bcm43xx_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = BCM43xx_RXRING_SLOTS;
	if (for_tx)
		nr_slots = BCM43xx_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct bcm43xx_txhdr_fw4),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dev,
					  ring->txhdr_cache, sizeof(struct bcm43xx_txhdr_fw4),
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dma_test)) {
			/* Retry the allocation in zone_dma. */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    sizeof(struct bcm43xx_txhdr_fw4),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dev,
						  ring->txhdr_cache, sizeof(struct bcm43xx_txhdr_fw4),
						  DMA_TO_DEVICE);

			if (dma_mapping_error(dma_test))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dev,
				 dma_test, sizeof(struct bcm43xx_txhdr_fw4),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
	ring->index = controller_index;
	ring->dma64 = !!dma64;
	if (dma64)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
		} else
			assert(0);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
	if (!ring)
		return;

	dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
		(ring->dma64) ? "64" : "32",
		ring->mmio_base,
		(ring->tx) ? "TX" : "RX",
		ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_wldev *dev)
{
	struct bcm43xx_dma *dma;

	if (bcm43xx_using_pio(dev))
		return;
	dma = &dev->dma;

	bcm43xx_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int bcm43xx_dma_init(struct bcm43xx_wldev *dev)
{
	struct bcm43xx_dma *dma = &dev->dma;
	struct bcm43xx_dmaring *ring;
	int err;
	u64 dmamask;
	int dma64 = 0;

	dmamask = supported_dma_mask(dev);
	if (dmamask == DMA_64BIT_MASK)
		dma64 = 1;

	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
#ifdef BCM43XX_MAC80211_PIO
		printk(KERN_WARNING PFX "DMA for this device not supported. "
					"Falling back to PIO\n");
		dev->__using_pio = 1;
		return -EAGAIN;
#else
		printk(KERN_ERR PFX "DMA for this device not supported and "
				    "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = bcm43xx_setup_dmaring(dev, 0, 1, dma64);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = bcm43xx_setup_dmaring(dev, 1, 1, dma64);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = bcm43xx_setup_dmaring(dev, 2, 1, dma64);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = bcm43xx_setup_dmaring(dev, 3, 1, dma64);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = bcm43xx_setup_dmaring(dev, 4, 1, dma64);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = bcm43xx_setup_dmaring(dev, 5, 1, dma64);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = bcm43xx_setup_dmaring(dev, 0, 0, dma64);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = bcm43xx_setup_dmaring(dev, 3, 0, dma64);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
		(dmamask == DMA_64BIT_MASK) ? 64 :
		(dmamask == DMA_32BIT_MASK) ? 32 : 30);
	err = 0;
out:
	return err;

err_destroy_rx0:
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	assert(((u16)slot & 0xF000) == 0x0000);
	cookie |= (u16)slot;

	return cookie;
}

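/* Worked example (illustrative; the per-ring constants above were
 * reconstructed from the controller-ID scheme described in the comment):
 * for tx_ring2 and slot 5, generate_cookie() yields 0xC005 -- the upper
 * nibble identifies the DMA controller, the lower 12 bits carry the slot.
 * parse_cookie() below inverts exactly this packing when the hardware
 * reports TX status.
 */
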
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_wldev *dev,
				      u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = &dev->dma;
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		assert(0);
	}
	*slot = (cookie & 0x0FFF);
	assert(ring && *slot >= 0 && *slot < ring->nr_slots);

	return ring;
}

static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct bcm43xx_dma_ops *ops = ring->ops;
	u8 *header;
	int slot;
	int err;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	assert(skb_shinfo(skb)->nr_frags == 0);

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(struct bcm43xx_txhdr_fw4)]);
	bcm43xx_generate_txhdr(ring->dev, header,
			       skb->data, skb->len, ctl,
			       generate_cookie(ring, slot));

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct bcm43xx_txhdr_fw4), 1);
	if (dma_mapping_error(meta_hdr->dmaaddr))
		return -EIO;
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct bcm43xx_txhdr_fw4), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (dma_mapping_error(meta->dmaaddr)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (dma_mapping_error(meta->dmaaddr)) {
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct bcm43xx_txhdr_fw4), 1);
	return err;
}

static inline
int should_inject_overflow(struct bcm43xx_dmaring *ring)
{
#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
	if (unlikely(bcm43xx_debug(ring->dev, BCM43xx_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			dprintk(KERN_DEBUG PFX "Injecting TX ring overflow on "
					       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_BCM43XX_MAC80211_DEBUG */
	return 0;
}

int bcm43xx_dma_tx(struct bcm43xx_wldev *dev,
		   struct sk_buff *skb,
		   struct ieee80211_tx_control *ctl)
{
	struct bcm43xx_dmaring *ring;
	int err = 0;
	unsigned long flags;

	ring = priority_to_txring(dev, ctl->queue);
	spin_lock_irqsave(&ring->lock, flags);
	assert(ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		printkl(KERN_ERR PFX "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	assert(!ring->stopped);

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err)) {
		printkl(KERN_ERR PFX "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
		if (bcm43xx_debug(dev, BCM43xx_DBG_DMAVERBOSE)) {
			dprintk(KERN_DEBUG PFX "Stopped TX ring %d\n",
				ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

void bcm43xx_dma_handle_txstatus(struct bcm43xx_wldev *dev,
				 const struct bcm43xx_txstatus *status)
{
	const struct bcm43xx_dma_ops *ops;
	struct bcm43xx_dmaring *ring;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	assert(irqs_disabled());
	spin_lock(&ring->lock);

	assert(ring->tx);
	ops = ring->ops;
	while (1) {
		assert(slot >= 0 && slot < ring->nr_slots);
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr, sizeof(struct bcm43xx_txhdr_fw4), 1);

		if (meta->is_last_fragment) {
			assert(meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			if (status->acked)
				meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
			meta->txstat.retry_count = status->frame_count - 1;
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			assert(meta->skb == NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		assert(free_slots(ring) >= SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
		if (bcm43xx_debug(dev, BCM43xx_DBG_DMAVERBOSE)) {
			dprintk(KERN_DEBUG PFX "Woke up TX ring %d\n",
				ring->index);
		}
	}

	spin_unlock(&ring->lock);
}

void bcm43xx_dma_get_tx_stats(struct bcm43xx_wldev *dev,
			      struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct bcm43xx_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = priority_to_txring(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

static void dma_rx(struct bcm43xx_dmaring *ring,
		   int *slot)
{
	const struct bcm43xx_dma_ops *ops = ring->ops;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct bcm43xx_hwtxstatus *hw = (struct bcm43xx_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(10);
			barrier();
		}
		bcm43xx_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

		return;
	}
	rxhdr = (struct bcm43xx_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small "
				     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	bcm43xx_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	const struct bcm43xx_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	assert(!ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	assert(current_slot >= 0 && current_slot < ring->nr_slots);

	slot = ring->current_slot;
	for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void bcm43xx_dma_tx_suspend_ring(struct bcm43xx_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	assert(ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void bcm43xx_dma_tx_resume_ring(struct bcm43xx_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	assert(ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void bcm43xx_dma_tx_suspend(struct bcm43xx_wldev *dev)
{
	bcm43xx_power_saving_ctl_bits(dev, -1, 1);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring0);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring1);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring2);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring3);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring4);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void bcm43xx_dma_tx_resume(struct bcm43xx_wldev *dev)
{
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring5);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring4);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring3);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring2);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring1);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring0);
	bcm43xx_power_saving_ctl_bits(dev, -1, -1);
}