Merge bcm43xx-mac80211 driver from tree at bu3sch.de, pulled 24/6
openwrt.git: package/bcm43xx-mac80211/src/bcm43xx/bcm43xx_dma.c
/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING. If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>


/* 32bit DMA ops. */
static
struct bcm43xx_dmadesc_generic * op32_idx2desc(struct bcm43xx_dmaring *ring,
		int slot,
		struct bcm43xx_dmadesc_meta **meta)
{
	struct bcm43xx_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct bcm43xx_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct bcm43xx_dmaring *ring,
		struct bcm43xx_dmadesc_generic *desc,
		dma_addr_t dmaaddr, u16 bufsize,
		int start, int end, int irq)
{
	struct bcm43xx_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	assert(slot >= 0 && slot < ring->nr_slots);

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	      & BCM43xx_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= BCM43xx_DMA32_DCTL_IRQ;
	ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
	       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct bcm43xx_dmaring *ring, int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_TXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
}

static void op32_tx_suspend(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
			  | BCM43xx_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
			  & ~BCM43xx_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct bcm43xx_dmaring *ring)
{
	u32 val;

	val = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
	val &= BCM43xx_DMA32_RXDPTR;

	return (val / sizeof(struct bcm43xx_dmadesc32));
}

static void op32_set_current_rxslot(struct bcm43xx_dmaring *ring,
		int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
}

static const struct bcm43xx_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct bcm43xx_dmadesc_generic * op64_idx2desc(struct bcm43xx_dmaring *ring,
		int slot,
		struct bcm43xx_dmadesc_meta **meta)
{
	struct bcm43xx_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct bcm43xx_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct bcm43xx_dmaring *ring,
		struct bcm43xx_dmadesc_generic *desc,
		dma_addr_t dmaaddr, u16 bufsize,
		int start, int end, int irq)
{
	struct bcm43xx_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	assert(slot >= 0 && slot < ring->nr_slots);

	addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= ssb_dma_translation(ring->dev->dev);
	if (slot == ring->nr_slots - 1)
		ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
		& BCM43xx_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
		& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct bcm43xx_dmaring *ring, int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_TXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
}

static void op64_tx_suspend(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
			  | BCM43xx_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct bcm43xx_dmaring *ring)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
			  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
			  & ~BCM43xx_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct bcm43xx_dmaring *ring)
{
	u32 val;

	val = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
	val &= BCM43xx_DMA64_RXSTATDPTR;

	return (val / sizeof(struct bcm43xx_dmadesc64));
}

static void op64_set_current_rxslot(struct bcm43xx_dmaring *ring,
		int slot)
{
	bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
			  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
}

static const struct bcm43xx_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};


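/* Ring slot bookkeeping helpers: the descriptor ring is used as a
 * circular buffer, so next/previous slot indices wrap around at
 * ring->nr_slots. */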
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(ring->tx);
	assert(!ring->stopped);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
	if (ring->used_slots > ring->max_used_slots)
		ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_MAC80211_DEBUG */

	return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(ring->tx);

	ring->used_slots--;
}

/* Mac80211-queue to bcm43xx-ring mapping */
static struct bcm43xx_dmaring * priority_to_txring(struct bcm43xx_wldev *dev,
		int queue_priority)
{
	struct bcm43xx_dmaring *ring;

	/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		assert(0);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm43xx-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct bcm43xx_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

	/*FIXME: have only one queue, for now */
	return 0;

	return idx_to_prio[ring->index];
}


u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
	static const u16 map64[] = {
		BCM43xx_MMIO_DMA64_BASE0,
		BCM43xx_MMIO_DMA64_BASE1,
		BCM43xx_MMIO_DMA64_BASE2,
		BCM43xx_MMIO_DMA64_BASE3,
		BCM43xx_MMIO_DMA64_BASE4,
		BCM43xx_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		BCM43xx_MMIO_DMA32_BASE0,
		BCM43xx_MMIO_DMA32_BASE1,
		BCM43xx_MMIO_DMA32_BASE2,
		BCM43xx_MMIO_DMA32_BASE3,
		BCM43xx_MMIO_DMA32_BASE4,
		BCM43xx_MMIO_DMA32_BASE5,
	};

	if (dma64bit) {
		assert(controller_idx >= 0 &&
		       controller_idx < ARRAY_SIZE(map64));
		return map64[controller_idx];
	}
	assert(controller_idx >= 0 &&
	       controller_idx < ARRAY_SIZE(map32));
	return map32[controller_idx];
}

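/* Thin wrappers around the generic DMA API: map/unmap a descriptor
 * buffer for the device and sync RX buffers between device and CPU
 * ownership. */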
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
		unsigned char *buf,
		size_t len,
		int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len,
					 DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len,
					 DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		dma_addr_t addr,
		size_t len,
		int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len,
				 DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len,
				 DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
		dma_addr_t addr,
		size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
		dma_addr_t addr,
		size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
		struct bcm43xx_dmadesc_meta *meta,
		int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

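/* Allocate/free the coherent DMA memory that holds the hardware
 * descriptor ring itself (BCM43xx_DMA_RINGMEMSIZE bytes). */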
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase) {
		printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_wldev *dev,
		u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
	bcm43xx_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
		value = bcm43xx_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_RXSTAT;
			if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_RXSTATE;
			if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "ERROR: DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_wldev *dev,
		u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
	bcm43xx_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "ERROR: DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

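/* Allocate an RX skb for one descriptor slot, DMA-map it and write
 * the mapping into the hardware descriptor. Falls back to GFP_DMA
 * memory if the first mapping attempt fails. */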
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
		struct bcm43xx_dmadesc_generic *desc,
		struct bcm43xx_dmadesc_meta *meta,
		gfp_t gfp_flags)
{
	struct bcm43xx_rxhdr_fw4 *rxhdr;
	struct bcm43xx_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	assert(!ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (dma_mapping_error(dmaaddr)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (dma_mapping_error(dmaaddr)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct bcm43xx_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct bcm43xx_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			printk(KERN_ERR PFX "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = BCM43xx_DMA64_TXENABLE;
			value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
				 & BCM43xx_DMA64_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
					  ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = BCM43xx_DMA32_TXENABLE;
			value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
				 & BCM43xx_DMA32_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
					  (ringbase & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
			value |= BCM43xx_DMA64_RXENABLE;
			value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
				 & BCM43xx_DMA64_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
					  ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
			value |= BCM43xx_DMA32_RXENABLE;
			value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
				 & BCM43xx_DMA32_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
					  (ringbase & ~SSB_DMA_TRANSLATION_MASK)
					  | trans);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		bcm43xx_dmacontroller_tx_reset(ring->dev, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
	} else {
		bcm43xx_dmacontroller_rx_reset(ring->dev, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			assert(ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta, 0);
	}
}

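/* Probe which DMA address width this core supports: 64-bit if the
 * core flags advertise it, otherwise 32-bit if the address-extension
 * bits of the 32-bit engine are writable, otherwise 30-bit. */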
static u64 supported_dma_mask(struct bcm43xx_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = bcm43xx_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = bcm43xx_dmacontroller_base(0, 0);
	bcm43xx_write32(dev,
			mmio_base + BCM43xx_DMA32_TXCTL,
			BCM43xx_DMA32_TXADDREXT_MASK);
	tmp = bcm43xx_read32(dev,
			     mmio_base + BCM43xx_DMA32_TXCTL);
	if (tmp & BCM43xx_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_wldev *dev,
		int controller_index,
		int for_tx,
		int dma64)
{
	struct bcm43xx_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = BCM43xx_RXRING_SLOTS;
	if (for_tx)
		nr_slots = BCM43xx_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct bcm43xx_txhdr_fw4),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dev,
					  ring->txhdr_cache, sizeof(struct bcm43xx_txhdr_fw4),
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dma_test)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    sizeof(struct bcm43xx_txhdr_fw4),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dev,
						  ring->txhdr_cache, sizeof(struct bcm43xx_txhdr_fw4),
						  DMA_TO_DEVICE);

			if (dma_mapping_error(dma_test))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dev,
				 dma_test, sizeof(struct bcm43xx_txhdr_fw4),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
	ring->index = controller_index;
	ring->dma64 = !!dma64;
	if (dma64)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
		} else
			assert(0);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
	if (!ring)
		return;

	dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
		(ring->dma64) ? "64" : "32",
		ring->mmio_base,
		(ring->tx) ? "TX" : "RX",
		ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_wldev *dev)
{
	struct bcm43xx_dma *dma;

	if (bcm43xx_using_pio(dev))
		return;
	dma = &dev->dma;

	bcm43xx_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int bcm43xx_dma_init(struct bcm43xx_wldev *dev)
{
	struct bcm43xx_dma *dma = &dev->dma;
	struct bcm43xx_dmaring *ring;
	int err;
	u64 dmamask;
	int dma64 = 0;

	dmamask = supported_dma_mask(dev);
	if (dmamask == DMA_64BIT_MASK)
		dma64 = 1;

	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
#ifdef BCM43XX_MAC80211_PIO
		printk(KERN_WARNING PFX "DMA for this device not supported. "
		       "Falling back to PIO\n");
		dev->__using_pio = 1;
		return -EAGAIN;
#else
		printk(KERN_ERR PFX "DMA for this device not supported and "
		       "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = bcm43xx_setup_dmaring(dev, 0, 1, dma64);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = bcm43xx_setup_dmaring(dev, 1, 1, dma64);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = bcm43xx_setup_dmaring(dev, 2, 1, dma64);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = bcm43xx_setup_dmaring(dev, 3, 1, dma64);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = bcm43xx_setup_dmaring(dev, 4, 1, dma64);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = bcm43xx_setup_dmaring(dev, 5, 1, dma64);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = bcm43xx_setup_dmaring(dev, 0, 0, dma64);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = bcm43xx_setup_dmaring(dev, 3, 0, dma64);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
		(dmamask == DMA_64BIT_MASK) ? 64 :
		(dmamask == DMA_32BIT_MASK) ? 32 : 30);
	err = 0;
out:
	return err;

err_destroy_rx0:
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
		int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	assert(((u16)slot & 0xF000) == 0x0000);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_wldev *dev,
		u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = &dev->dma;
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		assert(0);
	}
	*slot = (cookie & 0x0FFF);
	assert(ring && *slot >= 0 && *slot < ring->nr_slots);

	return ring;
}

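/* Queue one frame for transmission. Each packet consumes two ring
 * slots: the first carries the device TX header (taken from
 * ring->txhdr_cache), the second carries the 802.11 frame payload. */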
static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
		struct sk_buff *skb,
		struct ieee80211_tx_control *ctl)
{
	const struct bcm43xx_dma_ops *ops = ring->ops;
	u8 *header;
	int slot;
	int err;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET 2
	assert(skb_shinfo(skb)->nr_frags == 0);

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(struct bcm43xx_txhdr_fw4)]);
	bcm43xx_generate_txhdr(ring->dev, header,
			       skb->data, skb->len, ctl,
			       generate_cookie(ring, slot));

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct bcm43xx_txhdr_fw4), 1);
	if (dma_mapping_error(meta_hdr->dmaaddr))
		return -EIO;
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct bcm43xx_txhdr_fw4), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (dma_mapping_error(meta->dmaaddr)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (dma_mapping_error(meta->dmaaddr)) {
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct bcm43xx_txhdr_fw4), 1);
	return err;
}

static inline
int should_inject_overflow(struct bcm43xx_dmaring *ring)
{
#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
	if (unlikely(bcm43xx_debug(ring->dev, BCM43xx_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			dprintk(KERN_DEBUG PFX "Injecting TX ring overflow on "
				"DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_BCM43XX_MAC80211_DEBUG */
	return 0;
}

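/* mac80211 TX entry point: map the frame onto the appropriate TX ring
 * and stop the mac80211 queue when the ring has no room left for
 * another packet. */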
int bcm43xx_dma_tx(struct bcm43xx_wldev *dev,
		struct sk_buff *skb,
		struct ieee80211_tx_control *ctl)
{
	struct bcm43xx_dmaring *ring;
	int err = 0;
	unsigned long flags;

	ring = priority_to_txring(dev, ctl->queue);
	spin_lock_irqsave(&ring->lock, flags);
	assert(ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		printkl(KERN_ERR PFX "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	assert(!ring->stopped);

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err)) {
		printkl(KERN_ERR PFX "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

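/* Handle a TX status report from the hardware: look up the ring and
 * slot from the cookie, unmap and free the header and payload slots
 * and report the transmission result back to mac80211. */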
void bcm43xx_dma_handle_txstatus(struct bcm43xx_wldev *dev,
		const struct bcm43xx_txstatus *status)
{
	const struct bcm43xx_dma_ops *ops;
	struct bcm43xx_dmaring *ring;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	assert(irqs_disabled());
	spin_lock(&ring->lock);

	assert(ring->tx);
	ops = ring->ops;
	while (1) {
		assert(slot >= 0 && slot < ring->nr_slots);
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr, sizeof(struct bcm43xx_txhdr_fw4), 1);

		if (meta->is_last_fragment) {
			assert(meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			if (status->acked)
				meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
			meta->txstat.retry_count = status->frame_count - 1;
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			assert(meta->skb == NULL);
		}
		/* Everything belonging to the slot is unmapped
		 * and freed, so we can return it.
		 */
		return_slot(ring, slot);

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		assert(free_slots(ring) >= SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
	}

	spin_unlock(&ring->lock);
}

void bcm43xx_dma_get_tx_stats(struct bcm43xx_wldev *dev,
		struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct bcm43xx_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = priority_to_txring(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

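/* Process one received descriptor: hand TX status reports (ring 3)
 * to the TX status handler, validate the frame length, replace the
 * buffer with a fresh one and pass the skb up to the RX path. */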
static void dma_rx(struct bcm43xx_dmaring *ring,
		int *slot)
{
	const struct bcm43xx_dma_ops *ops = ring->ops;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct bcm43xx_hwtxstatus *hw = (struct bcm43xx_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		bcm43xx_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

		return;
	}
	rxhdr = (struct bcm43xx_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small "
			"(len: %u, buffer: %u, nr-dropped: %d)\n",
			len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	bcm43xx_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

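/* RX interrupt handler for one ring: walk the slots from the last
 * processed position up to the hardware's current RX slot and hand
 * each filled buffer to dma_rx(). */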
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	const struct bcm43xx_dma_ops *ops = ring->ops;
	int slot, current_slot;
#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
	int used_slots = 0;
#endif

	assert(!ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	assert(current_slot >= 0 && current_slot < ring->nr_slots);

	slot = ring->current_slot;
	for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_MAC80211_DEBUG
		if (++used_slots > ring->max_used_slots)
			ring->max_used_slots = used_slots;
#endif
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void bcm43xx_dma_tx_suspend_ring(struct bcm43xx_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	assert(ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void bcm43xx_dma_tx_resume_ring(struct bcm43xx_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	assert(ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void bcm43xx_dma_tx_suspend(struct bcm43xx_wldev *dev)
{
	bcm43xx_power_saving_ctl_bits(dev, -1, 1);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring0);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring1);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring2);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring3);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring4);
	bcm43xx_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void bcm43xx_dma_tx_resume(struct bcm43xx_wldev *dev)
{
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring5);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring4);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring3);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring2);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring1);
	bcm43xx_dma_tx_resume_ring(dev->dma.tx_ring0);
	bcm43xx_power_saving_ctl_bits(dev, -1, -1);
}