package/bcm43xx-mac80211/src/bcm43xx/bcm43xx_dma.c
1 /*
2
3 Broadcom BCM43xx wireless driver
4
5 DMA ringbuffer and descriptor allocation/management
6
7 Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
8
9 Some code in this file is derived from the b44.c driver
10 Copyright (C) 2002 David S. Miller
11 Copyright (C) Pekka Pietikainen
12
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or
16 (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; see the file COPYING. If not, write to
25 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
26 Boston, MA 02110-1301, USA.
27
28 */
29
30 #include "bcm43xx.h"
31 #include "bcm43xx_dma.h"
32 #include "bcm43xx_main.h"
33 #include "bcm43xx_debugfs.h"
34 #include "bcm43xx_power.h"
35 #include "bcm43xx_xmit.h"
36
37 #include <linux/dma-mapping.h>
38 #include <linux/pci.h>
39 #include <linux/delay.h>
40 #include <linux/skbuff.h>
41
42
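/*
 * The 32-bit and 64-bit DMA engines use different descriptor layouts and
 * register offsets. Each ring is bound to one of the two bcm43xx_dma_ops
 * tables below (dma32_ops or dma64_ops) when it is set up.
 */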
43 /* 32bit DMA ops. */
44 static
45 struct bcm43xx_dmadesc_generic * op32_idx2desc(struct bcm43xx_dmaring *ring,
46 int slot,
47 struct bcm43xx_dmadesc_meta **meta)
48 {
49 struct bcm43xx_dmadesc32 *desc;
50
51 *meta = &(ring->meta[slot]);
52 desc = ring->descbase;
53 desc = &(desc[slot]);
54
55 return (struct bcm43xx_dmadesc_generic *)desc;
56 }
57
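/* Program one 32-bit descriptor: the buffer address (with the SSB address
 * translation bits folded in) and the control word holding the byte count,
 * FRAMESTART/FRAMEEND flags, the table-end marker on the last slot and the
 * IRQ flag. */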
58 static void op32_fill_descriptor(struct bcm43xx_dmaring *ring,
59 struct bcm43xx_dmadesc_generic *desc,
60 dma_addr_t dmaaddr, u16 bufsize,
61 int start, int end, int irq)
62 {
63 struct bcm43xx_dmadesc32 *descbase = ring->descbase;
64 int slot;
65 u32 ctl;
66 u32 addr;
67 u32 addrext;
68
69 slot = (int)(&(desc->dma32) - descbase);
70 assert(slot >= 0 && slot < ring->nr_slots);
71
72 addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
73 addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
74 >> SSB_DMA_TRANSLATION_SHIFT;
75 addr |= ssb_dma_translation(ring->dev->dev);
76 ctl = (bufsize - ring->frameoffset)
77 & BCM43xx_DMA32_DCTL_BYTECNT;
78 if (slot == ring->nr_slots - 1)
79 ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
80 if (start)
81 ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
82 if (end)
83 ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
84 if (irq)
85 ctl |= BCM43xx_DMA32_DCTL_IRQ;
86 ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
87 & BCM43xx_DMA32_DCTL_ADDREXT_MASK;
88
89 desc->dma32.control = cpu_to_le32(ctl);
90 desc->dma32.address = cpu_to_le32(addr);
91 }
92
93 static void op32_poke_tx(struct bcm43xx_dmaring *ring, int slot)
94 {
95 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXINDEX,
96 (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
97 }
98
99 static void op32_tx_suspend(struct bcm43xx_dmaring *ring)
100 {
101 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
102 bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
103 | BCM43xx_DMA32_TXSUSPEND);
104 }
105
106 static void op32_tx_resume(struct bcm43xx_dmaring *ring)
107 {
108 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
109 bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
110 & ~BCM43xx_DMA32_TXSUSPEND);
111 }
112
113 static int op32_get_current_rxslot(struct bcm43xx_dmaring *ring)
114 {
115 u32 val;
116
117 val = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
118 val &= BCM43xx_DMA32_RXDPTR;
119
120 return (val / sizeof(struct bcm43xx_dmadesc32));
121 }
122
123 static void op32_set_current_rxslot(struct bcm43xx_dmaring *ring,
124 int slot)
125 {
126 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
127 (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
128 }
129
130 static const struct bcm43xx_dma_ops dma32_ops = {
131 .idx2desc = op32_idx2desc,
132 .fill_descriptor = op32_fill_descriptor,
133 .poke_tx = op32_poke_tx,
134 .tx_suspend = op32_tx_suspend,
135 .tx_resume = op32_tx_resume,
136 .get_current_rxslot = op32_get_current_rxslot,
137 .set_current_rxslot = op32_set_current_rxslot,
138 };
139
140 /* 64bit DMA ops. */
141 static
142 struct bcm43xx_dmadesc_generic * op64_idx2desc(struct bcm43xx_dmaring *ring,
143 int slot,
144 struct bcm43xx_dmadesc_meta **meta)
145 {
146 struct bcm43xx_dmadesc64 *desc;
147
148 *meta = &(ring->meta[slot]);
149 desc = ring->descbase;
150 desc = &(desc[slot]);
151
152 return (struct bcm43xx_dmadesc_generic *)desc;
153 }
154
155 static void op64_fill_descriptor(struct bcm43xx_dmaring *ring,
156 struct bcm43xx_dmadesc_generic *desc,
157 dma_addr_t dmaaddr, u16 bufsize,
158 int start, int end, int irq)
159 {
160 struct bcm43xx_dmadesc64 *descbase = ring->descbase;
161 int slot;
162 u32 ctl0 = 0, ctl1 = 0;
163 u32 addrlo, addrhi;
164 u32 addrext;
165
166 slot = (int)(&(desc->dma64) - descbase);
167 assert(slot >= 0 && slot < ring->nr_slots);
168
169 addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
170 addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
171 addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
172 >> SSB_DMA_TRANSLATION_SHIFT;
173 addrhi |= ssb_dma_translation(ring->dev->dev);
174 if (slot == ring->nr_slots - 1)
175 ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
176 if (start)
177 ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
178 if (end)
179 ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
180 if (irq)
181 ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
182 ctl1 |= (bufsize - ring->frameoffset)
183 & BCM43xx_DMA64_DCTL1_BYTECNT;
184 ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
185 & BCM43xx_DMA64_DCTL1_ADDREXT_MASK;
186
187 desc->dma64.control0 = cpu_to_le32(ctl0);
188 desc->dma64.control1 = cpu_to_le32(ctl1);
189 desc->dma64.address_low = cpu_to_le32(addrlo);
190 desc->dma64.address_high = cpu_to_le32(addrhi);
191 }
192
193 static void op64_poke_tx(struct bcm43xx_dmaring *ring, int slot)
194 {
195 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXINDEX,
196 (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
197 }
198
199 static void op64_tx_suspend(struct bcm43xx_dmaring *ring)
200 {
201 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
202 bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
203 | BCM43xx_DMA64_TXSUSPEND);
204 }
205
206 static void op64_tx_resume(struct bcm43xx_dmaring *ring)
207 {
208 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
209 bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
210 & ~BCM43xx_DMA64_TXSUSPEND);
211 }
212
213 static int op64_get_current_rxslot(struct bcm43xx_dmaring *ring)
214 {
215 u32 val;
216
217 val = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
218 val &= BCM43xx_DMA64_RXSTATDPTR;
219
220 return (val / sizeof(struct bcm43xx_dmadesc64));
221 }
222
223 static void op64_set_current_rxslot(struct bcm43xx_dmaring *ring,
224 int slot)
225 {
226 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
227 (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
228 }
229
230 static const struct bcm43xx_dma_ops dma64_ops = {
231 .idx2desc = op64_idx2desc,
232 .fill_descriptor = op64_fill_descriptor,
233 .poke_tx = op64_poke_tx,
234 .tx_suspend = op64_tx_suspend,
235 .tx_resume = op64_tx_resume,
236 .get_current_rxslot = op64_get_current_rxslot,
237 .set_current_rxslot = op64_set_current_rxslot,
238 };
239
240
241 static inline int free_slots(struct bcm43xx_dmaring *ring)
242 {
243 return (ring->nr_slots - ring->used_slots);
244 }
245
246 static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
247 {
248 assert(slot >= -1 && slot <= ring->nr_slots - 1);
249 if (slot == ring->nr_slots - 1)
250 return 0;
251 return slot + 1;
252 }
253
254 static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
255 {
256 assert(slot >= 0 && slot <= ring->nr_slots - 1);
257 if (slot == 0)
258 return ring->nr_slots - 1;
259 return slot - 1;
260 }
261
262 /* Request a slot for usage. */
263 static inline
264 int request_slot(struct bcm43xx_dmaring *ring)
265 {
266 int slot;
267
268 assert(ring->tx);
269 assert(!ring->stopped);
270 assert(free_slots(ring) != 0);
271
272 slot = next_slot(ring, ring->current_slot);
273 ring->current_slot = slot;
274 ring->used_slots++;
275
276 #ifdef CONFIG_BCM43XX_MAC80211_DEBUG
277 if (ring->used_slots > ring->max_used_slots)
278 ring->max_used_slots = ring->used_slots;
279 #endif /* CONFIG_BCM43XX_MAC80211_DEBUG*/
280
281 return slot;
282 }
283
284 /* Return a slot to the free slots. */
285 static inline
286 void return_slot(struct bcm43xx_dmaring *ring, int slot)
287 {
288 assert(ring->tx);
289
290 ring->used_slots--;
291 }
292
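/* Return the MMIO register base for DMA controller 0..5,
 * for either the 64-bit or the 32-bit engine. */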
293 u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
294 {
295 static const u16 map64[] = {
296 BCM43xx_MMIO_DMA64_BASE0,
297 BCM43xx_MMIO_DMA64_BASE1,
298 BCM43xx_MMIO_DMA64_BASE2,
299 BCM43xx_MMIO_DMA64_BASE3,
300 BCM43xx_MMIO_DMA64_BASE4,
301 BCM43xx_MMIO_DMA64_BASE5,
302 };
303 static const u16 map32[] = {
304 BCM43xx_MMIO_DMA32_BASE0,
305 BCM43xx_MMIO_DMA32_BASE1,
306 BCM43xx_MMIO_DMA32_BASE2,
307 BCM43xx_MMIO_DMA32_BASE3,
308 BCM43xx_MMIO_DMA32_BASE4,
309 BCM43xx_MMIO_DMA32_BASE5,
310 };
311
312 if (dma64bit) {
313 assert(controller_idx >= 0 &&
314 controller_idx < ARRAY_SIZE(map64));
315 return map64[controller_idx];
316 }
317 assert(controller_idx >= 0 &&
318 controller_idx < ARRAY_SIZE(map32));
319 return map32[controller_idx];
320 }
321
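/* The following helpers wrap the generic DMA API (map, unmap, sync)
 * for descriptor buffers. The mapping direction depends on whether
 * the buffer belongs to a TX or an RX ring. */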
322 static inline
323 dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
324 unsigned char *buf,
325 size_t len,
326 int tx)
327 {
328 dma_addr_t dmaaddr;
329
330 if (tx) {
331 dmaaddr = dma_map_single(ring->dev->dev->dev,
332 buf, len,
333 DMA_TO_DEVICE);
334 } else {
335 dmaaddr = dma_map_single(ring->dev->dev->dev,
336 buf, len,
337 DMA_FROM_DEVICE);
338 }
339
340 return dmaaddr;
341 }
342
343 static inline
344 void unmap_descbuffer(struct bcm43xx_dmaring *ring,
345 dma_addr_t addr,
346 size_t len,
347 int tx)
348 {
349 if (tx) {
350 dma_unmap_single(ring->dev->dev->dev,
351 addr, len,
352 DMA_TO_DEVICE);
353 } else {
354 dma_unmap_single(ring->dev->dev->dev,
355 addr, len,
356 DMA_FROM_DEVICE);
357 }
358 }
359
360 static inline
361 void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
362 dma_addr_t addr,
363 size_t len)
364 {
365 assert(!ring->tx);
366
367 dma_sync_single_for_cpu(ring->dev->dev->dev,
368 addr, len, DMA_FROM_DEVICE);
369 }
370
371 static inline
372 void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
373 dma_addr_t addr,
374 size_t len)
375 {
376 assert(!ring->tx);
377
378 dma_sync_single_for_device(ring->dev->dev->dev,
379 addr, len, DMA_FROM_DEVICE);
380 }
381
382 static inline
383 void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
384 struct bcm43xx_dmadesc_meta *meta,
385 int irq_context)
386 {
387 if (meta->skb) {
388 if (irq_context)
389 dev_kfree_skb_irq(meta->skb);
390 else
391 dev_kfree_skb(meta->skb);
392 meta->skb = NULL;
393 }
394 }
395
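/* Allocate the coherent memory block that holds the hardware
 * descriptor ring itself (BCM43xx_DMA_RINGMEMSIZE bytes). */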
396 static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
397 {
398 struct device *dev = ring->dev->dev->dev;
399
400 ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
401 &(ring->dmabase), GFP_KERNEL);
402 if (!ring->descbase) {
403 printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
404 return -ENOMEM;
405 }
406 memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);
407
408 return 0;
409 }
410
411 static void free_ringmemory(struct bcm43xx_dmaring *ring)
412 {
413 struct device *dev = ring->dev->dev->dev;
414
415 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
416 ring->descbase, ring->dmabase);
417 }
418
419 /* Reset the RX DMA channel */
420 int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_wldev *dev,
421 u16 mmio_base, int dma64)
422 {
423 int i;
424 u32 value;
425 u16 offset;
426
427 offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
428 bcm43xx_write32(dev, mmio_base + offset, 0);
429 for (i = 0; i < 1000; i++) {
430 offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
431 value = bcm43xx_read32(dev, mmio_base + offset);
432 if (dma64) {
433 value &= BCM43xx_DMA64_RXSTAT;
434 if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
435 i = -1;
436 break;
437 }
438 } else {
439 value &= BCM43xx_DMA32_RXSTATE;
440 if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
441 i = -1;
442 break;
443 }
444 }
445 udelay(10);
446 }
447 if (i != -1) {
448 printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
449 return -ENODEV;
450 }
451
452 return 0;
453 }
454
455 /* Reset the TX DMA channel */
456 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_wldev *dev,
457 u16 mmio_base, int dma64)
458 {
459 int i;
460 u32 value;
461 u16 offset;
462
463 for (i = 0; i < 1000; i++) {
464 offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
465 value = bcm43xx_read32(dev, mmio_base + offset);
466 if (dma64) {
467 value &= BCM43xx_DMA64_TXSTAT;
468 if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
469 value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
470 value == BCM43xx_DMA64_TXSTAT_STOPPED)
471 break;
472 } else {
473 value &= BCM43xx_DMA32_TXSTATE;
474 if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
475 value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
476 value == BCM43xx_DMA32_TXSTAT_STOPPED)
477 break;
478 }
479 udelay(10);
480 }
481 offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
482 bcm43xx_write32(dev, mmio_base + offset, 0);
483 for (i = 0; i < 1000; i++) {
484 offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
485 value = bcm43xx_read32(dev, mmio_base + offset);
486 if (dma64) {
487 value &= BCM43xx_DMA64_TXSTAT;
488 if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
489 i = -1;
490 break;
491 }
492 } else {
493 value &= BCM43xx_DMA32_TXSTATE;
494 if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
495 i = -1;
496 break;
497 }
498 }
499 udelay(10);
500 }
501 if (i != -1) {
502 printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
503 return -ENODEV;
504 }
505 /* ensure the reset is completed. */
506 udelay(300);
507
508 return 0;
509 }
510
511 static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
512 struct bcm43xx_dmadesc_generic *desc,
513 struct bcm43xx_dmadesc_meta *meta,
514 gfp_t gfp_flags)
515 {
516 struct bcm43xx_rxhdr_fw4 *rxhdr;
517 struct bcm43xx_hwtxstatus *txstat;
518 dma_addr_t dmaaddr;
519 struct sk_buff *skb;
520
521 assert(!ring->tx);
522
523 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
524 if (unlikely(!skb))
525 return -ENOMEM;
526 dmaaddr = map_descbuffer(ring, skb->data,
527 ring->rx_buffersize, 0);
528 if (dma_mapping_error(dmaaddr)) {
529 /* ugh. try to realloc in zone_dma */
530 gfp_flags |= GFP_DMA;
531
532 dev_kfree_skb_any(skb);
533
534 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
535 if (unlikely(!skb))
536 return -ENOMEM;
537 dmaaddr = map_descbuffer(ring, skb->data,
538 ring->rx_buffersize, 0);
539 }
540
541 if (dma_mapping_error(dmaaddr)) {
542 dev_kfree_skb_any(skb);
543 return -EIO;
544 }
545
546 meta->skb = skb;
547 meta->dmaaddr = dmaaddr;
548 ring->ops->fill_descriptor(ring, desc, dmaaddr,
549 ring->rx_buffersize, 0, 0, 0);
550
551 rxhdr = (struct bcm43xx_rxhdr_fw4 *)(skb->data);
552 rxhdr->frame_len = 0;
553 txstat = (struct bcm43xx_hwtxstatus *)(skb->data);
554 txstat->cookie = 0;
555
556 return 0;
557 }
558
559 /* Allocate the initial descbuffers.
560 * This is used for an RX ring only.
561 */
562 static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
563 {
564 int i, err = -ENOMEM;
565 struct bcm43xx_dmadesc_generic *desc;
566 struct bcm43xx_dmadesc_meta *meta;
567
568 for (i = 0; i < ring->nr_slots; i++) {
569 desc = ring->ops->idx2desc(ring, i, &meta);
570
571 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
572 if (err) {
573 printk(KERN_ERR PFX "Failed to allocate initial descbuffers\n");
574 goto err_unwind;
575 }
576 }
577 mb();
578 ring->used_slots = ring->nr_slots;
579 err = 0;
580 out:
581 return err;
582
583 err_unwind:
584 for (i--; i >= 0; i--) {
585 desc = ring->ops->idx2desc(ring, i, &meta);
586
587 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
588 dev_kfree_skb(meta->skb);
589 }
590 goto out;
591 }
592
593 /* Do initial setup of the DMA controller.
594 * Reset the controller, write the ring's bus address
595 * and switch the "enable" bit on.
596 */
597 static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
598 {
599 int err = 0;
600 u32 value;
601 u32 addrext;
602 u32 trans = ssb_dma_translation(ring->dev->dev);
603
604 if (ring->tx) {
605 if (ring->dma64) {
606 u64 ringbase = (u64)(ring->dmabase);
607
608 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
609 >> SSB_DMA_TRANSLATION_SHIFT;
610 value = BCM43xx_DMA64_TXENABLE;
611 value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
612 & BCM43xx_DMA64_TXADDREXT_MASK;
613 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
614 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
615 (ringbase & 0xFFFFFFFF));
616 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
617 ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK)
618 | trans);
619 } else {
620 u32 ringbase = (u32)(ring->dmabase);
621
622 addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
623 >> SSB_DMA_TRANSLATION_SHIFT;
624 value = BCM43xx_DMA32_TXENABLE;
625 value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
626 & BCM43xx_DMA32_TXADDREXT_MASK;
627 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
628 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
629 (ringbase & ~SSB_DMA_TRANSLATION_MASK)
630 | trans);
631 }
632 } else {
633 err = alloc_initial_descbuffers(ring);
634 if (err)
635 goto out;
636 if (ring->dma64) {
637 u64 ringbase = (u64)(ring->dmabase);
638
639 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
640 >> SSB_DMA_TRANSLATION_SHIFT;
641 value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
642 value |= BCM43xx_DMA64_RXENABLE;
643 value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
644 & BCM43xx_DMA64_RXADDREXT_MASK;
645 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
646 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
647 (ringbase & 0xFFFFFFFF));
648 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
649 ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK)
650 | trans);
651 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
652 } else {
653 u32 ringbase = (u32)(ring->dmabase);
654
655 addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
656 >> SSB_DMA_TRANSLATION_SHIFT;
657 value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
658 value |= BCM43xx_DMA32_RXENABLE;
659 value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
660 & BCM43xx_DMA32_RXADDREXT_MASK;
661 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
662 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
663 (ringbase & ~SSB_DMA_TRANSLATION_MASK)
664 | trans);
665 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
666 }
667 }
668
669 out:
670 return err;
671 }
672
673 /* Shutdown the DMA controller. */
674 static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
675 {
676 if (ring->tx) {
677 bcm43xx_dmacontroller_tx_reset(ring->dev, ring->mmio_base, ring->dma64);
678 if (ring->dma64) {
679 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
680 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
681 } else
682 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
683 } else {
684 bcm43xx_dmacontroller_rx_reset(ring->dev, ring->mmio_base, ring->dma64);
685 if (ring->dma64) {
686 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
687 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
688 } else
689 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
690 }
691 }
692
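/* Unmap and free every descriptor buffer still attached to the ring.
 * TX buffers were mapped with the skb length, RX buffers with
 * ring->rx_buffersize. */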
693 static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
694 {
695 struct bcm43xx_dmadesc_generic *desc;
696 struct bcm43xx_dmadesc_meta *meta;
697 int i;
698
699 if (!ring->used_slots)
700 return;
701 for (i = 0; i < ring->nr_slots; i++) {
702 desc = ring->ops->idx2desc(ring, i, &meta);
703
704 if (!meta->skb) {
705 assert(ring->tx);
706 continue;
707 }
708 if (ring->tx) {
709 unmap_descbuffer(ring, meta->dmaaddr,
710 meta->skb->len, 1);
711 } else {
712 unmap_descbuffer(ring, meta->dmaaddr,
713 ring->rx_buffersize, 0);
714 }
715 free_descriptor_buffer(ring, meta, 0);
716 }
717 }
718
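/* Find the widest DMA mask the core supports: 64-bit if advertised in
 * TMSHIGH, 32-bit if the address extension bits are implemented in
 * TXCTL, 30-bit otherwise. */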
719 static u64 supported_dma_mask(struct bcm43xx_wldev *dev)
720 {
721 u32 tmp;
722 u16 mmio_base;
723
724 tmp = bcm43xx_read32(dev, SSB_TMSHIGH);
725 if (tmp & SSB_TMSHIGH_DMA64)
726 return DMA_64BIT_MASK;
727 mmio_base = bcm43xx_dmacontroller_base(0, 0);
728 bcm43xx_write32(dev,
729 mmio_base + BCM43xx_DMA32_TXCTL,
730 BCM43xx_DMA32_TXADDREXT_MASK);
731 tmp = bcm43xx_read32(dev,
732 mmio_base + BCM43xx_DMA32_TXCTL);
733 if (tmp & BCM43xx_DMA32_TXADDREXT_MASK)
734 return DMA_32BIT_MASK;
735
736 return DMA_30BIT_MASK;
737 }
738
739 /* Main initialization function. */
740 static
741 struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_wldev *dev,
742 int controller_index,
743 int for_tx,
744 int dma64)
745 {
746 struct bcm43xx_dmaring *ring;
747 int err;
748 int nr_slots;
749 dma_addr_t dma_test;
750
751 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
752 if (!ring)
753 goto out;
754
755 nr_slots = BCM43xx_RXRING_SLOTS;
756 if (for_tx)
757 nr_slots = BCM43xx_TXRING_SLOTS;
758
759 ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
760 GFP_KERNEL);
761 if (!ring->meta)
762 goto err_kfree_ring;
763 if (for_tx) {
764 ring->txhdr_cache = kcalloc(nr_slots,
765 sizeof(struct bcm43xx_txhdr_fw4),
766 GFP_KERNEL);
767 if (!ring->txhdr_cache)
768 goto err_kfree_meta;
769
770 /* test for ability to dma to txhdr_cache */
771 dma_test = dma_map_single(dev->dev->dev,
772 ring->txhdr_cache, sizeof(struct bcm43xx_txhdr_fw4),
773 DMA_TO_DEVICE);
774
775 if (dma_mapping_error(dma_test)) {
776 /* ugh realloc */
777 kfree(ring->txhdr_cache);
778 ring->txhdr_cache = kcalloc(nr_slots,
779 sizeof(struct bcm43xx_txhdr_fw4),
780 GFP_KERNEL | GFP_DMA);
781 if (!ring->txhdr_cache)
782 goto err_kfree_meta;
783
784 dma_test = dma_map_single(dev->dev->dev,
785 ring->txhdr_cache, sizeof(struct bcm43xx_txhdr_fw4),
786 DMA_TO_DEVICE);
787
788 if (dma_mapping_error(dma_test))
789 goto err_kfree_txhdr_cache;
790 }
791
792 dma_unmap_single(dev->dev->dev,
793 dma_test, sizeof(struct bcm43xx_txhdr_fw4),
794 DMA_TO_DEVICE);
795 }
796
797 ring->dev = dev;
798 ring->nr_slots = nr_slots;
799 ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
800 ring->index = controller_index;
801 ring->dma64 = !!dma64;
802 if (dma64)
803 ring->ops = &dma64_ops;
804 else
805 ring->ops = &dma32_ops;
806 if (for_tx) {
807 ring->tx = 1;
808 ring->current_slot = -1;
809 } else {
810 if (ring->index == 0) {
811 ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
812 ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
813 } else if (ring->index == 3) {
814 ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
815 ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
816 } else
817 assert(0);
818 }
819
820 err = alloc_ringmemory(ring);
821 if (err)
822 goto err_kfree_txhdr_cache;
823 err = dmacontroller_setup(ring);
824 if (err)
825 goto err_free_ringmemory;
826
827 out:
828 return ring;
829
830 err_free_ringmemory:
831 free_ringmemory(ring);
832 err_kfree_txhdr_cache:
833 kfree(ring->txhdr_cache);
834 err_kfree_meta:
835 kfree(ring->meta);
836 err_kfree_ring:
837 kfree(ring);
838 ring = NULL;
839 goto out;
840 }
841
842 /* Main cleanup function. */
843 static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
844 {
845 if (!ring)
846 return;
847
848 dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
849 (ring->dma64) ? "64" : "32",
850 ring->mmio_base,
851 (ring->tx) ? "TX" : "RX",
852 ring->max_used_slots, ring->nr_slots);
853 /* Device IRQs are disabled prior to entering this function,
854 * so there is no need to worry about concurrency with the RX handler.
855 */
856 dmacontroller_cleanup(ring);
857 free_all_descbuffers(ring);
858 free_ringmemory(ring);
859
860 kfree(ring->txhdr_cache);
861 kfree(ring->meta);
862 kfree(ring);
863 }
864
865 void bcm43xx_dma_free(struct bcm43xx_wldev *dev)
866 {
867 struct bcm43xx_dma *dma;
868
869 if (bcm43xx_using_pio(dev))
870 return;
871 dma = &dev->dma;
872
873 bcm43xx_destroy_dmaring(dma->rx_ring3);
874 dma->rx_ring3 = NULL;
875 bcm43xx_destroy_dmaring(dma->rx_ring0);
876 dma->rx_ring0 = NULL;
877
878 bcm43xx_destroy_dmaring(dma->tx_ring5);
879 dma->tx_ring5 = NULL;
880 bcm43xx_destroy_dmaring(dma->tx_ring4);
881 dma->tx_ring4 = NULL;
882 bcm43xx_destroy_dmaring(dma->tx_ring3);
883 dma->tx_ring3 = NULL;
884 bcm43xx_destroy_dmaring(dma->tx_ring2);
885 dma->tx_ring2 = NULL;
886 bcm43xx_destroy_dmaring(dma->tx_ring1);
887 dma->tx_ring1 = NULL;
888 bcm43xx_destroy_dmaring(dma->tx_ring0);
889 dma->tx_ring0 = NULL;
890 }
891
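/* Set up all DMA rings: six TX rings and RX ring 0, plus RX ring 3
 * (used for TX status reports) on core revisions below 5. */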
892 int bcm43xx_dma_init(struct bcm43xx_wldev *dev)
893 {
894 struct bcm43xx_dma *dma = &dev->dma;
895 struct bcm43xx_dmaring *ring;
896 int err;
897 u64 dmamask;
898 int dma64 = 0;
899
900 dmamask = supported_dma_mask(dev);
901 if (dmamask == DMA_64BIT_MASK)
902 dma64 = 1;
903
904 err = ssb_dma_set_mask(dev->dev, dmamask);
905 if (err) {
906 #ifdef BCM43XX_MAC80211_PIO
907 printk(KERN_WARNING PFX "DMA for this device not supported. "
908 "Falling back to PIO\n");
909 dev->__using_pio = 1;
910 return -EAGAIN;
911 #else
912 printk(KERN_ERR PFX "DMA for this device not supported and "
913 "no PIO support compiled in\n");
914 return -EOPNOTSUPP;
915 #endif
916 }
917
918 err = -ENOMEM;
919 /* setup TX DMA channels. */
920 ring = bcm43xx_setup_dmaring(dev, 0, 1, dma64);
921 if (!ring)
922 goto out;
923 dma->tx_ring0 = ring;
924
925 ring = bcm43xx_setup_dmaring(dev, 1, 1, dma64);
926 if (!ring)
927 goto err_destroy_tx0;
928 dma->tx_ring1 = ring;
929
930 ring = bcm43xx_setup_dmaring(dev, 2, 1, dma64);
931 if (!ring)
932 goto err_destroy_tx1;
933 dma->tx_ring2 = ring;
934
935 ring = bcm43xx_setup_dmaring(dev, 3, 1, dma64);
936 if (!ring)
937 goto err_destroy_tx2;
938 dma->tx_ring3 = ring;
939
940 ring = bcm43xx_setup_dmaring(dev, 4, 1, dma64);
941 if (!ring)
942 goto err_destroy_tx3;
943 dma->tx_ring4 = ring;
944
945 ring = bcm43xx_setup_dmaring(dev, 5, 1, dma64);
946 if (!ring)
947 goto err_destroy_tx4;
948 dma->tx_ring5 = ring;
949
950 /* setup RX DMA channels. */
951 ring = bcm43xx_setup_dmaring(dev, 0, 0, dma64);
952 if (!ring)
953 goto err_destroy_tx5;
954 dma->rx_ring0 = ring;
955
956 if (dev->dev->id.revision < 5) {
957 ring = bcm43xx_setup_dmaring(dev, 3, 0, dma64);
958 if (!ring)
959 goto err_destroy_rx0;
960 dma->rx_ring3 = ring;
961 }
962
963 dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
964 (dmamask == DMA_64BIT_MASK) ? 64 :
965 (dmamask == DMA_32BIT_MASK) ? 32 : 30);
966 err = 0;
967 out:
968 return err;
969
970 err_destroy_rx0:
971 bcm43xx_destroy_dmaring(dma->rx_ring0);
972 dma->rx_ring0 = NULL;
973 err_destroy_tx5:
974 bcm43xx_destroy_dmaring(dma->tx_ring5);
975 dma->tx_ring5 = NULL;
976 err_destroy_tx4:
977 bcm43xx_destroy_dmaring(dma->tx_ring4);
978 dma->tx_ring4 = NULL;
979 err_destroy_tx3:
980 bcm43xx_destroy_dmaring(dma->tx_ring3);
981 dma->tx_ring3 = NULL;
982 err_destroy_tx2:
983 bcm43xx_destroy_dmaring(dma->tx_ring2);
984 dma->tx_ring2 = NULL;
985 err_destroy_tx1:
986 bcm43xx_destroy_dmaring(dma->tx_ring1);
987 dma->tx_ring1 = NULL;
988 err_destroy_tx0:
989 bcm43xx_destroy_dmaring(dma->tx_ring0);
990 dma->tx_ring0 = NULL;
991 goto out;
992 }
993
994 /* Generate a cookie for the TX header. */
995 static u16 generate_cookie(struct bcm43xx_dmaring *ring,
996 int slot)
997 {
998 u16 cookie = 0x1000;
999
1000 /* Use the upper 4 bits of the cookie as
1001 * DMA controller ID and store the slot number
1002 * in the lower 12 bits.
1003 * Note that the cookie must never be 0, as this
1004 * is a special value used in the RX path.
1005 */
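/* Example: slot 0x01F on tx_ring1 (controller index 1) gives cookie 0xB01F. */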
1006 switch (ring->index) {
1007 case 0:
1008 cookie = 0xA000;
1009 break;
1010 case 1:
1011 cookie = 0xB000;
1012 break;
1013 case 2:
1014 cookie = 0xC000;
1015 break;
1016 case 3:
1017 cookie = 0xD000;
1018 break;
1019 case 4:
1020 cookie = 0xE000;
1021 break;
1022 case 5:
1023 cookie = 0xF000;
1024 break;
1025 }
1026 assert(((u16)slot & 0xF000) == 0x0000);
1027 cookie |= (u16)slot;
1028
1029 return cookie;
1030 }
1031
1032 /* Inspect a cookie and find out to which controller/slot it belongs. */
1033 static
1034 struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_wldev *dev,
1035 u16 cookie, int *slot)
1036 {
1037 struct bcm43xx_dma *dma = &dev->dma;
1038 struct bcm43xx_dmaring *ring = NULL;
1039
1040 switch (cookie & 0xF000) {
1041 case 0xA000:
1042 ring = dma->tx_ring0;
1043 break;
1044 case 0xB000:
1045 ring = dma->tx_ring1;
1046 break;
1047 case 0xC000:
1048 ring = dma->tx_ring2;
1049 break;
1050 case 0xD000:
1051 ring = dma->tx_ring3;
1052 break;
1053 case 0xE000:
1054 ring = dma->tx_ring4;
1055 break;
1056 case 0xF000:
1057 ring = dma->tx_ring5;
1058 break;
1059 default:
1060 assert(0);
1061 }
1062 *slot = (cookie & 0x0FFF);
1063 assert(ring && *slot >= 0 && *slot < ring->nr_slots);
1064
1065 return ring;
1066 }
1067
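/* Queue one frame on a TX ring. The first slot carries the device TX
 * header from txhdr_cache, the second carries the payload skb
 * (SLOTS_PER_PACKET == 2); only the payload descriptor requests an IRQ. */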
1068 static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
1069 struct sk_buff *skb,
1070 struct ieee80211_tx_control *ctl)
1071 {
1072 const struct bcm43xx_dma_ops *ops = ring->ops;
1073 u8 *header;
1074 int slot;
1075 int err;
1076 struct bcm43xx_dmadesc_generic *desc;
1077 struct bcm43xx_dmadesc_meta *meta;
1078 struct bcm43xx_dmadesc_meta *meta_hdr;
1079 struct sk_buff *bounce_skb;
1080
1081 #define SLOTS_PER_PACKET 2
1082 assert(skb_shinfo(skb)->nr_frags == 0);
1083
1084 /* Get a slot for the header. */
1085 slot = request_slot(ring);
1086 desc = ops->idx2desc(ring, slot, &meta_hdr);
1087 memset(meta_hdr, 0, sizeof(*meta_hdr));
1088
1089 header = &(ring->txhdr_cache[slot * sizeof(struct bcm43xx_txhdr_fw4)]);
1090 bcm43xx_generate_txhdr(ring->dev, header,
1091 skb->data, skb->len, ctl,
1092 generate_cookie(ring, slot));
1093
1094 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
1095 sizeof(struct bcm43xx_txhdr_fw4), 1);
1096 if (dma_mapping_error(meta_hdr->dmaaddr))
1097 return -EIO;
1098 ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
1099 sizeof(struct bcm43xx_txhdr_fw4), 1, 0, 0);
1100
1101 /* Get a slot for the payload. */
1102 slot = request_slot(ring);
1103 desc = ops->idx2desc(ring, slot, &meta);
1104 memset(meta, 0, sizeof(*meta));
1105
1106 memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
1107 meta->skb = skb;
1108 meta->is_last_fragment = 1;
1109
1110 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1111 /* create a bounce buffer in zone_dma on mapping failure. */
1112 if (dma_mapping_error(meta->dmaaddr)) {
1113 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
1114 if (!bounce_skb) {
1115 err = -ENOMEM;
1116 goto out_unmap_hdr;
1117 }
1118
1119 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
1120 dev_kfree_skb_any(skb);
1121 skb = bounce_skb;
1122 meta->skb = skb;
1123 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1124 if (dma_mapping_error(meta->dmaaddr)) {
1125 err = -EIO;
1126 goto out_free_bounce;
1127 }
1128 }
1129
1130 ops->fill_descriptor(ring, desc, meta->dmaaddr,
1131 skb->len, 0, 1, 1);
1132
1133 /* Now transfer the whole frame. */
1134 wmb();
1135 ops->poke_tx(ring, next_slot(ring, slot));
1136 return 0;
1137
1138 out_free_bounce:
1139 dev_kfree_skb_any(skb);
1140 out_unmap_hdr:
1141 unmap_descbuffer(ring, meta_hdr->dmaaddr,
1142 sizeof(struct bcm43xx_txhdr_fw4), 1);
1143 return err;
1144 }
1145
1146 int bcm43xx_dma_tx(struct bcm43xx_wldev *dev,
1147 struct sk_buff *skb,
1148 struct ieee80211_tx_control *ctl)
1149 {
1150 struct bcm43xx_dmaring *ring = dev->dma.tx_ring1;
1151 int err = 0;
1152
1153 assert(ring->tx);
1154 if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
1155 /* This should never trigger, as we call
1156 * ieee80211_stop_queue() when it's full.
1157 */
1158 printkl(KERN_ERR PFX "DMA queue overflow\n");
1159 return NETDEV_TX_BUSY;
1160 }
1161
1162 err = dma_tx_fragment(ring, skb, ctl);
1163 if (unlikely(err)) {
1164 printkl(KERN_ERR PFX "DMA tx mapping failure\n");
1165 return NETDEV_TX_BUSY;
1166 }
1167
1168 ring->nr_tx_packets++;
1169 if (free_slots(ring) < SLOTS_PER_PACKET) {
1170 /* FIXME: we currently only have one queue */
1171 ieee80211_stop_queue(dev->wl->hw, 0);
1172 ring->stopped = 1;
1173 }
1174
1175 return 0;
1176 }
1177
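/* Handle a TX status report: resolve the cookie to a ring and slot, unmap
 * the header and payload descriptors, report the result to mac80211,
 * return the slots and wake the queue if the ring was stopped. */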
1178 void bcm43xx_dma_handle_txstatus(struct bcm43xx_wldev *dev,
1179 const struct bcm43xx_txstatus *status)
1180 {
1181 const struct bcm43xx_dma_ops *ops;
1182 struct bcm43xx_dmaring *ring;
1183 struct bcm43xx_dmadesc_generic *desc;
1184 struct bcm43xx_dmadesc_meta *meta;
1185 int slot;
1186
1187 ring = parse_cookie(dev, status->cookie, &slot);
1188 if (unlikely(!ring))
1189 return;
1190 assert(ring->tx);
1191 ops = ring->ops;
1192 while (1) {
1193 assert(slot >= 0 && slot < ring->nr_slots);
1194 desc = ops->idx2desc(ring, slot, &meta);
1195
1196 if (meta->skb)
1197 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
1198 else
1199 unmap_descbuffer(ring, meta->dmaaddr, sizeof(struct bcm43xx_txhdr_fw4), 1);
1200
1201 if (meta->is_last_fragment) {
1202 assert(meta->skb);
1203 /* Call back to inform the ieee80211 subsystem about the
1204 * status of the transmission.
1205 * Some fields of txstat are already filled in dma_tx_fragment().
1206 */
1207 if (status->acked)
1208 meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
1209 meta->txstat.retry_count = status->frame_count - 1;
1210 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, &(meta->txstat));
1211 /* skb is freed by ieee80211_tx_status_irqsafe() */
1212 meta->skb = NULL;
1213 } else {
1214 /* No need to call free_descriptor_buffer here, as
1215 * this is only the txhdr, which is part of the preallocated txhdr_cache.
1216 */
1217 assert(meta->skb == NULL);
1218 }
1219 /* Everything belonging to the slot is unmapped
1220 * and freed, so we can return it.
1221 */
1222 return_slot(ring, slot);
1223
1224 if (meta->is_last_fragment)
1225 break;
1226 slot = next_slot(ring, slot);
1227 }
1228 dev->stats.last_tx = jiffies;
1229 if (ring->stopped) {
1230 assert(free_slots(ring) >= SLOTS_PER_PACKET);
1231 /* FIXME: we currently only have one queue */
1232 ieee80211_wake_queue(dev->wl->hw, 0);
1233 ring->stopped = 0;
1234 }
1235 }
1236
1237 void bcm43xx_dma_get_tx_stats(struct bcm43xx_wldev *dev,
1238 struct ieee80211_tx_queue_stats *stats)
1239 {
1240 struct bcm43xx_dma *dma = &dev->dma;
1241 struct bcm43xx_dmaring *ring;
1242 struct ieee80211_tx_queue_stats_data *data;
1243
1244 ring = dma->tx_ring1;
1245 data = &(stats->data[0]);
1246 data->len = ring->used_slots / SLOTS_PER_PACKET;
1247 data->limit = ring->nr_slots / SLOTS_PER_PACKET;
1248 data->count = ring->nr_tx_packets;
1249 }
1250
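/* Process one received buffer. Ring 3 carries hardware TX status reports;
 * the other RX rings carry frames prefixed by a bcm43xx_rxhdr_fw4 header
 * at ring->frameoffset. */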
1251 static void dma_rx(struct bcm43xx_dmaring *ring,
1252 int *slot)
1253 {
1254 const struct bcm43xx_dma_ops *ops = ring->ops;
1255 struct bcm43xx_dmadesc_generic *desc;
1256 struct bcm43xx_dmadesc_meta *meta;
1257 struct bcm43xx_rxhdr_fw4 *rxhdr;
1258 struct sk_buff *skb;
1259 u16 len;
1260 int err;
1261 dma_addr_t dmaaddr;
1262
1263 desc = ops->idx2desc(ring, *slot, &meta);
1264
1265 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1266 skb = meta->skb;
1267
1268 if (ring->index == 3) {
1269 /* We received an xmit status. */
1270 struct bcm43xx_hwtxstatus *hw = (struct bcm43xx_hwtxstatus *)skb->data;
1271 int i = 0;
1272
1273 while (hw->cookie == 0) {
1274 if (i > 100)
1275 break;
1276 i++;
1277 udelay(2);
1278 barrier();
1279 }
1280 bcm43xx_handle_hwtxstatus(ring->dev, hw);
1281 /* recycle the descriptor buffer. */
1282 sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);
1283
1284 return;
1285 }
1286 rxhdr = (struct bcm43xx_rxhdr_fw4 *)skb->data;
1287 len = le16_to_cpu(rxhdr->frame_len);
1288 if (len == 0) {
1289 int i = 0;
1290
1291 do {
1292 udelay(2);
1293 barrier();
1294 len = le16_to_cpu(rxhdr->frame_len);
1295 } while (len == 0 && i++ < 5);
1296 if (unlikely(len == 0)) {
1297 /* recycle the descriptor buffer. */
1298 sync_descbuffer_for_device(ring, meta->dmaaddr,
1299 ring->rx_buffersize);
1300 goto drop;
1301 }
1302 }
1303 if (unlikely(len > ring->rx_buffersize)) {
1304 /* The data did not fit into one descriptor buffer
1305 * and is split over multiple buffers.
1306 * This should never happen, as we try to allocate buffers
1307 * big enough. So simply ignore this packet.
1308 */
1309 int cnt = 0;
1310 s32 tmp = len;
1311
1312 while (1) {
1313 desc = ops->idx2desc(ring, *slot, &meta);
1314 /* recycle the descriptor buffer. */
1315 sync_descbuffer_for_device(ring, meta->dmaaddr,
1316 ring->rx_buffersize);
1317 *slot = next_slot(ring, *slot);
1318 cnt++;
1319 tmp -= ring->rx_buffersize;
1320 if (tmp <= 0)
1321 break;
1322 }
1323 printkl(KERN_ERR PFX "DMA RX buffer too small "
1324 "(len: %u, buffer: %u, nr-dropped: %d)\n",
1325 len, ring->rx_buffersize, cnt);
1326 goto drop;
1327 }
1328
1329 dmaaddr = meta->dmaaddr;
1330 err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1331 if (unlikely(err)) {
1332 dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
1333 sync_descbuffer_for_device(ring, dmaaddr,
1334 ring->rx_buffersize);
1335 goto drop;
1336 }
1337
1338 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1339 skb_put(skb, len + ring->frameoffset);
1340 skb_pull(skb, ring->frameoffset);
1341
1342 bcm43xx_rx(ring->dev, skb, rxhdr);
1343 drop:
1344 return;
1345 }
1346
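/* Drain the RX ring: handle every buffer between our software slot pointer
 * and the slot the hardware is currently at, then report the new position
 * back to the hardware. */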
1347 void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
1348 {
1349 const struct bcm43xx_dma_ops *ops = ring->ops;
1350 int slot, current_slot;
1351 #ifdef CONFIG_BCM43XX_MAC80211_DEBUG
1352 int used_slots = 0;
1353 #endif
1354
1355 assert(!ring->tx);
1356 current_slot = ops->get_current_rxslot(ring);
1357 assert(current_slot >= 0 && current_slot < ring->nr_slots);
1358
1359 slot = ring->current_slot;
1360 for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
1361 dma_rx(ring, &slot);
1362 #ifdef CONFIG_BCM43XX_MAC80211_DEBUG
1363 if (++used_slots > ring->max_used_slots)
1364 ring->max_used_slots = used_slots;
1365 #endif
1366 }
1367 ops->set_current_rxslot(ring, slot);
1368 ring->current_slot = slot;
1369 }
1370
1371 void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
1372 {
1373 assert(ring->tx);
1374 bcm43xx_power_saving_ctl_bits(ring->dev, -1, 1);
1375 ring->ops->tx_suspend(ring);
1376 }
1377
1378 void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
1379 {
1380 assert(ring->tx);
1381 ring->ops->tx_resume(ring);
1382 bcm43xx_power_saving_ctl_bits(ring->dev, -1, -1);
1383 }