975-ssb_update.patch — backport of upstream ssb/b44 updates for the 2.6.35 generic target.
Converts b44 and the ssb core from the ssb_dma_* wrapper API to the generic DMA API
(via the new ssb_device->dma_dev pointer), adds a PMU-aware fast-powerup delay to
chipcommon, fixes PMU_CTL NOILPONW handling for PMU rev 1, selects the SPROM base
offset for chipcommon rev >= 31 and BCM4312 (chipcommon status & 3 == 2), and removes
the now-unused ssb_dma_* helpers from ssb.h.
(NOTE(review): the original page header carried an unrelated commit title,
"rtl8366: add enable_vlan{,4k} to smi_ops" — gitweb residue, not this patch's subject.
Each diff line below also carries a leftover gitweb line-number column; strip it
before feeding this file to `patch`/quilt.)
1 --- a/drivers/net/b44.c
2 +++ b/drivers/net/b44.c
3 @@ -135,7 +135,6 @@ static void b44_init_rings(struct b44 *)
4
5 static void b44_init_hw(struct b44 *, int);
6
7 -static int dma_desc_align_mask;
8 static int dma_desc_sync_size;
9 static int instance;
10
11 @@ -150,9 +149,8 @@ static inline void b44_sync_dma_desc_for
12 unsigned long offset,
13 enum dma_data_direction dir)
14 {
15 - ssb_dma_sync_single_range_for_device(sdev, dma_base,
16 - offset & dma_desc_align_mask,
17 - dma_desc_sync_size, dir);
18 + dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
19 + dma_desc_sync_size, dir);
20 }
21
22 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
23 @@ -160,9 +158,8 @@ static inline void b44_sync_dma_desc_for
24 unsigned long offset,
25 enum dma_data_direction dir)
26 {
27 - ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
28 - offset & dma_desc_align_mask,
29 - dma_desc_sync_size, dir);
30 + dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
31 + dma_desc_sync_size, dir);
32 }
33
34 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
35 @@ -608,10 +605,10 @@ static void b44_tx(struct b44 *bp)
36
37 BUG_ON(skb == NULL);
38
39 - ssb_dma_unmap_single(bp->sdev,
40 - rp->mapping,
41 - skb->len,
42 - DMA_TO_DEVICE);
43 + dma_unmap_single(bp->sdev->dma_dev,
44 + rp->mapping,
45 + skb->len,
46 + DMA_TO_DEVICE);
47 rp->skb = NULL;
48 dev_kfree_skb_irq(skb);
49 }
50 @@ -648,29 +645,29 @@ static int b44_alloc_rx_skb(struct b44 *
51 if (skb == NULL)
52 return -ENOMEM;
53
54 - mapping = ssb_dma_map_single(bp->sdev, skb->data,
55 - RX_PKT_BUF_SZ,
56 - DMA_FROM_DEVICE);
57 + mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
58 + RX_PKT_BUF_SZ,
59 + DMA_FROM_DEVICE);
60
61 /* Hardware bug work-around, the chip is unable to do PCI DMA
62 to/from anything above 1GB :-( */
63 - if (ssb_dma_mapping_error(bp->sdev, mapping) ||
64 + if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
65 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
66 /* Sigh... */
67 - if (!ssb_dma_mapping_error(bp->sdev, mapping))
68 - ssb_dma_unmap_single(bp->sdev, mapping,
69 + if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
70 + dma_unmap_single(bp->sdev->dma_dev, mapping,
71 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
72 dev_kfree_skb_any(skb);
73 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
74 if (skb == NULL)
75 return -ENOMEM;
76 - mapping = ssb_dma_map_single(bp->sdev, skb->data,
77 - RX_PKT_BUF_SZ,
78 - DMA_FROM_DEVICE);
79 - if (ssb_dma_mapping_error(bp->sdev, mapping) ||
80 - mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
81 - if (!ssb_dma_mapping_error(bp->sdev, mapping))
82 - ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
83 + mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
84 + RX_PKT_BUF_SZ,
85 + DMA_FROM_DEVICE);
86 + if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
87 + mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
88 + if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
89 + dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
90 dev_kfree_skb_any(skb);
91 return -ENOMEM;
92 }
93 @@ -745,9 +742,9 @@ static void b44_recycle_rx(struct b44 *b
94 dest_idx * sizeof(*dest_desc),
95 DMA_BIDIRECTIONAL);
96
97 - ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
98 - RX_PKT_BUF_SZ,
99 - DMA_FROM_DEVICE);
100 + dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
101 + RX_PKT_BUF_SZ,
102 + DMA_FROM_DEVICE);
103 }
104
105 static int b44_rx(struct b44 *bp, int budget)
106 @@ -767,9 +764,9 @@ static int b44_rx(struct b44 *bp, int bu
107 struct rx_header *rh;
108 u16 len;
109
110 - ssb_dma_sync_single_for_cpu(bp->sdev, map,
111 - RX_PKT_BUF_SZ,
112 - DMA_FROM_DEVICE);
113 + dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
114 + RX_PKT_BUF_SZ,
115 + DMA_FROM_DEVICE);
116 rh = (struct rx_header *) skb->data;
117 len = le16_to_cpu(rh->len);
118 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
119 @@ -801,8 +798,8 @@ static int b44_rx(struct b44 *bp, int bu
120 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
121 if (skb_size < 0)
122 goto drop_it;
123 - ssb_dma_unmap_single(bp->sdev, map,
124 - skb_size, DMA_FROM_DEVICE);
125 + dma_unmap_single(bp->sdev->dma_dev, map,
126 + skb_size, DMA_FROM_DEVICE);
127 /* Leave out rx_header */
128 skb_put(skb, len + RX_PKT_OFFSET);
129 skb_pull(skb, RX_PKT_OFFSET);
130 @@ -954,24 +951,24 @@ static netdev_tx_t b44_start_xmit(struct
131 goto err_out;
132 }
133
134 - mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
135 - if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
136 + mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
137 + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
138 struct sk_buff *bounce_skb;
139
140 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
141 - if (!ssb_dma_mapping_error(bp->sdev, mapping))
142 - ssb_dma_unmap_single(bp->sdev, mapping, len,
143 + if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
144 + dma_unmap_single(bp->sdev->dma_dev, mapping, len,
145 DMA_TO_DEVICE);
146
147 bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
148 if (!bounce_skb)
149 goto err_out;
150
151 - mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
152 - len, DMA_TO_DEVICE);
153 - if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
154 - if (!ssb_dma_mapping_error(bp->sdev, mapping))
155 - ssb_dma_unmap_single(bp->sdev, mapping,
156 + mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
157 + len, DMA_TO_DEVICE);
158 + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
159 + if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
160 + dma_unmap_single(bp->sdev->dma_dev, mapping,
161 len, DMA_TO_DEVICE);
162 dev_kfree_skb_any(bounce_skb);
163 goto err_out;
164 @@ -1068,8 +1065,8 @@ static void b44_free_rings(struct b44 *b
165
166 if (rp->skb == NULL)
167 continue;
168 - ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
169 - DMA_FROM_DEVICE);
170 + dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
171 + DMA_FROM_DEVICE);
172 dev_kfree_skb_any(rp->skb);
173 rp->skb = NULL;
174 }
175 @@ -1080,8 +1077,8 @@ static void b44_free_rings(struct b44 *b
176
177 if (rp->skb == NULL)
178 continue;
179 - ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
180 - DMA_TO_DEVICE);
181 + dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
182 + DMA_TO_DEVICE);
183 dev_kfree_skb_any(rp->skb);
184 rp->skb = NULL;
185 }
186 @@ -1103,14 +1100,12 @@ static void b44_init_rings(struct b44 *b
187 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
188
189 if (bp->flags & B44_FLAG_RX_RING_HACK)
190 - ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
191 - DMA_TABLE_BYTES,
192 - DMA_BIDIRECTIONAL);
193 + dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
194 + DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
195
196 if (bp->flags & B44_FLAG_TX_RING_HACK)
197 - ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
198 - DMA_TABLE_BYTES,
199 - DMA_TO_DEVICE);
200 + dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
201 + DMA_TABLE_BYTES, DMA_TO_DEVICE);
202
203 for (i = 0; i < bp->rx_pending; i++) {
204 if (b44_alloc_rx_skb(bp, -1, i) < 0)
205 @@ -1130,27 +1125,23 @@ static void b44_free_consistent(struct b
206 bp->tx_buffers = NULL;
207 if (bp->rx_ring) {
208 if (bp->flags & B44_FLAG_RX_RING_HACK) {
209 - ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
210 - DMA_TABLE_BYTES,
211 - DMA_BIDIRECTIONAL);
212 + dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
213 + DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
214 kfree(bp->rx_ring);
215 } else
216 - ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
217 - bp->rx_ring, bp->rx_ring_dma,
218 - GFP_KERNEL);
219 + dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
220 + bp->rx_ring, bp->rx_ring_dma);
221 bp->rx_ring = NULL;
222 bp->flags &= ~B44_FLAG_RX_RING_HACK;
223 }
224 if (bp->tx_ring) {
225 if (bp->flags & B44_FLAG_TX_RING_HACK) {
226 - ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
227 - DMA_TABLE_BYTES,
228 - DMA_TO_DEVICE);
229 + dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
230 + DMA_TABLE_BYTES, DMA_TO_DEVICE);
231 kfree(bp->tx_ring);
232 } else
233 - ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
234 - bp->tx_ring, bp->tx_ring_dma,
235 - GFP_KERNEL);
236 + dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
237 + bp->tx_ring, bp->tx_ring_dma);
238 bp->tx_ring = NULL;
239 bp->flags &= ~B44_FLAG_TX_RING_HACK;
240 }
241 @@ -1175,7 +1166,8 @@ static int b44_alloc_consistent(struct b
242 goto out_err;
243
244 size = DMA_TABLE_BYTES;
245 - bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
246 + bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
247 + &bp->rx_ring_dma, gfp);
248 if (!bp->rx_ring) {
249 /* Allocation may have failed due to pci_alloc_consistent
250 insisting on use of GFP_DMA, which is more restrictive
251 @@ -1187,11 +1179,11 @@ static int b44_alloc_consistent(struct b
252 if (!rx_ring)
253 goto out_err;
254
255 - rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
256 - DMA_TABLE_BYTES,
257 - DMA_BIDIRECTIONAL);
258 + rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
259 + DMA_TABLE_BYTES,
260 + DMA_BIDIRECTIONAL);
261
262 - if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
263 + if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
264 rx_ring_dma + size > DMA_BIT_MASK(30)) {
265 kfree(rx_ring);
266 goto out_err;
267 @@ -1202,7 +1194,8 @@ static int b44_alloc_consistent(struct b
268 bp->flags |= B44_FLAG_RX_RING_HACK;
269 }
270
271 - bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
272 + bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
273 + &bp->tx_ring_dma, gfp);
274 if (!bp->tx_ring) {
275 /* Allocation may have failed due to ssb_dma_alloc_consistent
276 insisting on use of GFP_DMA, which is more restrictive
277 @@ -1214,11 +1207,11 @@ static int b44_alloc_consistent(struct b
278 if (!tx_ring)
279 goto out_err;
280
281 - tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
282 - DMA_TABLE_BYTES,
283 - DMA_TO_DEVICE);
284 + tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
285 + DMA_TABLE_BYTES,
286 + DMA_TO_DEVICE);
287
288 - if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
289 + if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
290 tx_ring_dma + size > DMA_BIT_MASK(30)) {
291 kfree(tx_ring);
292 goto out_err;
293 @@ -2176,12 +2169,14 @@ static int __devinit b44_init_one(struct
294 "Failed to powerup the bus\n");
295 goto err_out_free_dev;
296 }
297 - err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
298 - if (err) {
299 +
300 + if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
301 + dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
302 dev_err(sdev->dev,
303 "Required 30BIT DMA mask unsupported by the system\n");
304 goto err_out_powerdown;
305 }
306 +
307 err = b44_get_invariants(bp);
308 if (err) {
309 dev_err(sdev->dev,
310 @@ -2344,7 +2339,6 @@ static int __init b44_init(void)
311 int err;
312
313 /* Setup paramaters for syncing RX/TX DMA descriptors */
314 - dma_desc_align_mask = ~(dma_desc_align_size - 1);
315 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
316
317 err = b44_pci_init();
318 --- a/drivers/ssb/driver_chipcommon.c
319 +++ b/drivers/ssb/driver_chipcommon.c
320 @@ -209,6 +209,24 @@ static void chipco_powercontrol_init(str
321 }
322 }
323
324 +/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */
325 +static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc)
326 +{
327 + struct ssb_bus *bus = cc->dev->bus;
328 +
329 + switch (bus->chip_id) {
330 + case 0x4312:
331 + case 0x4322:
332 + case 0x4328:
333 + return 7000;
334 + case 0x4325:
335 + /* TODO: */
336 + default:
337 + return 15000;
338 + }
339 +}
340 +
341 +/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */
342 static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
343 {
344 struct ssb_bus *bus = cc->dev->bus;
345 @@ -218,6 +236,12 @@ static void calc_fast_powerup_delay(stru
346
347 if (bus->bustype != SSB_BUSTYPE_PCI)
348 return;
349 +
350 + if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
351 + cc->fast_pwrup_delay = pmu_fast_powerup_delay(cc);
352 + return;
353 + }
354 +
355 if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
356 return;
357
358 @@ -235,6 +259,7 @@ void ssb_chipcommon_init(struct ssb_chip
359 return; /* We don't have a ChipCommon */
360 if (cc->dev->id.revision >= 11)
361 cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
362 + ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
363 ssb_pmu_init(cc);
364 chipco_powercontrol_init(cc);
365 ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
366 --- a/drivers/ssb/driver_chipcommon_pmu.c
367 +++ b/drivers/ssb/driver_chipcommon_pmu.c
368 @@ -502,9 +502,9 @@ static void ssb_pmu_resources_init(struc
369 chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
370 }
371
372 +/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
373 void ssb_pmu_init(struct ssb_chipcommon *cc)
374 {
375 - struct ssb_bus *bus = cc->dev->bus;
376 u32 pmucap;
377
378 if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
379 @@ -516,15 +516,12 @@ void ssb_pmu_init(struct ssb_chipcommon
380 ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
381 cc->pmu.rev, pmucap);
382
383 - if (cc->pmu.rev >= 1) {
384 - if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) {
385 - chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
386 - ~SSB_CHIPCO_PMU_CTL_NOILPONW);
387 - } else {
388 - chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
389 - SSB_CHIPCO_PMU_CTL_NOILPONW);
390 - }
391 - }
392 + if (cc->pmu.rev == 1)
393 + chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
394 + ~SSB_CHIPCO_PMU_CTL_NOILPONW);
395 + else
396 + chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
397 + SSB_CHIPCO_PMU_CTL_NOILPONW);
398 ssb_pmu_pll_init(cc);
399 ssb_pmu_resources_init(cc);
400 }
401 --- a/drivers/ssb/main.c
402 +++ b/drivers/ssb/main.c
403 @@ -486,6 +486,7 @@ static int ssb_devices_register(struct s
404 #ifdef CONFIG_SSB_PCIHOST
405 sdev->irq = bus->host_pci->irq;
406 dev->parent = &bus->host_pci->dev;
407 + sdev->dma_dev = dev->parent;
408 #endif
409 break;
410 case SSB_BUSTYPE_PCMCIA:
411 @@ -501,6 +502,7 @@ static int ssb_devices_register(struct s
412 break;
413 case SSB_BUSTYPE_SSB:
414 dev->dma_mask = &dev->coherent_dma_mask;
415 + sdev->dma_dev = dev;
416 break;
417 }
418
419 @@ -1226,80 +1228,6 @@ u32 ssb_dma_translation(struct ssb_devic
420 }
421 EXPORT_SYMBOL(ssb_dma_translation);
422
423 -int ssb_dma_set_mask(struct ssb_device *dev, u64 mask)
424 -{
425 -#ifdef CONFIG_SSB_PCIHOST
426 - int err;
427 -#endif
428 -
429 - switch (dev->bus->bustype) {
430 - case SSB_BUSTYPE_PCI:
431 -#ifdef CONFIG_SSB_PCIHOST
432 - err = pci_set_dma_mask(dev->bus->host_pci, mask);
433 - if (err)
434 - return err;
435 - err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask);
436 - return err;
437 -#endif
438 - case SSB_BUSTYPE_SSB:
439 - return dma_set_mask(dev->dev, mask);
440 - default:
441 - __ssb_dma_not_implemented(dev);
442 - }
443 - return -ENOSYS;
444 -}
445 -EXPORT_SYMBOL(ssb_dma_set_mask);
446 -
447 -void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
448 - dma_addr_t *dma_handle, gfp_t gfp_flags)
449 -{
450 - switch (dev->bus->bustype) {
451 - case SSB_BUSTYPE_PCI:
452 -#ifdef CONFIG_SSB_PCIHOST
453 - if (gfp_flags & GFP_DMA) {
454 - /* Workaround: The PCI API does not support passing
455 - * a GFP flag. */
456 - return dma_alloc_coherent(&dev->bus->host_pci->dev,
457 - size, dma_handle, gfp_flags);
458 - }
459 - return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle);
460 -#endif
461 - case SSB_BUSTYPE_SSB:
462 - return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags);
463 - default:
464 - __ssb_dma_not_implemented(dev);
465 - }
466 - return NULL;
467 -}
468 -EXPORT_SYMBOL(ssb_dma_alloc_consistent);
469 -
470 -void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
471 - void *vaddr, dma_addr_t dma_handle,
472 - gfp_t gfp_flags)
473 -{
474 - switch (dev->bus->bustype) {
475 - case SSB_BUSTYPE_PCI:
476 -#ifdef CONFIG_SSB_PCIHOST
477 - if (gfp_flags & GFP_DMA) {
478 - /* Workaround: The PCI API does not support passing
479 - * a GFP flag. */
480 - dma_free_coherent(&dev->bus->host_pci->dev,
481 - size, vaddr, dma_handle);
482 - return;
483 - }
484 - pci_free_consistent(dev->bus->host_pci, size,
485 - vaddr, dma_handle);
486 - return;
487 -#endif
488 - case SSB_BUSTYPE_SSB:
489 - dma_free_coherent(dev->dev, size, vaddr, dma_handle);
490 - return;
491 - default:
492 - __ssb_dma_not_implemented(dev);
493 - }
494 -}
495 -EXPORT_SYMBOL(ssb_dma_free_consistent);
496 -
497 int ssb_bus_may_powerdown(struct ssb_bus *bus)
498 {
499 struct ssb_chipcommon *cc;
500 --- a/drivers/ssb/pci.c
501 +++ b/drivers/ssb/pci.c
502 @@ -626,11 +626,22 @@ static int ssb_pci_sprom_get(struct ssb_
503 return -ENODEV;
504 }
505 if (bus->chipco.dev) { /* can be unavailible! */
506 - bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
507 - SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
508 + /*
509 + * get SPROM offset: SSB_SPROM_BASE1 except for
510 + * chipcommon rev >= 31 or chip ID is 0x4312 and
511 + * chipcommon status & 3 == 2
512 + */
513 + if (bus->chipco.dev->id.revision >= 31)
514 + bus->sprom_offset = SSB_SPROM_BASE31;
515 + else if (bus->chip_id == 0x4312 &&
516 + (bus->chipco.status & 0x03) == 2)
517 + bus->sprom_offset = SSB_SPROM_BASE31;
518 + else
519 + bus->sprom_offset = SSB_SPROM_BASE1;
520 } else {
521 bus->sprom_offset = SSB_SPROM_BASE1;
522 }
523 + ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
524
525 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
526 if (!buf)
527 --- a/include/linux/ssb/ssb.h
528 +++ b/include/linux/ssb/ssb.h
529 @@ -167,7 +167,7 @@ struct ssb_device {
530 * is an optimization. */
531 const struct ssb_bus_ops *ops;
532
533 - struct device *dev;
534 + struct device *dev, *dma_dev;
535
536 struct ssb_bus *bus;
537 struct ssb_device_id id;
538 @@ -470,14 +470,6 @@ extern u32 ssb_dma_translation(struct ss
539 #define SSB_DMA_TRANSLATION_MASK 0xC0000000
540 #define SSB_DMA_TRANSLATION_SHIFT 30
541
542 -extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask);
543 -
544 -extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
545 - dma_addr_t *dma_handle, gfp_t gfp_flags);
546 -extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
547 - void *vaddr, dma_addr_t dma_handle,
548 - gfp_t gfp_flags);
549 -
550 static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
551 {
552 #ifdef CONFIG_SSB_DEBUG
553 @@ -486,155 +478,6 @@ static inline void __cold __ssb_dma_not_
554 #endif /* DEBUG */
555 }
556
557 -static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
558 -{
559 - switch (dev->bus->bustype) {
560 - case SSB_BUSTYPE_PCI:
561 -#ifdef CONFIG_SSB_PCIHOST
562 - return pci_dma_mapping_error(dev->bus->host_pci, addr);
563 -#endif
564 - break;
565 - case SSB_BUSTYPE_SSB:
566 - return dma_mapping_error(dev->dev, addr);
567 - default:
568 - break;
569 - }
570 - __ssb_dma_not_implemented(dev);
571 - return -ENOSYS;
572 -}
573 -
574 -static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
575 - size_t size, enum dma_data_direction dir)
576 -{
577 - switch (dev->bus->bustype) {
578 - case SSB_BUSTYPE_PCI:
579 -#ifdef CONFIG_SSB_PCIHOST
580 - return pci_map_single(dev->bus->host_pci, p, size, dir);
581 -#endif
582 - break;
583 - case SSB_BUSTYPE_SSB:
584 - return dma_map_single(dev->dev, p, size, dir);
585 - default:
586 - break;
587 - }
588 - __ssb_dma_not_implemented(dev);
589 - return 0;
590 -}
591 -
592 -static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr,
593 - size_t size, enum dma_data_direction dir)
594 -{
595 - switch (dev->bus->bustype) {
596 - case SSB_BUSTYPE_PCI:
597 -#ifdef CONFIG_SSB_PCIHOST
598 - pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
599 - return;
600 -#endif
601 - break;
602 - case SSB_BUSTYPE_SSB:
603 - dma_unmap_single(dev->dev, dma_addr, size, dir);
604 - return;
605 - default:
606 - break;
607 - }
608 - __ssb_dma_not_implemented(dev);
609 -}
610 -
611 -static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
612 - dma_addr_t dma_addr,
613 - size_t size,
614 - enum dma_data_direction dir)
615 -{
616 - switch (dev->bus->bustype) {
617 - case SSB_BUSTYPE_PCI:
618 -#ifdef CONFIG_SSB_PCIHOST
619 - pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
620 - size, dir);
621 - return;
622 -#endif
623 - break;
624 - case SSB_BUSTYPE_SSB:
625 - dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
626 - return;
627 - default:
628 - break;
629 - }
630 - __ssb_dma_not_implemented(dev);
631 -}
632 -
633 -static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
634 - dma_addr_t dma_addr,
635 - size_t size,
636 - enum dma_data_direction dir)
637 -{
638 - switch (dev->bus->bustype) {
639 - case SSB_BUSTYPE_PCI:
640 -#ifdef CONFIG_SSB_PCIHOST
641 - pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
642 - size, dir);
643 - return;
644 -#endif
645 - break;
646 - case SSB_BUSTYPE_SSB:
647 - dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
648 - return;
649 - default:
650 - break;
651 - }
652 - __ssb_dma_not_implemented(dev);
653 -}
654 -
655 -static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
656 - dma_addr_t dma_addr,
657 - unsigned long offset,
658 - size_t size,
659 - enum dma_data_direction dir)
660 -{
661 - switch (dev->bus->bustype) {
662 - case SSB_BUSTYPE_PCI:
663 -#ifdef CONFIG_SSB_PCIHOST
664 - /* Just sync everything. That's all the PCI API can do. */
665 - pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
666 - offset + size, dir);
667 - return;
668 -#endif
669 - break;
670 - case SSB_BUSTYPE_SSB:
671 - dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
672 - size, dir);
673 - return;
674 - default:
675 - break;
676 - }
677 - __ssb_dma_not_implemented(dev);
678 -}
679 -
680 -static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
681 - dma_addr_t dma_addr,
682 - unsigned long offset,
683 - size_t size,
684 - enum dma_data_direction dir)
685 -{
686 - switch (dev->bus->bustype) {
687 - case SSB_BUSTYPE_PCI:
688 -#ifdef CONFIG_SSB_PCIHOST
689 - /* Just sync everything. That's all the PCI API can do. */
690 - pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
691 - offset + size, dir);
692 - return;
693 -#endif
694 - break;
695 - case SSB_BUSTYPE_SSB:
696 - dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
697 - size, dir);
698 - return;
699 - default:
700 - break;
701 - }
702 - __ssb_dma_not_implemented(dev);
703 -}
704 -
705 -
706 #ifdef CONFIG_SSB_PCIHOST
707 /* PCI-host wrapper driver */
708 extern int ssb_pcihost_register(struct pci_driver *driver);