[ar71xx] fix gpio-leds and gpio-buttons platform data initialization
[openwrt.git] / target / linux / brcm47xx / patches-2.6.23 / 120-b44_ssb_support.patch
1 --- a/drivers/net/b44.c
2 +++ b/drivers/net/b44.c
3 @@ -1,7 +1,9 @@
4 -/* b44.c: Broadcom 4400 device driver.
5 +/* b44.c: Broadcom 4400/47xx device driver.
6 *
7 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
8 - * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
9 + * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
10 + * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
11 + * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
12 * Copyright (C) 2006 Broadcom Corporation.
13 *
14 * Distribute under GPL.
15 @@ -21,11 +23,13 @@
16 #include <linux/delay.h>
17 #include <linux/init.h>
18 #include <linux/dma-mapping.h>
19 +#include <linux/ssb/ssb.h>
20
21 #include <asm/uaccess.h>
22 #include <asm/io.h>
23 #include <asm/irq.h>
24
25 +
26 #include "b44.h"
27
28 #define DRV_MODULE_NAME "b44"
29 @@ -87,8 +91,8 @@
30 static char version[] __devinitdata =
31 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
32
33 -MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
34 -MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
35 +MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
36 +MODULE_DESCRIPTION("Broadcom 4400/47xx 10/100 PCI ethernet driver");
37 MODULE_LICENSE("GPL");
38 MODULE_VERSION(DRV_MODULE_VERSION);
39
40 @@ -96,18 +100,11 @@
41 module_param(b44_debug, int, 0);
42 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
43
44 -static struct pci_device_id b44_pci_tbl[] = {
45 - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
46 - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
47 - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
48 - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
49 - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
50 - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
51 - { } /* terminate list with empty entry */
52 +static struct ssb_device_id b44_ssb_tbl[] = {
53 + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
54 + SSB_DEVTABLE_END
55 };
56
57 -MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
58 -
59 static void b44_halt(struct b44 *);
60 static void b44_init_rings(struct b44 *);
61
62 @@ -119,6 +116,7 @@
63
64 static int dma_desc_align_mask;
65 static int dma_desc_sync_size;
66 +static int instance;
67
68 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
69 #define _B44(x...) # x,
70 @@ -126,35 +124,24 @@
71 #undef _B44
72 };
73
74 -static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
75 - dma_addr_t dma_base,
76 - unsigned long offset,
77 - enum dma_data_direction dir)
78 -{
79 - dma_sync_single_range_for_device(&pdev->dev, dma_base,
80 - offset & dma_desc_align_mask,
81 - dma_desc_sync_size, dir);
82 -}
83 -
84 -static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
85 - dma_addr_t dma_base,
86 - unsigned long offset,
87 - enum dma_data_direction dir)
88 -{
89 - dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
90 - offset & dma_desc_align_mask,
91 - dma_desc_sync_size, dir);
92 -}
93 -
94 -static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
95 -{
96 - return readl(bp->regs + reg);
97 -}
98 -
99 -static inline void bw32(const struct b44 *bp,
100 - unsigned long reg, unsigned long val)
101 -{
102 - writel(val, bp->regs + reg);
103 +static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
104 + dma_addr_t dma_base,
105 + unsigned long offset,
106 + enum dma_data_direction dir)
107 +{
108 + dma_sync_single_range_for_device(sdev->dev, dma_base,
109 + offset & dma_desc_align_mask,
110 + dma_desc_sync_size, dir);
111 +}
112 +
113 +static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
114 + dma_addr_t dma_base,
115 + unsigned long offset,
116 + enum dma_data_direction dir)
117 +{
118 + dma_sync_single_range_for_cpu(sdev->dev, dma_base,
119 + offset & dma_desc_align_mask,
120 + dma_desc_sync_size, dir);
121 }
122
123 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
124 @@ -182,117 +169,29 @@
125 return 0;
126 }
127
128 -/* Sonics SiliconBackplane support routines. ROFL, you should see all the
129 - * buzz words used on this company's website :-)
130 - *
131 - * All of these routines must be invoked with bp->lock held and
132 - * interrupts disabled.
133 - */
134 -
135 -#define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
136 -#define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
137 -
138 -static u32 ssb_get_core_rev(struct b44 *bp)
139 -{
140 - return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
141 -}
142 -
143 -static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
144 -{
145 - u32 bar_orig, pci_rev, val;
146 -
147 - pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
148 - pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
149 - pci_rev = ssb_get_core_rev(bp);
150 -
151 - val = br32(bp, B44_SBINTVEC);
152 - val |= cores;
153 - bw32(bp, B44_SBINTVEC, val);
154 -
155 - val = br32(bp, SSB_PCI_TRANS_2);
156 - val |= SSB_PCI_PREF | SSB_PCI_BURST;
157 - bw32(bp, SSB_PCI_TRANS_2, val);
158 -
159 - pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
160 -
161 - return pci_rev;
162 -}
163 -
164 -static void ssb_core_disable(struct b44 *bp)
165 -{
166 - if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
167 - return;
168 -
169 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
170 - b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
171 - b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
172 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
173 - SBTMSLOW_REJECT | SBTMSLOW_RESET));
174 - br32(bp, B44_SBTMSLOW);
175 - udelay(1);
176 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
177 - br32(bp, B44_SBTMSLOW);
178 - udelay(1);
179 -}
180 -
181 -static void ssb_core_reset(struct b44 *bp)
182 +static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
183 {
184 u32 val;
185
186 - ssb_core_disable(bp);
187 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
188 - br32(bp, B44_SBTMSLOW);
189 - udelay(1);
190 -
191 - /* Clear SERR if set, this is a hw bug workaround. */
192 - if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
193 - bw32(bp, B44_SBTMSHIGH, 0);
194 -
195 - val = br32(bp, B44_SBIMSTATE);
196 - if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
197 - bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
198 -
199 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
200 - br32(bp, B44_SBTMSLOW);
201 - udelay(1);
202 + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
203 + (index << CAM_CTRL_INDEX_SHIFT)));
204
205 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
206 - br32(bp, B44_SBTMSLOW);
207 - udelay(1);
208 -}
209 + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
210
211 -static int ssb_core_unit(struct b44 *bp)
212 -{
213 -#if 0
214 - u32 val = br32(bp, B44_SBADMATCH0);
215 - u32 base;
216 + val = br32(bp, B44_CAM_DATA_LO);
217
218 - type = val & SBADMATCH0_TYPE_MASK;
219 - switch (type) {
220 - case 0:
221 - base = val & SBADMATCH0_BS0_MASK;
222 - break;
223 + data[2] = (val >> 24) & 0xFF;
224 + data[3] = (val >> 16) & 0xFF;
225 + data[4] = (val >> 8) & 0xFF;
226 + data[5] = (val >> 0) & 0xFF;
227
228 - case 1:
229 - base = val & SBADMATCH0_BS1_MASK;
230 - break;
231 + val = br32(bp, B44_CAM_DATA_HI);
232
233 - case 2:
234 - default:
235 - base = val & SBADMATCH0_BS2_MASK;
236 - break;
237 - };
238 -#endif
239 - return 0;
240 + data[0] = (val >> 8) & 0xFF;
241 + data[1] = (val >> 0) & 0xFF;
242 }
243
244 -static int ssb_is_core_up(struct b44 *bp)
245 -{
246 - return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
247 - == SBTMSLOW_CLOCK);
248 -}
249 -
250 -static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
251 +static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
252 {
253 u32 val;
254
255 @@ -328,14 +227,14 @@
256 bw32(bp, B44_IMASK, bp->imask);
257 }
258
259 -static int b44_readphy(struct b44 *bp, int reg, u32 *val)
260 +static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
261 {
262 int err;
263
264 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
265 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
266 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
267 - (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
268 + (phy_addr << MDIO_DATA_PMD_SHIFT) |
269 (reg << MDIO_DATA_RA_SHIFT) |
270 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
271 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
272 @@ -344,18 +243,34 @@
273 return err;
274 }
275
276 -static int b44_writephy(struct b44 *bp, int reg, u32 val)
277 +static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
278 {
279 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
280 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
281 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
282 - (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
283 + (phy_addr << MDIO_DATA_PMD_SHIFT) |
284 (reg << MDIO_DATA_RA_SHIFT) |
285 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
286 (val & MDIO_DATA_DATA)));
287 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
288 }
289
290 +static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
291 +{
292 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
293 + return 0;
294 +
295 + return __b44_readphy(bp, bp->phy_addr, reg, val);
296 +}
297 +
298 +static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
299 +{
300 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
301 + return 0;
302 +
303 + return __b44_writephy(bp, bp->phy_addr, reg, val);
304 +}
305 +
306 /* miilib interface */
307 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
308 * due to code existing before miilib use was added to this driver.
309 @@ -384,6 +299,8 @@
310 u32 val;
311 int err;
312
313 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
314 + return 0;
315 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
316 if (err)
317 return err;
318 @@ -442,11 +359,27 @@
319 __b44_set_flow_ctrl(bp, pause_enab);
320 }
321
322 +
323 +extern char *nvram_get(char *name); //FIXME: move elsewhere
324 static int b44_setup_phy(struct b44 *bp)
325 {
326 u32 val;
327 int err;
328
329 + /*
330 + * workaround for bad hardware design in Linksys WAP54G v1.0
331 + * see https://dev.openwrt.org/ticket/146
332 + * check and reset bit "isolate"
333 + */
334 + if ((atoi(nvram_get("boardnum")) == 2) &&
335 + (__b44_readphy(bp, 0, MII_BMCR, &val) == 0) &&
336 + (val & BMCR_ISOLATE) &&
337 + (__b44_writephy(bp, 0, MII_BMCR, val & ~BMCR_ISOLATE) != 0)) {
338 + printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
339 + }
340 +
341 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
342 + return 0;
343 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
344 goto out;
345 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
346 @@ -542,6 +475,19 @@
347 {
348 u32 bmsr, aux;
349
350 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
351 + bp->flags |= B44_FLAG_100_BASE_T;
352 + bp->flags |= B44_FLAG_FULL_DUPLEX;
353 + if (!netif_carrier_ok(bp->dev)) {
354 + u32 val = br32(bp, B44_TX_CTRL);
355 + val |= TX_CTRL_DUPLEX;
356 + bw32(bp, B44_TX_CTRL, val);
357 + netif_carrier_on(bp->dev);
358 + b44_link_report(bp);
359 + }
360 + return;
361 + }
362 +
363 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
364 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
365 (bmsr != 0xffff)) {
366 @@ -617,10 +563,10 @@
367
368 BUG_ON(skb == NULL);
369
370 - pci_unmap_single(bp->pdev,
371 + dma_unmap_single(bp->sdev->dev,
372 pci_unmap_addr(rp, mapping),
373 skb->len,
374 - PCI_DMA_TODEVICE);
375 + DMA_TO_DEVICE);
376 rp->skb = NULL;
377 dev_kfree_skb_irq(skb);
378 }
379 @@ -657,9 +603,9 @@
380 if (skb == NULL)
381 return -ENOMEM;
382
383 - mapping = pci_map_single(bp->pdev, skb->data,
384 + mapping = dma_map_single(bp->sdev->dev, skb->data,
385 RX_PKT_BUF_SZ,
386 - PCI_DMA_FROMDEVICE);
387 + DMA_FROM_DEVICE);
388
389 /* Hardware bug work-around, the chip is unable to do PCI DMA
390 to/from anything above 1GB :-( */
391 @@ -667,18 +613,18 @@
392 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
393 /* Sigh... */
394 if (!dma_mapping_error(mapping))
395 - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
396 + dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
397 dev_kfree_skb_any(skb);
398 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
399 if (skb == NULL)
400 return -ENOMEM;
401 - mapping = pci_map_single(bp->pdev, skb->data,
402 + mapping = dma_map_single(bp->sdev->dev, skb->data,
403 RX_PKT_BUF_SZ,
404 - PCI_DMA_FROMDEVICE);
405 + DMA_FROM_DEVICE);
406 if (dma_mapping_error(mapping) ||
407 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
408 if (!dma_mapping_error(mapping))
409 - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
410 + dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
411 dev_kfree_skb_any(skb);
412 return -ENOMEM;
413 }
414 @@ -705,9 +651,9 @@
415 dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
416
417 if (bp->flags & B44_FLAG_RX_RING_HACK)
418 - b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
419 - dest_idx * sizeof(dp),
420 - DMA_BIDIRECTIONAL);
421 + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
422 + dest_idx * sizeof(dp),
423 + DMA_BIDIRECTIONAL);
424
425 return RX_PKT_BUF_SZ;
426 }
427 @@ -734,9 +680,9 @@
428 pci_unmap_addr(src_map, mapping));
429
430 if (bp->flags & B44_FLAG_RX_RING_HACK)
431 - b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
432 - src_idx * sizeof(src_desc),
433 - DMA_BIDIRECTIONAL);
434 + b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
435 + src_idx * sizeof(src_desc),
436 + DMA_BIDIRECTIONAL);
437
438 ctrl = src_desc->ctrl;
439 if (dest_idx == (B44_RX_RING_SIZE - 1))
440 @@ -750,13 +696,13 @@
441 src_map->skb = NULL;
442
443 if (bp->flags & B44_FLAG_RX_RING_HACK)
444 - b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
445 - dest_idx * sizeof(dest_desc),
446 - DMA_BIDIRECTIONAL);
447 + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
448 + dest_idx * sizeof(dest_desc),
449 + DMA_BIDIRECTIONAL);
450
451 - pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
452 + dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
453 RX_PKT_BUF_SZ,
454 - PCI_DMA_FROMDEVICE);
455 + DMA_FROM_DEVICE);
456 }
457
458 static int b44_rx(struct b44 *bp, int budget)
459 @@ -776,9 +722,9 @@
460 struct rx_header *rh;
461 u16 len;
462
463 - pci_dma_sync_single_for_cpu(bp->pdev, map,
464 + dma_sync_single_for_cpu(bp->sdev->dev, map,
465 RX_PKT_BUF_SZ,
466 - PCI_DMA_FROMDEVICE);
467 + DMA_FROM_DEVICE);
468 rh = (struct rx_header *) skb->data;
469 len = le16_to_cpu(rh->len);
470 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
471 @@ -810,8 +756,8 @@
472 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
473 if (skb_size < 0)
474 goto drop_it;
475 - pci_unmap_single(bp->pdev, map,
476 - skb_size, PCI_DMA_FROMDEVICE);
477 + dma_unmap_single(bp->sdev->dev, map,
478 + skb_size, DMA_FROM_DEVICE);
479 /* Leave out rx_header */
480 skb_put(skb, len + RX_PKT_OFFSET);
481 skb_pull(skb, RX_PKT_OFFSET);
482 @@ -982,24 +928,24 @@
483 goto err_out;
484 }
485
486 - mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
487 + mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
488 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
489 struct sk_buff *bounce_skb;
490
491 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
492 if (!dma_mapping_error(mapping))
493 - pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
494 + dma_unmap_single(bp->sdev->dev, mapping, len, DMA_TO_DEVICE);
495
496 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
497 if (!bounce_skb)
498 goto err_out;
499
500 - mapping = pci_map_single(bp->pdev, bounce_skb->data,
501 - len, PCI_DMA_TODEVICE);
502 + mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
503 + len, DMA_TO_DEVICE);
504 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
505 if (!dma_mapping_error(mapping))
506 - pci_unmap_single(bp->pdev, mapping,
507 - len, PCI_DMA_TODEVICE);
508 + dma_unmap_single(bp->sdev->dev, mapping,
509 + len, DMA_TO_DEVICE);
510 dev_kfree_skb_any(bounce_skb);
511 goto err_out;
512 }
513 @@ -1022,9 +968,9 @@
514 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
515
516 if (bp->flags & B44_FLAG_TX_RING_HACK)
517 - b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
518 - entry * sizeof(bp->tx_ring[0]),
519 - DMA_TO_DEVICE);
520 + b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
521 + entry * sizeof(bp->tx_ring[0]),
522 + DMA_TO_DEVICE);
523
524 entry = NEXT_TX(entry);
525
526 @@ -1097,10 +1043,10 @@
527
528 if (rp->skb == NULL)
529 continue;
530 - pci_unmap_single(bp->pdev,
531 + dma_unmap_single(bp->sdev->dev,
532 pci_unmap_addr(rp, mapping),
533 RX_PKT_BUF_SZ,
534 - PCI_DMA_FROMDEVICE);
535 + DMA_FROM_DEVICE);
536 dev_kfree_skb_any(rp->skb);
537 rp->skb = NULL;
538 }
539 @@ -1111,10 +1057,10 @@
540
541 if (rp->skb == NULL)
542 continue;
543 - pci_unmap_single(bp->pdev,
544 + dma_unmap_single(bp->sdev->dev,
545 pci_unmap_addr(rp, mapping),
546 rp->skb->len,
547 - PCI_DMA_TODEVICE);
548 + DMA_TO_DEVICE);
549 dev_kfree_skb_any(rp->skb);
550 rp->skb = NULL;
551 }
552 @@ -1136,14 +1082,14 @@
553 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
554
555 if (bp->flags & B44_FLAG_RX_RING_HACK)
556 - dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
557 - DMA_TABLE_BYTES,
558 - PCI_DMA_BIDIRECTIONAL);
559 + dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
560 + DMA_TABLE_BYTES,
561 + DMA_BIDIRECTIONAL);
562
563 if (bp->flags & B44_FLAG_TX_RING_HACK)
564 - dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
565 - DMA_TABLE_BYTES,
566 - PCI_DMA_TODEVICE);
567 + dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
568 + DMA_TABLE_BYTES,
569 + DMA_TO_DEVICE);
570
571 for (i = 0; i < bp->rx_pending; i++) {
572 if (b44_alloc_rx_skb(bp, -1, i) < 0)
573 @@ -1163,24 +1109,24 @@
574 bp->tx_buffers = NULL;
575 if (bp->rx_ring) {
576 if (bp->flags & B44_FLAG_RX_RING_HACK) {
577 - dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
578 - DMA_TABLE_BYTES,
579 - DMA_BIDIRECTIONAL);
580 + dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
581 + DMA_TABLE_BYTES,
582 + DMA_BIDIRECTIONAL);
583 kfree(bp->rx_ring);
584 } else
585 - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
586 + dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
587 bp->rx_ring, bp->rx_ring_dma);
588 bp->rx_ring = NULL;
589 bp->flags &= ~B44_FLAG_RX_RING_HACK;
590 }
591 if (bp->tx_ring) {
592 if (bp->flags & B44_FLAG_TX_RING_HACK) {
593 - dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
594 - DMA_TABLE_BYTES,
595 - DMA_TO_DEVICE);
596 + dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
597 + DMA_TABLE_BYTES,
598 + DMA_TO_DEVICE);
599 kfree(bp->tx_ring);
600 } else
601 - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
602 + dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
603 bp->tx_ring, bp->tx_ring_dma);
604 bp->tx_ring = NULL;
605 bp->flags &= ~B44_FLAG_TX_RING_HACK;
606 @@ -1206,7 +1152,7 @@
607 goto out_err;
608
609 size = DMA_TABLE_BYTES;
610 - bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
611 + bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, GFP_ATOMIC);
612 if (!bp->rx_ring) {
613 /* Allocation may have failed due to pci_alloc_consistent
614 insisting on use of GFP_DMA, which is more restrictive
615 @@ -1218,9 +1164,9 @@
616 if (!rx_ring)
617 goto out_err;
618
619 - rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
620 - DMA_TABLE_BYTES,
621 - DMA_BIDIRECTIONAL);
622 + rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
623 + DMA_TABLE_BYTES,
624 + DMA_BIDIRECTIONAL);
625
626 if (dma_mapping_error(rx_ring_dma) ||
627 rx_ring_dma + size > DMA_30BIT_MASK) {
628 @@ -1233,9 +1179,9 @@
629 bp->flags |= B44_FLAG_RX_RING_HACK;
630 }
631
632 - bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
633 + bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, GFP_ATOMIC);
634 if (!bp->tx_ring) {
635 - /* Allocation may have failed due to pci_alloc_consistent
636 + /* Allocation may have failed due to dma_alloc_coherent
637 insisting on use of GFP_DMA, which is more restrictive
638 than necessary... */
639 struct dma_desc *tx_ring;
640 @@ -1245,9 +1191,9 @@
641 if (!tx_ring)
642 goto out_err;
643
644 - tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
645 - DMA_TABLE_BYTES,
646 - DMA_TO_DEVICE);
647 + tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
648 + DMA_TABLE_BYTES,
649 + DMA_TO_DEVICE);
650
651 if (dma_mapping_error(tx_ring_dma) ||
652 tx_ring_dma + size > DMA_30BIT_MASK) {
653 @@ -1282,7 +1228,9 @@
654 /* bp->lock is held. */
655 static void b44_chip_reset(struct b44 *bp)
656 {
657 - if (ssb_is_core_up(bp)) {
658 + struct ssb_device *sdev = bp->sdev;
659 +
660 + if (ssb_device_is_enabled(bp->sdev)) {
661 bw32(bp, B44_RCV_LAZY, 0);
662 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
663 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
664 @@ -1294,19 +1242,24 @@
665 }
666 bw32(bp, B44_DMARX_CTRL, 0);
667 bp->rx_prod = bp->rx_cons = 0;
668 - } else {
669 - ssb_pci_setup(bp, (bp->core_unit == 0 ?
670 - SBINTVEC_ENET0 :
671 - SBINTVEC_ENET1));
672 }
673
674 - ssb_core_reset(bp);
675 -
676 + ssb_device_enable(bp->sdev, 0);
677 b44_clear_stats(bp);
678
679 - /* Make PHY accessible. */
680 - bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
681 + switch (sdev->bus->bustype) {
682 + case SSB_BUSTYPE_SSB:
683 + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
684 + (((ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO)
685 + & MDIO_CTRL_MAXF_MASK)));
686 + break;
687 + case SSB_BUSTYPE_PCI:
688 + case SSB_BUSTYPE_PCMCIA:
689 + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
690 (0x0d & MDIO_CTRL_MAXF_MASK)));
691 + break;
692 + }
693 +
694 br32(bp, B44_MDIO_CTRL);
695
696 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
697 @@ -1349,6 +1302,7 @@
698 {
699 struct b44 *bp = netdev_priv(dev);
700 struct sockaddr *addr = p;
701 + u32 val;
702
703 if (netif_running(dev))
704 return -EBUSY;
705 @@ -1359,7 +1313,11 @@
706 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
707
708 spin_lock_irq(&bp->lock);
709 - __b44_set_mac_addr(bp);
710 +
711 + val = br32(bp, B44_RXCONFIG);
712 + if (!(val & RXCONFIG_CAM_ABSENT))
713 + __b44_set_mac_addr(bp);
714 +
715 spin_unlock_irq(&bp->lock);
716
717 return 0;
718 @@ -1445,18 +1403,6 @@
719 return err;
720 }
721
722 -#if 0
723 -/*static*/ void b44_dump_state(struct b44 *bp)
724 -{
725 - u32 val32, val32_2, val32_3, val32_4, val32_5;
726 - u16 val16;
727 -
728 - pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
729 - printk("DEBUG: PCI status [%04x] \n", val16);
730 -
731 -}
732 -#endif
733 -
734 #ifdef CONFIG_NET_POLL_CONTROLLER
735 /*
736 * Polling receive - used by netconsole and other diagnostic tools
737 @@ -1570,7 +1516,6 @@
738 static void b44_setup_wol(struct b44 *bp)
739 {
740 u32 val;
741 - u16 pmval;
742
743 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
744
745 @@ -1594,13 +1539,6 @@
746 } else {
747 b44_setup_pseudo_magicp(bp);
748 }
749 -
750 - val = br32(bp, B44_SBTMSLOW);
751 - bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
752 -
753 - pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
754 - pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
755 -
756 }
757
758 static int b44_close(struct net_device *dev)
759 @@ -1700,7 +1638,7 @@
760
761 val = br32(bp, B44_RXCONFIG);
762 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
763 - if (dev->flags & IFF_PROMISC) {
764 + if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
765 val |= RXCONFIG_PROMISC;
766 bw32(bp, B44_RXCONFIG, val);
767 } else {
768 @@ -1747,12 +1685,8 @@
769
770 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
771 {
772 - struct b44 *bp = netdev_priv(dev);
773 - struct pci_dev *pci_dev = bp->pdev;
774 -
775 strcpy (info->driver, DRV_MODULE_NAME);
776 strcpy (info->version, DRV_MODULE_VERSION);
777 - strcpy (info->bus_info, pci_name(pci_dev));
778 }
779
780 static int b44_nway_reset(struct net_device *dev)
781 @@ -2035,6 +1969,245 @@
782 .get_ethtool_stats = b44_get_ethtool_stats,
783 };
784
785 +static int b44_ethtool_ioctl (struct net_device *dev, void __user *useraddr)
786 +{
787 + struct b44 *bp = dev->priv;
788 + u32 ethcmd;
789 +
790 + if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
791 + return -EFAULT;
792 +
793 + switch (ethcmd) {
794 + case ETHTOOL_GDRVINFO: {
795 + struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
796 + strcpy (info.driver, DRV_MODULE_NAME);
797 + strcpy (info.version, DRV_MODULE_VERSION);
798 + memset(&info.fw_version, 0, sizeof(info.fw_version));
799 + info.eedump_len = 0;
800 + info.regdump_len = 0;
801 + if (copy_to_user (useraddr, &info, sizeof (info)))
802 + return -EFAULT;
803 + return 0;
804 + }
805 +
806 + case ETHTOOL_GSET: {
807 + struct ethtool_cmd cmd = { ETHTOOL_GSET };
808 +
809 + if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
810 + return -EAGAIN;
811 + cmd.supported = (SUPPORTED_Autoneg);
812 + cmd.supported |= (SUPPORTED_100baseT_Half |
813 + SUPPORTED_100baseT_Full |
814 + SUPPORTED_10baseT_Half |
815 + SUPPORTED_10baseT_Full |
816 + SUPPORTED_MII);
817 +
818 + cmd.advertising = 0;
819 + if (bp->flags & B44_FLAG_ADV_10HALF)
820 + cmd.advertising |= ADVERTISE_10HALF;
821 + if (bp->flags & B44_FLAG_ADV_10FULL)
822 + cmd.advertising |= ADVERTISE_10FULL;
823 + if (bp->flags & B44_FLAG_ADV_100HALF)
824 + cmd.advertising |= ADVERTISE_100HALF;
825 + if (bp->flags & B44_FLAG_ADV_100FULL)
826 + cmd.advertising |= ADVERTISE_100FULL;
827 + cmd.advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
828 + cmd.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
829 + SPEED_100 : SPEED_10;
830 + cmd.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
831 + DUPLEX_FULL : DUPLEX_HALF;
832 + cmd.port = 0;
833 + cmd.phy_address = bp->phy_addr;
834 + cmd.transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
835 + XCVR_INTERNAL : XCVR_EXTERNAL;
836 + cmd.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
837 + AUTONEG_DISABLE : AUTONEG_ENABLE;
838 + cmd.maxtxpkt = 0;
839 + cmd.maxrxpkt = 0;
840 + if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
841 + return -EFAULT;
842 + return 0;
843 + }
844 + case ETHTOOL_SSET: {
845 + struct ethtool_cmd cmd;
846 +
847 + if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
848 + return -EAGAIN;
849 +
850 + if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
851 + return -EFAULT;
852 +
853 + /* We do not support gigabit. */
854 + if (cmd.autoneg == AUTONEG_ENABLE) {
855 + if (cmd.advertising &
856 + (ADVERTISED_1000baseT_Half |
857 + ADVERTISED_1000baseT_Full))
858 + return -EINVAL;
859 + } else if ((cmd.speed != SPEED_100 &&
860 + cmd.speed != SPEED_10) ||
861 + (cmd.duplex != DUPLEX_HALF &&
862 + cmd.duplex != DUPLEX_FULL)) {
863 + return -EINVAL;
864 + }
865 +
866 + spin_lock_irq(&bp->lock);
867 +
868 + if (cmd.autoneg == AUTONEG_ENABLE) {
869 + bp->flags &= ~B44_FLAG_FORCE_LINK;
870 + bp->flags &= ~(B44_FLAG_ADV_10HALF |
871 + B44_FLAG_ADV_10FULL |
872 + B44_FLAG_ADV_100HALF |
873 + B44_FLAG_ADV_100FULL);
874 + if (cmd.advertising & ADVERTISE_10HALF)
875 + bp->flags |= B44_FLAG_ADV_10HALF;
876 + if (cmd.advertising & ADVERTISE_10FULL)
877 + bp->flags |= B44_FLAG_ADV_10FULL;
878 + if (cmd.advertising & ADVERTISE_100HALF)
879 + bp->flags |= B44_FLAG_ADV_100HALF;
880 + if (cmd.advertising & ADVERTISE_100FULL)
881 + bp->flags |= B44_FLAG_ADV_100FULL;
882 + } else {
883 + bp->flags |= B44_FLAG_FORCE_LINK;
884 + if (cmd.speed == SPEED_100)
885 + bp->flags |= B44_FLAG_100_BASE_T;
886 + if (cmd.duplex == DUPLEX_FULL)
887 + bp->flags |= B44_FLAG_FULL_DUPLEX;
888 + }
889 +
890 + b44_setup_phy(bp);
891 +
892 + spin_unlock_irq(&bp->lock);
893 +
894 + return 0;
895 + }
896 +
897 + case ETHTOOL_GMSGLVL: {
898 + struct ethtool_value edata = { ETHTOOL_GMSGLVL };
899 + edata.data = bp->msg_enable;
900 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
901 + return -EFAULT;
902 + return 0;
903 + }
904 + case ETHTOOL_SMSGLVL: {
905 + struct ethtool_value edata;
906 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
907 + return -EFAULT;
908 + bp->msg_enable = edata.data;
909 + return 0;
910 + }
911 + case ETHTOOL_NWAY_RST: {
912 + u32 bmcr;
913 + int r;
914 +
915 + spin_lock_irq(&bp->lock);
916 + b44_readphy(bp, MII_BMCR, &bmcr);
917 + b44_readphy(bp, MII_BMCR, &bmcr);
918 + r = -EINVAL;
919 + if (bmcr & BMCR_ANENABLE) {
920 + b44_writephy(bp, MII_BMCR,
921 + bmcr | BMCR_ANRESTART);
922 + r = 0;
923 + }
924 + spin_unlock_irq(&bp->lock);
925 +
926 + return r;
927 + }
928 + case ETHTOOL_GLINK: {
929 + struct ethtool_value edata = { ETHTOOL_GLINK };
930 + edata.data = netif_carrier_ok(bp->dev) ? 1 : 0;
931 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
932 + return -EFAULT;
933 + return 0;
934 + }
935 + case ETHTOOL_GRINGPARAM: {
936 + struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
937 +
938 + ering.rx_max_pending = B44_RX_RING_SIZE - 1;
939 + ering.rx_pending = bp->rx_pending;
940 +
941 + /* XXX ethtool lacks a tx_max_pending, oops... */
942 +
943 + if (copy_to_user(useraddr, &ering, sizeof(ering)))
944 + return -EFAULT;
945 + return 0;
946 + }
947 + case ETHTOOL_SRINGPARAM: {
948 + struct ethtool_ringparam ering;
949 +
950 + if (copy_from_user(&ering, useraddr, sizeof(ering)))
951 + return -EFAULT;
952 +
953 + if ((ering.rx_pending > B44_RX_RING_SIZE - 1) ||
954 + (ering.rx_mini_pending != 0) ||
955 + (ering.rx_jumbo_pending != 0) ||
956 + (ering.tx_pending > B44_TX_RING_SIZE - 1))
957 + return -EINVAL;
958 +
959 + spin_lock_irq(&bp->lock);
960 +
961 + bp->rx_pending = ering.rx_pending;
962 + bp->tx_pending = ering.tx_pending;
963 +
964 + b44_halt(bp);
965 + b44_init_rings(bp);
966 + b44_init_hw(bp, 1);
967 + netif_wake_queue(bp->dev);
968 + spin_unlock_irq(&bp->lock);
969 +
970 + b44_enable_ints(bp);
971 +
972 + return 0;
973 + }
974 + case ETHTOOL_GPAUSEPARAM: {
975 + struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
976 +
977 + epause.autoneg =
978 + (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
979 + epause.rx_pause =
980 + (bp->flags & B44_FLAG_RX_PAUSE) != 0;
981 + epause.tx_pause =
982 + (bp->flags & B44_FLAG_TX_PAUSE) != 0;
983 + if (copy_to_user(useraddr, &epause, sizeof(epause)))
984 + return -EFAULT;
985 + return 0;
986 + }
987 + case ETHTOOL_SPAUSEPARAM: {
988 + struct ethtool_pauseparam epause;
989 +
990 + if (copy_from_user(&epause, useraddr, sizeof(epause)))
991 + return -EFAULT;
992 +
993 + spin_lock_irq(&bp->lock);
994 + if (epause.autoneg)
995 + bp->flags |= B44_FLAG_PAUSE_AUTO;
996 + else
997 + bp->flags &= ~B44_FLAG_PAUSE_AUTO;
998 + if (epause.rx_pause)
999 + bp->flags |= B44_FLAG_RX_PAUSE;
1000 + else
1001 + bp->flags &= ~B44_FLAG_RX_PAUSE;
1002 + if (epause.tx_pause)
1003 + bp->flags |= B44_FLAG_TX_PAUSE;
1004 + else
1005 + bp->flags &= ~B44_FLAG_TX_PAUSE;
1006 + if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1007 + b44_halt(bp);
1008 + b44_init_rings(bp);
1009 + b44_init_hw(bp, 1);
1010 + } else {
1011 + __b44_set_flow_ctrl(bp, bp->flags);
1012 + }
1013 + spin_unlock_irq(&bp->lock);
1014 +
1015 + b44_enable_ints(bp);
1016 +
1017 + return 0;
1018 + }
1019 + };
1020 +
1021 + return -EOPNOTSUPP;
1022 +}
1023 +
1024 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1025 {
1026 struct mii_ioctl_data *data = if_mii(ifr);
1027 @@ -2044,40 +2217,64 @@
1028 if (!netif_running(dev))
1029 goto out;
1030
1031 - spin_lock_irq(&bp->lock);
1032 - err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1033 - spin_unlock_irq(&bp->lock);
1034 -out:
1035 - return err;
1036 -}
1037 + switch (cmd) {
1038 + case SIOCETHTOOL:
1039 + return b44_ethtool_ioctl(dev, (void __user*) ifr->ifr_data);
1040
1041 -/* Read 128-bytes of EEPROM. */
1042 -static int b44_read_eeprom(struct b44 *bp, u8 *data)
1043 -{
1044 - long i;
1045 - __le16 *ptr = (__le16 *) data;
1046 + case SIOCGMIIPHY:
1047 + data->phy_id = bp->phy_addr;
1048
1049 - for (i = 0; i < 128; i += 2)
1050 - ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
1051 + /* fallthru */
1052 + case SIOCGMIIREG: {
1053 + u32 mii_regval;
1054 + spin_lock_irq(&bp->lock);
1055 + err = __b44_readphy(bp, data->phy_id & 0x1f, data->reg_num & 0x1f, &mii_regval);
1056 + spin_unlock_irq(&bp->lock);
1057
1058 - return 0;
1059 + data->val_out = mii_regval;
1060 +
1061 + return err;
1062 + }
1063 +
1064 + case SIOCSMIIREG:
1065 + if (!capable(CAP_NET_ADMIN))
1066 + return -EPERM;
1067 +
1068 + spin_lock_irq(&bp->lock);
1069 + err = __b44_writephy(bp, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1070 + spin_unlock_irq(&bp->lock);
1071 +
1072 + return err;
1073 +
1074 + default:
1075 + break;
1076 + };
1077 + return -EOPNOTSUPP;
1078 +
1079 +out:
1080 + return err;
1081 }
1082
1083 static int __devinit b44_get_invariants(struct b44 *bp)
1084 {
1085 - u8 eeprom[128];
1086 - int err;
1087 + struct ssb_device *sdev = bp->sdev;
1088 + int err = 0;
1089 + u8 *addr;
1090
1091 - err = b44_read_eeprom(bp, &eeprom[0]);
1092 - if (err)
1093 - goto out;
1094 + bp->dma_offset = ssb_dma_translation(sdev);
1095
1096 - bp->dev->dev_addr[0] = eeprom[79];
1097 - bp->dev->dev_addr[1] = eeprom[78];
1098 - bp->dev->dev_addr[2] = eeprom[81];
1099 - bp->dev->dev_addr[3] = eeprom[80];
1100 - bp->dev->dev_addr[4] = eeprom[83];
1101 - bp->dev->dev_addr[5] = eeprom[82];
1102 + switch (instance) {
1103 + case 1:
1104 + addr = sdev->bus->sprom.et0mac;
1105 + bp->phy_addr = sdev->bus->sprom.et0phyaddr;
1106 + break;
1107 + default:
1108 + addr = sdev->bus->sprom.et1mac;
1109 + bp->phy_addr = sdev->bus->sprom.et1phyaddr;
1110 + break;
1111 + }
1112 +
1113 + memcpy(bp->dev->dev_addr, addr, 6);
1114
1115 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
1116 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
1117 @@ -2086,103 +2283,52 @@
1118
1119 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1120
1121 - bp->phy_addr = eeprom[90] & 0x1f;
1122 -
1123 bp->imask = IMASK_DEF;
1124
1125 - bp->core_unit = ssb_core_unit(bp);
1126 - bp->dma_offset = SB_PCI_DMA;
1127 -
1128 /* XXX - really required?
1129 bp->flags |= B44_FLAG_BUGGY_TXPTR;
1130 */
1131
1132 - if (ssb_get_core_rev(bp) >= 7)
1133 + if (bp->sdev->id.revision >= 7)
1134 bp->flags |= B44_FLAG_B0_ANDLATER;
1135
1136 -out:
1137 return err;
1138 }
1139
1140 -static int __devinit b44_init_one(struct pci_dev *pdev,
1141 - const struct pci_device_id *ent)
1142 +static int __devinit b44_init_one(struct ssb_device *sdev,
1143 + const struct ssb_device_id *ent)
1144 {
1145 static int b44_version_printed = 0;
1146 - unsigned long b44reg_base, b44reg_len;
1147 struct net_device *dev;
1148 struct b44 *bp;
1149 int err, i;
1150
1151 + instance++;
1152 +
1153 if (b44_version_printed++ == 0)
1154 printk(KERN_INFO "%s", version);
1155
1156 - err = pci_enable_device(pdev);
1157 - if (err) {
1158 - dev_err(&pdev->dev, "Cannot enable PCI device, "
1159 - "aborting.\n");
1160 - return err;
1161 - }
1162 -
1163 - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1164 - dev_err(&pdev->dev,
1165 - "Cannot find proper PCI device "
1166 - "base address, aborting.\n");
1167 - err = -ENODEV;
1168 - goto err_out_disable_pdev;
1169 - }
1170 -
1171 - err = pci_request_regions(pdev, DRV_MODULE_NAME);
1172 - if (err) {
1173 - dev_err(&pdev->dev,
1174 - "Cannot obtain PCI resources, aborting.\n");
1175 - goto err_out_disable_pdev;
1176 - }
1177 -
1178 - pci_set_master(pdev);
1179 -
1180 - err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
1181 - if (err) {
1182 - dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
1183 - goto err_out_free_res;
1184 - }
1185 -
1186 - err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
1187 - if (err) {
1188 - dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
1189 - goto err_out_free_res;
1190 - }
1191 -
1192 - b44reg_base = pci_resource_start(pdev, 0);
1193 - b44reg_len = pci_resource_len(pdev, 0);
1194 -
1195 dev = alloc_etherdev(sizeof(*bp));
1196 if (!dev) {
1197 - dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
1198 + dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
1199 err = -ENOMEM;
1200 - goto err_out_free_res;
1201 + goto out;
1202 }
1203
1204 SET_MODULE_OWNER(dev);
1205 - SET_NETDEV_DEV(dev,&pdev->dev);
1206 + SET_NETDEV_DEV(dev,sdev->dev);
1207
1208 /* No interesting netdevice features in this card... */
1209 dev->features |= 0;
1210
1211 bp = netdev_priv(dev);
1212 - bp->pdev = pdev;
1213 + bp->sdev = sdev;
1214 bp->dev = dev;
1215
1216 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1217
1218 spin_lock_init(&bp->lock);
1219
1220 - bp->regs = ioremap(b44reg_base, b44reg_len);
1221 - if (bp->regs == 0UL) {
1222 - dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
1223 - err = -ENOMEM;
1224 - goto err_out_free_dev;
1225 - }
1226 -
1227 bp->rx_pending = B44_DEF_RX_RING_PENDING;
1228 bp->tx_pending = B44_DEF_TX_RING_PENDING;
1229
1230 @@ -2201,16 +2347,16 @@
1231 dev->poll_controller = b44_poll_controller;
1232 #endif
1233 dev->change_mtu = b44_change_mtu;
1234 - dev->irq = pdev->irq;
1235 + dev->irq = sdev->irq;
1236 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
1237
1238 netif_carrier_off(dev);
1239
1240 err = b44_get_invariants(bp);
1241 if (err) {
1242 - dev_err(&pdev->dev,
1243 + dev_err(sdev->dev,
1244 "Problem fetching invariants of chip, aborting.\n");
1245 - goto err_out_iounmap;
1246 + goto err_out_free_dev;
1247 }
1248
1249 bp->mii_if.dev = dev;
1250 @@ -2229,61 +2375,52 @@
1251
1252 err = register_netdev(dev);
1253 if (err) {
1254 - dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1255 - goto err_out_iounmap;
1256 + dev_err(sdev->dev, "Cannot register net device, aborting.\n");
1257 + goto out;
1258 }
1259
1260 - pci_set_drvdata(pdev, dev);
1261 -
1262 - pci_save_state(bp->pdev);
1263 + ssb_set_drvdata(sdev, dev);
1264
1265 /* Chip reset provides power to the b44 MAC & PCI cores, which
1266 * is necessary for MAC register access.
1267 */
1268 b44_chip_reset(bp);
1269
1270 - printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
1271 + printk(KERN_INFO "%s: Broadcom 10/100BaseT Ethernet ", dev->name);
1272 for (i = 0; i < 6; i++)
1273 printk("%2.2x%c", dev->dev_addr[i],
1274 i == 5 ? '\n' : ':');
1275
1276 - return 0;
1277 + /* Initialize phy */
1278 + spin_lock_irq(&bp->lock);
1279 + b44_chip_reset(bp);
1280 + spin_unlock_irq(&bp->lock);
1281
1282 -err_out_iounmap:
1283 - iounmap(bp->regs);
1284 + return 0;
1285
1286 err_out_free_dev:
1287 free_netdev(dev);
1288
1289 -err_out_free_res:
1290 - pci_release_regions(pdev);
1291 -
1292 -err_out_disable_pdev:
1293 - pci_disable_device(pdev);
1294 - pci_set_drvdata(pdev, NULL);
1295 +out:
1296 return err;
1297 }
1298
1299 -static void __devexit b44_remove_one(struct pci_dev *pdev)
1300 +static void __devexit b44_remove_one(struct ssb_device *pdev)
1301 {
1302 - struct net_device *dev = pci_get_drvdata(pdev);
1303 - struct b44 *bp = netdev_priv(dev);
1304 + struct net_device *dev = ssb_get_drvdata(pdev);
1305
1306 unregister_netdev(dev);
1307 - iounmap(bp->regs);
1308 free_netdev(dev);
1309 - pci_release_regions(pdev);
1310 - pci_disable_device(pdev);
1311 - pci_set_drvdata(pdev, NULL);
1312 + ssb_set_drvdata(pdev, NULL);
1313 }
1314
1315 -static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
1316 +static int b44_suspend(struct ssb_device *pdev, pm_message_t state)
1317 {
1318 - struct net_device *dev = pci_get_drvdata(pdev);
1319 + struct net_device *dev = ssb_get_drvdata(pdev);
1320 struct b44 *bp = netdev_priv(dev);
1321
1322 if (!netif_running(dev))
1323 - return 0;
1324 + return 0;
1325
1326 del_timer_sync(&bp->timer);
1327
1328 @@ -2301,33 +2438,22 @@
1329 b44_init_hw(bp, B44_PARTIAL_RESET);
1330 b44_setup_wol(bp);
1331 }
1332 - pci_disable_device(pdev);
1333 +
1334 return 0;
1335 }
1336
1337 -static int b44_resume(struct pci_dev *pdev)
1338 +static int b44_resume(struct ssb_device *pdev)
1339 {
1340 - struct net_device *dev = pci_get_drvdata(pdev);
1341 + struct net_device *dev = ssb_get_drvdata(pdev);
1342 struct b44 *bp = netdev_priv(dev);
1343 int rc = 0;
1344
1345 - pci_restore_state(pdev);
1346 - rc = pci_enable_device(pdev);
1347 - if (rc) {
1348 - printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
1349 - dev->name);
1350 - return rc;
1351 - }
1352 -
1353 - pci_set_master(pdev);
1354 -
1355 if (!netif_running(dev))
1356 return 0;
1357
1358 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1359 if (rc) {
1360 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
1361 - pci_disable_device(pdev);
1362 return rc;
1363 }
1364
1365 @@ -2346,29 +2472,31 @@
1366 return 0;
1367 }
1368
1369 -static struct pci_driver b44_driver = {
1370 +static struct ssb_driver b44_driver = {
1371 .name = DRV_MODULE_NAME,
1372 - .id_table = b44_pci_tbl,
1373 + .id_table = b44_ssb_tbl,
1374 .probe = b44_init_one,
1375 .remove = __devexit_p(b44_remove_one),
1376 - .suspend = b44_suspend,
1377 - .resume = b44_resume,
1378 + .suspend = b44_suspend,
1379 + .resume = b44_resume,
1380 };
1381
1382 static int __init b44_init(void)
1383 {
1384 unsigned int dma_desc_align_size = dma_get_cache_alignment();
1385
1386 + instance = 0;
1387 +
1388 /* Setup paramaters for syncing RX/TX DMA descriptors */
1389 dma_desc_align_mask = ~(dma_desc_align_size - 1);
1390 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
1391
1392 - return pci_register_driver(&b44_driver);
1393 + return ssb_driver_register(&b44_driver);
1394 }
1395
1396 static void __exit b44_cleanup(void)
1397 {
1398 - pci_unregister_driver(&b44_driver);
1399 + ssb_driver_unregister(&b44_driver);
1400 }
1401
1402 module_init(b44_init);
1403 --- a/drivers/net/b44.h
1404 +++ b/drivers/net/b44.h
1405 @@ -129,6 +129,7 @@
1406 #define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */
1407 #define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */
1408 #define RXCONFIG_RFILT 0x00000080 /* Reject Filter */
1409 +#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */
1410 #define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */
1411 #define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */
1412 #define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */
1413 @@ -227,75 +228,9 @@
1414 #define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */
1415 #define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */
1416
1417 -/* Silicon backplane register definitions */
1418 -#define B44_SBIMSTATE 0x0F90UL /* SB Initiator Agent State */
1419 -#define SBIMSTATE_PC 0x0000000f /* Pipe Count */
1420 -#define SBIMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */
1421 -#define SBIMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */
1422 -#define SBIMSTATE_AP_TS 0x00000010 /* Use timeslices only */
1423 -#define SBIMSTATE_AP_TK 0x00000020 /* Use token only */
1424 -#define SBIMSTATE_AP_RSV 0x00000030 /* Reserved */
1425 -#define SBIMSTATE_IBE 0x00020000 /* In Band Error */
1426 -#define SBIMSTATE_TO 0x00040000 /* Timeout */
1427 -#define B44_SBINTVEC 0x0F94UL /* SB Interrupt Mask */
1428 -#define SBINTVEC_PCI 0x00000001 /* Enable interrupts for PCI */
1429 -#define SBINTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */
1430 -#define SBINTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */
1431 -#define SBINTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */
1432 -#define SBINTVEC_USB 0x00000010 /* Enable interrupts for usb */
1433 -#define SBINTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */
1434 -#define SBINTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */
1435 -#define B44_SBTMSLOW 0x0F98UL /* SB Target State Low */
1436 -#define SBTMSLOW_RESET 0x00000001 /* Reset */
1437 -#define SBTMSLOW_REJECT 0x00000002 /* Reject */
1438 -#define SBTMSLOW_CLOCK 0x00010000 /* Clock Enable */
1439 -#define SBTMSLOW_FGC 0x00020000 /* Force Gated Clocks On */
1440 -#define SBTMSLOW_PE 0x40000000 /* Power Management Enable */
1441 -#define SBTMSLOW_BE 0x80000000 /* BIST Enable */
1442 -#define B44_SBTMSHIGH 0x0F9CUL /* SB Target State High */
1443 -#define SBTMSHIGH_SERR 0x00000001 /* S-error */
1444 -#define SBTMSHIGH_INT 0x00000002 /* Interrupt */
1445 -#define SBTMSHIGH_BUSY 0x00000004 /* Busy */
1446 -#define SBTMSHIGH_GCR 0x20000000 /* Gated Clock Request */
1447 -#define SBTMSHIGH_BISTF 0x40000000 /* BIST Failed */
1448 -#define SBTMSHIGH_BISTD 0x80000000 /* BIST Done */
1449 -#define B44_SBIDHIGH 0x0FFCUL /* SB Identification High */
1450 -#define SBIDHIGH_RC_MASK 0x0000000f /* Revision Code */
1451 -#define SBIDHIGH_CC_MASK 0x0000fff0 /* Core Code */
1452 -#define SBIDHIGH_CC_SHIFT 4
1453 -#define SBIDHIGH_VC_MASK 0xffff0000 /* Vendor Code */
1454 -#define SBIDHIGH_VC_SHIFT 16
1455 -
1456 -/* SSB PCI config space registers. */
1457 -#define SSB_PMCSR 0x44
1458 -#define SSB_PE 0x100
1459 -#define SSB_BAR0_WIN 0x80
1460 -#define SSB_BAR1_WIN 0x84
1461 -#define SSB_SPROM_CONTROL 0x88
1462 -#define SSB_BAR1_CONTROL 0x8c
1463 -
1464 -/* SSB core and host control registers. */
1465 -#define SSB_CONTROL 0x0000UL
1466 -#define SSB_ARBCONTROL 0x0010UL
1467 -#define SSB_ISTAT 0x0020UL
1468 -#define SSB_IMASK 0x0024UL
1469 -#define SSB_MBOX 0x0028UL
1470 -#define SSB_BCAST_ADDR 0x0050UL
1471 -#define SSB_BCAST_DATA 0x0054UL
1472 -#define SSB_PCI_TRANS_0 0x0100UL
1473 -#define SSB_PCI_TRANS_1 0x0104UL
1474 -#define SSB_PCI_TRANS_2 0x0108UL
1475 -#define SSB_SPROM 0x0800UL
1476 -
1477 -#define SSB_PCI_MEM 0x00000000
1478 -#define SSB_PCI_IO 0x00000001
1479 -#define SSB_PCI_CFG0 0x00000002
1480 -#define SSB_PCI_CFG1 0x00000003
1481 -#define SSB_PCI_PREF 0x00000004
1482 -#define SSB_PCI_BURST 0x00000008
1483 -#define SSB_PCI_MASK0 0xfc000000
1484 -#define SSB_PCI_MASK1 0xfc000000
1485 -#define SSB_PCI_MASK2 0xc0000000
1486 +#define br32(bp, REG) ssb_read32((bp)->sdev, (REG))
1487 +#define bw32(bp, REG, VAL) ssb_write32((bp)->sdev, (REG), (VAL))
1488 +#define atoi(str) simple_strtoul(((str != NULL) ? str : ""), NULL, 0)
1489
1490 /* 4400 PHY registers */
1491 #define B44_MII_AUXCTRL 24 /* Auxiliary Control */
1492 @@ -346,10 +281,12 @@
1493
1494 struct ring_info {
1495 struct sk_buff *skb;
1496 - DECLARE_PCI_UNMAP_ADDR(mapping);
1497 + dma_addr_t mapping;
1498 };
1499
1500 #define B44_MCAST_TABLE_SIZE 32
1501 +#define B44_PHY_ADDR_NO_PHY 30
1502 +#define B44_MDC_RATIO 5000000
1503
1504 #define B44_STAT_REG_DECLARE \
1505 _B44(tx_good_octets) \
1506 @@ -425,9 +362,10 @@
1507
1508 u32 dma_offset;
1509 u32 flags;
1510 -#define B44_FLAG_B0_ANDLATER 0x00000001
1511 +#define B44_FLAG_INIT_COMPLETE 0x00000001
1512 #define B44_FLAG_BUGGY_TXPTR 0x00000002
1513 #define B44_FLAG_REORDER_BUG 0x00000004
1514 +#define B44_FLAG_B0_ANDLATER 0x00000008
1515 #define B44_FLAG_PAUSE_AUTO 0x00008000
1516 #define B44_FLAG_FULL_DUPLEX 0x00010000
1517 #define B44_FLAG_100_BASE_T 0x00020000
1518 @@ -450,8 +388,7 @@
1519 struct net_device_stats stats;
1520 struct b44_hw_stats hw_stats;
1521
1522 - void __iomem *regs;
1523 - struct pci_dev *pdev;
1524 + struct ssb_device *sdev;
1525 struct net_device *dev;
1526
1527 dma_addr_t rx_ring_dma, tx_ring_dma;
1528 --- a/drivers/net/Kconfig
1529 +++ b/drivers/net/Kconfig
1530 @@ -1577,7 +1577,7 @@
1531
1532 config B44
1533 tristate "Broadcom 4400 ethernet support"
1534 - depends on NET_PCI && PCI
1535 + depends on SSB && EXPERIMENTAL
1536 select MII
1537 help
1538 If you have a network (Ethernet) controller of this type, say Y and
This page took 0.107276 seconds and 5 git commands to generate.