1 --- a/drivers/net/b44.c
2 +++ b/drivers/net/b44.c
4 -/* b44.c: Broadcom 4400 device driver.
5 +/* b44.c: Broadcom 4400/47xx device driver.
7 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
8 - * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
9 + * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
10 + * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
11 + * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
12 * Copyright (C) 2006 Broadcom Corporation.
14 * Distribute under GPL.
16 #include <linux/delay.h>
17 #include <linux/init.h>
18 #include <linux/dma-mapping.h>
19 +#include <linux/ssb/ssb.h>
21 #include <asm/uaccess.h>
28 #define DRV_MODULE_NAME "b44"
30 static char version[] __devinitdata =
31 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
33 -MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
34 -MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
35 +MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
36 +MODULE_DESCRIPTION("Broadcom 4400/47xx 10/100 PCI ethernet driver");
37 MODULE_LICENSE("GPL");
38 MODULE_VERSION(DRV_MODULE_VERSION);
41 module_param(b44_debug, int, 0);
42 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
44 -static struct pci_device_id b44_pci_tbl[] = {
45 - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
46 - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
47 - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
48 - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
49 - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
50 - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
51 - { } /* terminate list with empty entry */
52 +static struct ssb_device_id b44_ssb_tbl[] = {
53 + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
57 -MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
59 static void b44_halt(struct b44 *);
60 static void b44_init_rings(struct b44 *);
64 static int dma_desc_align_mask;
65 static int dma_desc_sync_size;
68 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
69 #define _B44(x...) # x,
74 -static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
75 - dma_addr_t dma_base,
76 - unsigned long offset,
77 - enum dma_data_direction dir)
79 - dma_sync_single_range_for_device(&pdev->dev, dma_base,
80 - offset & dma_desc_align_mask,
81 - dma_desc_sync_size, dir);
84 -static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
85 - dma_addr_t dma_base,
86 - unsigned long offset,
87 - enum dma_data_direction dir)
89 - dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
90 - offset & dma_desc_align_mask,
91 - dma_desc_sync_size, dir);
94 -static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
96 - return readl(bp->regs + reg);
99 -static inline void bw32(const struct b44 *bp,
100 - unsigned long reg, unsigned long val)
102 - writel(val, bp->regs + reg);
103 +static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
104 + dma_addr_t dma_base,
105 + unsigned long offset,
106 + enum dma_data_direction dir)
108 + dma_sync_single_range_for_device(sdev->dev, dma_base,
109 + offset & dma_desc_align_mask,
110 + dma_desc_sync_size, dir);
113 +static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
114 + dma_addr_t dma_base,
115 + unsigned long offset,
116 + enum dma_data_direction dir)
118 + dma_sync_single_range_for_cpu(sdev->dev, dma_base,
119 + offset & dma_desc_align_mask,
120 + dma_desc_sync_size, dir);
123 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
124 @@ -182,117 +169,29 @@
128 -/* Sonics SiliconBackplane support routines. ROFL, you should see all the
129 - * buzz words used on this company's website :-)
131 - * All of these routines must be invoked with bp->lock held and
132 - * interrupts disabled.
135 -#define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
136 -#define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
138 -static u32 ssb_get_core_rev(struct b44 *bp)
140 - return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
143 -static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
145 - u32 bar_orig, pci_rev, val;
147 - pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
148 - pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
149 - pci_rev = ssb_get_core_rev(bp);
151 - val = br32(bp, B44_SBINTVEC);
153 - bw32(bp, B44_SBINTVEC, val);
155 - val = br32(bp, SSB_PCI_TRANS_2);
156 - val |= SSB_PCI_PREF | SSB_PCI_BURST;
157 - bw32(bp, SSB_PCI_TRANS_2, val);
159 - pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
164 -static void ssb_core_disable(struct b44 *bp)
166 - if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
169 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
170 - b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
171 - b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
172 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
173 - SBTMSLOW_REJECT | SBTMSLOW_RESET));
174 - br32(bp, B44_SBTMSLOW);
176 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
177 - br32(bp, B44_SBTMSLOW);
181 -static void ssb_core_reset(struct b44 *bp)
182 +static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
186 - ssb_core_disable(bp);
187 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
188 - br32(bp, B44_SBTMSLOW);
191 - /* Clear SERR if set, this is a hw bug workaround. */
192 - if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
193 - bw32(bp, B44_SBTMSHIGH, 0);
195 - val = br32(bp, B44_SBIMSTATE);
196 - if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
197 - bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
199 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
200 - br32(bp, B44_SBTMSLOW);
202 + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
203 + (index << CAM_CTRL_INDEX_SHIFT)));
205 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
206 - br32(bp, B44_SBTMSLOW);
209 + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
211 -static int ssb_core_unit(struct b44 *bp)
214 - u32 val = br32(bp, B44_SBADMATCH0);
216 + val = br32(bp, B44_CAM_DATA_LO);
218 - type = val & SBADMATCH0_TYPE_MASK;
221 - base = val & SBADMATCH0_BS0_MASK;
223 + data[2] = (val >> 24) & 0xFF;
224 + data[3] = (val >> 16) & 0xFF;
225 + data[4] = (val >> 8) & 0xFF;
226 + data[5] = (val >> 0) & 0xFF;
229 - base = val & SBADMATCH0_BS1_MASK;
231 + val = br32(bp, B44_CAM_DATA_HI);
235 - base = val & SBADMATCH0_BS2_MASK;
240 + data[0] = (val >> 8) & 0xFF;
241 + data[1] = (val >> 0) & 0xFF;
244 -static int ssb_is_core_up(struct b44 *bp)
246 - return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
247 - == SBTMSLOW_CLOCK);
250 -static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
251 +static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
255 @@ -328,14 +227,14 @@
256 bw32(bp, B44_IMASK, bp->imask);
259 -static int b44_readphy(struct b44 *bp, int reg, u32 *val)
260 +static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
264 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
265 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
266 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
267 - (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
268 + (phy_addr << MDIO_DATA_PMD_SHIFT) |
269 (reg << MDIO_DATA_RA_SHIFT) |
270 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
271 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
272 @@ -344,18 +243,34 @@
276 -static int b44_writephy(struct b44 *bp, int reg, u32 val)
277 +static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
279 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
280 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
281 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
282 - (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
283 + (phy_addr << MDIO_DATA_PMD_SHIFT) |
284 (reg << MDIO_DATA_RA_SHIFT) |
285 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
286 (val & MDIO_DATA_DATA)));
287 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
290 +static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
292 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
295 + return __b44_readphy(bp, bp->phy_addr, reg, val);
298 +static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
300 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
303 + return __b44_writephy(bp, bp->phy_addr, reg, val);
306 /* miilib interface */
307 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
308 * due to code existing before miilib use was added to this driver.
313 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
315 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
318 @@ -442,11 +359,27 @@
319 __b44_set_flow_ctrl(bp, pause_enab);
323 +extern char *nvram_get(char *name); //FIXME: move elsewhere
324 static int b44_setup_phy(struct b44 *bp)
330 + * workaround for bad hardware design in Linksys WAP54G v1.0
331 + * see https://dev.openwrt.org/ticket/146
332 + * check and reset bit "isolate"
334 + if ((atoi(nvram_get("boardnum")) == 2) &&
335 + (__b44_readphy(bp, 0, MII_BMCR, &val) == 0) &&
336 + (val & BMCR_ISOLATE) &&
337 + (__b44_writephy(bp, 0, MII_BMCR, val & ~BMCR_ISOLATE) != 0)) {
338 + printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
341 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
343 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
345 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
350 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
351 + bp->flags |= B44_FLAG_100_BASE_T;
352 + bp->flags |= B44_FLAG_FULL_DUPLEX;
353 + if (!netif_carrier_ok(bp->dev)) {
354 + u32 val = br32(bp, B44_TX_CTRL);
355 + val |= TX_CTRL_DUPLEX;
356 + bw32(bp, B44_TX_CTRL, val);
357 + netif_carrier_on(bp->dev);
358 + b44_link_report(bp);
363 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
364 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
366 @@ -617,10 +563,10 @@
370 - pci_unmap_single(bp->pdev,
371 + dma_unmap_single(bp->sdev->dev,
372 pci_unmap_addr(rp, mapping),
377 dev_kfree_skb_irq(skb);
383 - mapping = pci_map_single(bp->pdev, skb->data,
384 + mapping = dma_map_single(bp->sdev->dev, skb->data,
386 - PCI_DMA_FROMDEVICE);
389 /* Hardware bug work-around, the chip is unable to do PCI DMA
390 to/from anything above 1GB :-( */
391 @@ -667,18 +613,18 @@
392 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
394 if (!dma_mapping_error(mapping))
395 - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
396 + dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
397 dev_kfree_skb_any(skb);
398 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
401 - mapping = pci_map_single(bp->pdev, skb->data,
402 + mapping = dma_map_single(bp->sdev->dev, skb->data,
404 - PCI_DMA_FROMDEVICE);
406 if (dma_mapping_error(mapping) ||
407 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
408 if (!dma_mapping_error(mapping))
409 - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
410 + dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
411 dev_kfree_skb_any(skb);
415 dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
417 if (bp->flags & B44_FLAG_RX_RING_HACK)
418 - b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
419 - dest_idx * sizeof(dp),
420 - DMA_BIDIRECTIONAL);
421 + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
422 + dest_idx * sizeof(dp),
423 + DMA_BIDIRECTIONAL);
425 return RX_PKT_BUF_SZ;
428 pci_unmap_addr(src_map, mapping));
430 if (bp->flags & B44_FLAG_RX_RING_HACK)
431 - b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
432 - src_idx * sizeof(src_desc),
433 - DMA_BIDIRECTIONAL);
434 + b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
435 + src_idx * sizeof(src_desc),
436 + DMA_BIDIRECTIONAL);
438 ctrl = src_desc->ctrl;
439 if (dest_idx == (B44_RX_RING_SIZE - 1))
440 @@ -750,13 +696,13 @@
443 if (bp->flags & B44_FLAG_RX_RING_HACK)
444 - b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
445 - dest_idx * sizeof(dest_desc),
446 - DMA_BIDIRECTIONAL);
447 + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
448 + dest_idx * sizeof(dest_desc),
449 + DMA_BIDIRECTIONAL);
451 - pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
452 + dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
454 - PCI_DMA_FROMDEVICE);
458 static int b44_rx(struct b44 *bp, int budget)
460 struct rx_header *rh;
463 - pci_dma_sync_single_for_cpu(bp->pdev, map,
464 + dma_sync_single_for_cpu(bp->sdev->dev, map,
466 - PCI_DMA_FROMDEVICE);
468 rh = (struct rx_header *) skb->data;
469 len = le16_to_cpu(rh->len);
470 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
472 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
475 - pci_unmap_single(bp->pdev, map,
476 - skb_size, PCI_DMA_FROMDEVICE);
477 + dma_unmap_single(bp->sdev->dev, map,
478 + skb_size, DMA_FROM_DEVICE);
479 /* Leave out rx_header */
480 skb_put(skb, len + RX_PKT_OFFSET);
481 skb_pull(skb, RX_PKT_OFFSET);
482 @@ -982,24 +928,24 @@
486 - mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
487 + mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
488 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
489 struct sk_buff *bounce_skb;
491 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
492 if (!dma_mapping_error(mapping))
493 - pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
494 + dma_unmap_single(bp->sdev->dev, mapping, len, DMA_TO_DEVICE);
496 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
500 - mapping = pci_map_single(bp->pdev, bounce_skb->data,
501 - len, PCI_DMA_TODEVICE);
502 + mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
503 + len, DMA_TO_DEVICE);
504 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
505 if (!dma_mapping_error(mapping))
506 - pci_unmap_single(bp->pdev, mapping,
507 - len, PCI_DMA_TODEVICE);
508 + dma_unmap_single(bp->sdev->dev, mapping,
509 + len, DMA_TO_DEVICE);
510 dev_kfree_skb_any(bounce_skb);
514 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
516 if (bp->flags & B44_FLAG_TX_RING_HACK)
517 - b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
518 - entry * sizeof(bp->tx_ring[0]),
520 + b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
521 + entry * sizeof(bp->tx_ring[0]),
524 entry = NEXT_TX(entry);
526 @@ -1097,10 +1043,10 @@
530 - pci_unmap_single(bp->pdev,
531 + dma_unmap_single(bp->sdev->dev,
532 pci_unmap_addr(rp, mapping),
534 - PCI_DMA_FROMDEVICE);
536 dev_kfree_skb_any(rp->skb);
539 @@ -1111,10 +1057,10 @@
543 - pci_unmap_single(bp->pdev,
544 + dma_unmap_single(bp->sdev->dev,
545 pci_unmap_addr(rp, mapping),
549 dev_kfree_skb_any(rp->skb);
552 @@ -1136,14 +1082,14 @@
553 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
555 if (bp->flags & B44_FLAG_RX_RING_HACK)
556 - dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
558 - PCI_DMA_BIDIRECTIONAL);
559 + dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
561 + DMA_BIDIRECTIONAL);
563 if (bp->flags & B44_FLAG_TX_RING_HACK)
564 - dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
567 + dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
571 for (i = 0; i < bp->rx_pending; i++) {
572 if (b44_alloc_rx_skb(bp, -1, i) < 0)
573 @@ -1163,24 +1109,24 @@
574 bp->tx_buffers = NULL;
576 if (bp->flags & B44_FLAG_RX_RING_HACK) {
577 - dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
579 - DMA_BIDIRECTIONAL);
580 + dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
582 + DMA_BIDIRECTIONAL);
585 - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
586 + dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
587 bp->rx_ring, bp->rx_ring_dma);
589 bp->flags &= ~B44_FLAG_RX_RING_HACK;
592 if (bp->flags & B44_FLAG_TX_RING_HACK) {
593 - dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
596 + dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
601 - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
602 + dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
603 bp->tx_ring, bp->tx_ring_dma);
605 bp->flags &= ~B44_FLAG_TX_RING_HACK;
606 @@ -1206,7 +1152,7 @@
609 size = DMA_TABLE_BYTES;
610 - bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
611 + bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, GFP_ATOMIC);
613 /* Allocation may have failed due to pci_alloc_consistent
614 insisting on use of GFP_DMA, which is more restrictive
615 @@ -1218,9 +1164,9 @@
619 - rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
621 - DMA_BIDIRECTIONAL);
622 + rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
624 + DMA_BIDIRECTIONAL);
626 if (dma_mapping_error(rx_ring_dma) ||
627 rx_ring_dma + size > DMA_30BIT_MASK) {
628 @@ -1233,9 +1179,9 @@
629 bp->flags |= B44_FLAG_RX_RING_HACK;
632 - bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
633 + bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, GFP_ATOMIC);
635 - /* Allocation may have failed due to pci_alloc_consistent
636 + /* Allocation may have failed due to dma_alloc_coherent
637 insisting on use of GFP_DMA, which is more restrictive
639 struct dma_desc *tx_ring;
640 @@ -1245,9 +1191,9 @@
644 - tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
647 + tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
651 if (dma_mapping_error(tx_ring_dma) ||
652 tx_ring_dma + size > DMA_30BIT_MASK) {
653 @@ -1282,7 +1228,9 @@
654 /* bp->lock is held. */
655 static void b44_chip_reset(struct b44 *bp)
657 - if (ssb_is_core_up(bp)) {
658 + struct ssb_device *sdev = bp->sdev;
660 + if (ssb_device_is_enabled(bp->sdev)) {
661 bw32(bp, B44_RCV_LAZY, 0);
662 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
663 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
664 @@ -1294,19 +1242,24 @@
666 bw32(bp, B44_DMARX_CTRL, 0);
667 bp->rx_prod = bp->rx_cons = 0;
669 - ssb_pci_setup(bp, (bp->core_unit == 0 ?
674 - ssb_core_reset(bp);
676 + ssb_device_enable(bp->sdev, 0);
679 - /* Make PHY accessible. */
680 - bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
681 + switch (sdev->bus->bustype) {
682 + case SSB_BUSTYPE_SSB:
683 + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
684 + (((ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO)
685 + & MDIO_CTRL_MAXF_MASK)));
687 + case SSB_BUSTYPE_PCI:
688 + case SSB_BUSTYPE_PCMCIA:
689 + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
690 (0x0d & MDIO_CTRL_MAXF_MASK)));
694 br32(bp, B44_MDIO_CTRL);
696 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
697 @@ -1349,6 +1302,7 @@
699 struct b44 *bp = netdev_priv(dev);
700 struct sockaddr *addr = p;
703 if (netif_running(dev))
705 @@ -1359,7 +1313,11 @@
706 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
708 spin_lock_irq(&bp->lock);
709 - __b44_set_mac_addr(bp);
711 + val = br32(bp, B44_RXCONFIG);
712 + if (!(val & RXCONFIG_CAM_ABSENT))
713 + __b44_set_mac_addr(bp);
715 spin_unlock_irq(&bp->lock);
718 @@ -1445,18 +1403,6 @@
723 -/*static*/ void b44_dump_state(struct b44 *bp)
725 - u32 val32, val32_2, val32_3, val32_4, val32_5;
728 - pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
729 - printk("DEBUG: PCI status [%04x] \n", val16);
734 #ifdef CONFIG_NET_POLL_CONTROLLER
736 * Polling receive - used by netconsole and other diagnostic tools
737 @@ -1570,7 +1516,6 @@
738 static void b44_setup_wol(struct b44 *bp)
743 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
745 @@ -1594,13 +1539,6 @@
747 b44_setup_pseudo_magicp(bp);
750 - val = br32(bp, B44_SBTMSLOW);
751 - bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
753 - pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
754 - pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
758 static int b44_close(struct net_device *dev)
759 @@ -1700,7 +1638,7 @@
761 val = br32(bp, B44_RXCONFIG);
762 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
763 - if (dev->flags & IFF_PROMISC) {
764 + if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
765 val |= RXCONFIG_PROMISC;
766 bw32(bp, B44_RXCONFIG, val);
768 @@ -1747,12 +1685,8 @@
770 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
772 - struct b44 *bp = netdev_priv(dev);
773 - struct pci_dev *pci_dev = bp->pdev;
775 strcpy (info->driver, DRV_MODULE_NAME);
776 strcpy (info->version, DRV_MODULE_VERSION);
777 - strcpy (info->bus_info, pci_name(pci_dev));
780 static int b44_nway_reset(struct net_device *dev)
781 @@ -2035,6 +1969,245 @@
782 .get_ethtool_stats = b44_get_ethtool_stats,
785 +static int b44_ethtool_ioctl (struct net_device *dev, void __user *useraddr)
787 + struct b44 *bp = dev->priv;
790 + if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
794 + case ETHTOOL_GDRVINFO: {
795 + struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
796 + strcpy (info.driver, DRV_MODULE_NAME);
797 + strcpy (info.version, DRV_MODULE_VERSION);
798 + memset(&info.fw_version, 0, sizeof(info.fw_version));
799 + info.eedump_len = 0;
800 + info.regdump_len = 0;
801 + if (copy_to_user (useraddr, &info, sizeof (info)))
806 + case ETHTOOL_GSET: {
807 + struct ethtool_cmd cmd = { ETHTOOL_GSET };
809 + if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
811 + cmd.supported = (SUPPORTED_Autoneg);
812 + cmd.supported |= (SUPPORTED_100baseT_Half |
813 + SUPPORTED_100baseT_Full |
814 + SUPPORTED_10baseT_Half |
815 + SUPPORTED_10baseT_Full |
818 + cmd.advertising = 0;
819 + if (bp->flags & B44_FLAG_ADV_10HALF)
820 + cmd.advertising |= ADVERTISE_10HALF;
821 + if (bp->flags & B44_FLAG_ADV_10FULL)
822 + cmd.advertising |= ADVERTISE_10FULL;
823 + if (bp->flags & B44_FLAG_ADV_100HALF)
824 + cmd.advertising |= ADVERTISE_100HALF;
825 + if (bp->flags & B44_FLAG_ADV_100FULL)
826 + cmd.advertising |= ADVERTISE_100FULL;
827 + cmd.advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
828 + cmd.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
829 + SPEED_100 : SPEED_10;
830 + cmd.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
831 + DUPLEX_FULL : DUPLEX_HALF;
833 + cmd.phy_address = bp->phy_addr;
834 + cmd.transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
835 + XCVR_INTERNAL : XCVR_EXTERNAL;
836 + cmd.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
837 + AUTONEG_DISABLE : AUTONEG_ENABLE;
840 + if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
844 + case ETHTOOL_SSET: {
845 + struct ethtool_cmd cmd;
847 + if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
850 + if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
853 + /* We do not support gigabit. */
854 + if (cmd.autoneg == AUTONEG_ENABLE) {
855 + if (cmd.advertising &
856 + (ADVERTISED_1000baseT_Half |
857 + ADVERTISED_1000baseT_Full))
859 + } else if ((cmd.speed != SPEED_100 &&
860 + cmd.speed != SPEED_10) ||
861 + (cmd.duplex != DUPLEX_HALF &&
862 + cmd.duplex != DUPLEX_FULL)) {
866 + spin_lock_irq(&bp->lock);
868 + if (cmd.autoneg == AUTONEG_ENABLE) {
869 + bp->flags &= ~B44_FLAG_FORCE_LINK;
870 + bp->flags &= ~(B44_FLAG_ADV_10HALF |
871 + B44_FLAG_ADV_10FULL |
872 + B44_FLAG_ADV_100HALF |
873 + B44_FLAG_ADV_100FULL);
874 + if (cmd.advertising & ADVERTISE_10HALF)
875 + bp->flags |= B44_FLAG_ADV_10HALF;
876 + if (cmd.advertising & ADVERTISE_10FULL)
877 + bp->flags |= B44_FLAG_ADV_10FULL;
878 + if (cmd.advertising & ADVERTISE_100HALF)
879 + bp->flags |= B44_FLAG_ADV_100HALF;
880 + if (cmd.advertising & ADVERTISE_100FULL)
881 + bp->flags |= B44_FLAG_ADV_100FULL;
883 + bp->flags |= B44_FLAG_FORCE_LINK;
884 + if (cmd.speed == SPEED_100)
885 + bp->flags |= B44_FLAG_100_BASE_T;
886 + if (cmd.duplex == DUPLEX_FULL)
887 + bp->flags |= B44_FLAG_FULL_DUPLEX;
892 + spin_unlock_irq(&bp->lock);
897 + case ETHTOOL_GMSGLVL: {
898 + struct ethtool_value edata = { ETHTOOL_GMSGLVL };
899 + edata.data = bp->msg_enable;
900 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
904 + case ETHTOOL_SMSGLVL: {
905 + struct ethtool_value edata;
906 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
908 + bp->msg_enable = edata.data;
911 + case ETHTOOL_NWAY_RST: {
915 + spin_lock_irq(&bp->lock);
916 + b44_readphy(bp, MII_BMCR, &bmcr);
917 + b44_readphy(bp, MII_BMCR, &bmcr);
919 + if (bmcr & BMCR_ANENABLE) {
920 + b44_writephy(bp, MII_BMCR,
921 + bmcr | BMCR_ANRESTART);
924 + spin_unlock_irq(&bp->lock);
928 + case ETHTOOL_GLINK: {
929 + struct ethtool_value edata = { ETHTOOL_GLINK };
930 + edata.data = netif_carrier_ok(bp->dev) ? 1 : 0;
931 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
935 + case ETHTOOL_GRINGPARAM: {
936 + struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
938 + ering.rx_max_pending = B44_RX_RING_SIZE - 1;
939 + ering.rx_pending = bp->rx_pending;
941 + /* XXX ethtool lacks a tx_max_pending, oops... */
943 + if (copy_to_user(useraddr, &ering, sizeof(ering)))
947 + case ETHTOOL_SRINGPARAM: {
948 + struct ethtool_ringparam ering;
950 + if (copy_from_user(&ering, useraddr, sizeof(ering)))
953 + if ((ering.rx_pending > B44_RX_RING_SIZE - 1) ||
954 + (ering.rx_mini_pending != 0) ||
955 + (ering.rx_jumbo_pending != 0) ||
956 + (ering.tx_pending > B44_TX_RING_SIZE - 1))
959 + spin_lock_irq(&bp->lock);
961 + bp->rx_pending = ering.rx_pending;
962 + bp->tx_pending = ering.tx_pending;
965 + b44_init_rings(bp);
966 + b44_init_hw(bp, 1);
967 + netif_wake_queue(bp->dev);
968 + spin_unlock_irq(&bp->lock);
970 + b44_enable_ints(bp);
974 + case ETHTOOL_GPAUSEPARAM: {
975 + struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
978 + (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
980 + (bp->flags & B44_FLAG_RX_PAUSE) != 0;
982 + (bp->flags & B44_FLAG_TX_PAUSE) != 0;
983 + if (copy_to_user(useraddr, &epause, sizeof(epause)))
987 + case ETHTOOL_SPAUSEPARAM: {
988 + struct ethtool_pauseparam epause;
990 + if (copy_from_user(&epause, useraddr, sizeof(epause)))
993 + spin_lock_irq(&bp->lock);
994 + if (epause.autoneg)
995 + bp->flags |= B44_FLAG_PAUSE_AUTO;
997 + bp->flags &= ~B44_FLAG_PAUSE_AUTO;
998 + if (epause.rx_pause)
999 + bp->flags |= B44_FLAG_RX_PAUSE;
1001 + bp->flags &= ~B44_FLAG_RX_PAUSE;
1002 + if (epause.tx_pause)
1003 + bp->flags |= B44_FLAG_TX_PAUSE;
1005 + bp->flags &= ~B44_FLAG_TX_PAUSE;
1006 + if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1008 + b44_init_rings(bp);
1009 + b44_init_hw(bp, 1);
1011 + __b44_set_flow_ctrl(bp, bp->flags);
1013 + spin_unlock_irq(&bp->lock);
1015 + b44_enable_ints(bp);
1021 + return -EOPNOTSUPP;
1024 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1026 struct mii_ioctl_data *data = if_mii(ifr);
1027 @@ -2044,40 +2217,64 @@
1028 if (!netif_running(dev))
1031 - spin_lock_irq(&bp->lock);
1032 - err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1033 - spin_unlock_irq(&bp->lock);
1039 + return b44_ethtool_ioctl(dev, (void __user*) ifr->ifr_data);
1041 -/* Read 128-bytes of EEPROM. */
1042 -static int b44_read_eeprom(struct b44 *bp, u8 *data)
1045 - __le16 *ptr = (__le16 *) data;
1047 + data->phy_id = bp->phy_addr;
1049 - for (i = 0; i < 128; i += 2)
1050 - ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
1052 + case SIOCGMIIREG: {
1054 + spin_lock_irq(&bp->lock);
1055 + err = __b44_readphy(bp, data->phy_id & 0x1f, data->reg_num & 0x1f, &mii_regval);
1056 + spin_unlock_irq(&bp->lock);
1059 + data->val_out = mii_regval;
1065 + if (!capable(CAP_NET_ADMIN))
1068 + spin_lock_irq(&bp->lock);
1069 + err = __b44_writephy(bp, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1070 + spin_unlock_irq(&bp->lock);
1077 + return -EOPNOTSUPP;
1083 static int __devinit b44_get_invariants(struct b44 *bp)
1087 + struct ssb_device *sdev = bp->sdev;
1091 - err = b44_read_eeprom(bp, &eeprom[0]);
1094 + bp->dma_offset = ssb_dma_translation(sdev);
1096 - bp->dev->dev_addr[0] = eeprom[79];
1097 - bp->dev->dev_addr[1] = eeprom[78];
1098 - bp->dev->dev_addr[2] = eeprom[81];
1099 - bp->dev->dev_addr[3] = eeprom[80];
1100 - bp->dev->dev_addr[4] = eeprom[83];
1101 - bp->dev->dev_addr[5] = eeprom[82];
1102 + switch (instance) {
1104 + addr = sdev->bus->sprom.et0mac;
1105 + bp->phy_addr = sdev->bus->sprom.et0phyaddr;
1108 + addr = sdev->bus->sprom.et1mac;
1109 + bp->phy_addr = sdev->bus->sprom.et1phyaddr;
1113 + memcpy(bp->dev->dev_addr, addr, 6);
1115 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
1116 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
1117 @@ -2086,103 +2283,52 @@
1119 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1121 - bp->phy_addr = eeprom[90] & 0x1f;
1123 bp->imask = IMASK_DEF;
1125 - bp->core_unit = ssb_core_unit(bp);
1126 - bp->dma_offset = SB_PCI_DMA;
1128 /* XXX - really required?
1129 bp->flags |= B44_FLAG_BUGGY_TXPTR;
1132 - if (ssb_get_core_rev(bp) >= 7)
1133 + if (bp->sdev->id.revision >= 7)
1134 bp->flags |= B44_FLAG_B0_ANDLATER;
1140 -static int __devinit b44_init_one(struct pci_dev *pdev,
1141 - const struct pci_device_id *ent)
1142 +static int __devinit b44_init_one(struct ssb_device *sdev,
1143 + const struct ssb_device_id *ent)
1145 static int b44_version_printed = 0;
1146 - unsigned long b44reg_base, b44reg_len;
1147 struct net_device *dev;
1153 if (b44_version_printed++ == 0)
1154 printk(KERN_INFO "%s", version);
1156 - err = pci_enable_device(pdev);
1158 - dev_err(&pdev->dev, "Cannot enable PCI device, "
1163 - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1164 - dev_err(&pdev->dev,
1165 - "Cannot find proper PCI device "
1166 - "base address, aborting.\n");
1168 - goto err_out_disable_pdev;
1171 - err = pci_request_regions(pdev, DRV_MODULE_NAME);
1173 - dev_err(&pdev->dev,
1174 - "Cannot obtain PCI resources, aborting.\n");
1175 - goto err_out_disable_pdev;
1178 - pci_set_master(pdev);
1180 - err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
1182 - dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
1183 - goto err_out_free_res;
1186 - err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
1188 - dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
1189 - goto err_out_free_res;
1192 - b44reg_base = pci_resource_start(pdev, 0);
1193 - b44reg_len = pci_resource_len(pdev, 0);
1195 dev = alloc_etherdev(sizeof(*bp));
1197 - dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
1198 + dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
1200 - goto err_out_free_res;
1204 SET_MODULE_OWNER(dev);
1205 - SET_NETDEV_DEV(dev,&pdev->dev);
1206 + SET_NETDEV_DEV(dev,sdev->dev);
1208 /* No interesting netdevice features in this card... */
1211 bp = netdev_priv(dev);
1216 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1218 spin_lock_init(&bp->lock);
1220 - bp->regs = ioremap(b44reg_base, b44reg_len);
1221 - if (bp->regs == 0UL) {
1222 - dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
1224 - goto err_out_free_dev;
1227 bp->rx_pending = B44_DEF_RX_RING_PENDING;
1228 bp->tx_pending = B44_DEF_TX_RING_PENDING;
1230 @@ -2201,16 +2347,16 @@
1231 dev->poll_controller = b44_poll_controller;
1233 dev->change_mtu = b44_change_mtu;
1234 - dev->irq = pdev->irq;
1235 + dev->irq = sdev->irq;
1236 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
1238 netif_carrier_off(dev);
1240 err = b44_get_invariants(bp);
1242 - dev_err(&pdev->dev,
1243 + dev_err(sdev->dev,
1244 "Problem fetching invariants of chip, aborting.\n");
1245 - goto err_out_iounmap;
1246 + goto err_out_free_dev;
1249 bp->mii_if.dev = dev;
1250 @@ -2229,61 +2375,52 @@
1252 err = register_netdev(dev);
1254 - dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1255 - goto err_out_iounmap;
1256 + dev_err(sdev->dev, "Cannot register net device, aborting.\n");
1260 - pci_set_drvdata(pdev, dev);
1262 - pci_save_state(bp->pdev);
1263 + ssb_set_drvdata(sdev, dev);
1265 /* Chip reset provides power to the b44 MAC & PCI cores, which
1266 * is necessary for MAC register access.
1270 - printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
1271 + printk(KERN_INFO "%s: Broadcom 10/100BaseT Ethernet ", dev->name);
1272 for (i = 0; i < 6; i++)
1273 printk("%2.2x%c", dev->dev_addr[i],
1274 i == 5 ? '\n' : ':');
1277 + /* Initialize phy */
1278 + spin_lock_irq(&bp->lock);
1279 + b44_chip_reset(bp);
1280 + spin_unlock_irq(&bp->lock);
1283 - iounmap(bp->regs);
1290 - pci_release_regions(pdev);
1292 -err_out_disable_pdev:
1293 - pci_disable_device(pdev);
1294 - pci_set_drvdata(pdev, NULL);
1299 -static void __devexit b44_remove_one(struct pci_dev *pdev)
1300 +static void __devexit b44_remove_one(struct ssb_device *pdev)
1302 - struct net_device *dev = pci_get_drvdata(pdev);
1303 - struct b44 *bp = netdev_priv(dev);
1304 + struct net_device *dev = ssb_get_drvdata(pdev);
1306 unregister_netdev(dev);
1307 - iounmap(bp->regs);
1309 - pci_release_regions(pdev);
1310 - pci_disable_device(pdev);
1311 - pci_set_drvdata(pdev, NULL);
1312 + ssb_set_drvdata(pdev, NULL);
1315 -static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
1316 +static int b44_suspend(struct ssb_device *pdev, pm_message_t state)
1318 - struct net_device *dev = pci_get_drvdata(pdev);
1319 + struct net_device *dev = ssb_get_drvdata(pdev);
1320 struct b44 *bp = netdev_priv(dev);
1322 if (!netif_running(dev))
1326 del_timer_sync(&bp->timer);
1328 @@ -2301,33 +2438,22 @@
1329 b44_init_hw(bp, B44_PARTIAL_RESET);
1332 - pci_disable_device(pdev);
1337 -static int b44_resume(struct pci_dev *pdev)
1338 +static int b44_resume(struct ssb_device *pdev)
1340 - struct net_device *dev = pci_get_drvdata(pdev);
1341 + struct net_device *dev = ssb_get_drvdata(pdev);
1342 struct b44 *bp = netdev_priv(dev);
1345 - pci_restore_state(pdev);
1346 - rc = pci_enable_device(pdev);
1348 - printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
1353 - pci_set_master(pdev);
1355 if (!netif_running(dev))
1358 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1360 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
1361 - pci_disable_device(pdev);
1365 @@ -2346,29 +2472,31 @@
1369 -static struct pci_driver b44_driver = {
1370 +static struct ssb_driver b44_driver = {
1371 .name = DRV_MODULE_NAME,
1372 - .id_table = b44_pci_tbl,
1373 + .id_table = b44_ssb_tbl,
1374 .probe = b44_init_one,
1375 .remove = __devexit_p(b44_remove_one),
1376 - .suspend = b44_suspend,
1377 - .resume = b44_resume,
1378 + .suspend = b44_suspend,
1379 + .resume = b44_resume,
1382 static int __init b44_init(void)
1384 unsigned int dma_desc_align_size = dma_get_cache_alignment();
1388 /* Setup paramaters for syncing RX/TX DMA descriptors */
1389 dma_desc_align_mask = ~(dma_desc_align_size - 1);
1390 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
1392 - return pci_register_driver(&b44_driver);
1393 + return ssb_driver_register(&b44_driver);
1396 static void __exit b44_cleanup(void)
1398 - pci_unregister_driver(&b44_driver);
1399 + ssb_driver_unregister(&b44_driver);
1402 module_init(b44_init);
1403 --- a/drivers/net/b44.h
1404 +++ b/drivers/net/b44.h
1406 #define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */
1407 #define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */
1408 #define RXCONFIG_RFILT 0x00000080 /* Reject Filter */
1409 +#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */
1410 #define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */
1411 #define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */
1412 #define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */
1413 @@ -227,75 +228,9 @@
1414 #define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */
1415 #define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */
1417 -/* Silicon backplane register definitions */
1418 -#define B44_SBIMSTATE 0x0F90UL /* SB Initiator Agent State */
1419 -#define SBIMSTATE_PC 0x0000000f /* Pipe Count */
1420 -#define SBIMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */
1421 -#define SBIMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */
1422 -#define SBIMSTATE_AP_TS 0x00000010 /* Use timeslices only */
1423 -#define SBIMSTATE_AP_TK 0x00000020 /* Use token only */
1424 -#define SBIMSTATE_AP_RSV 0x00000030 /* Reserved */
1425 -#define SBIMSTATE_IBE 0x00020000 /* In Band Error */
1426 -#define SBIMSTATE_TO 0x00040000 /* Timeout */
1427 -#define B44_SBINTVEC 0x0F94UL /* SB Interrupt Mask */
1428 -#define SBINTVEC_PCI 0x00000001 /* Enable interrupts for PCI */
1429 -#define SBINTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */
1430 -#define SBINTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */
1431 -#define SBINTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */
1432 -#define SBINTVEC_USB 0x00000010 /* Enable interrupts for usb */
1433 -#define SBINTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */
1434 -#define SBINTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */
1435 -#define B44_SBTMSLOW 0x0F98UL /* SB Target State Low */
1436 -#define SBTMSLOW_RESET 0x00000001 /* Reset */
1437 -#define SBTMSLOW_REJECT 0x00000002 /* Reject */
1438 -#define SBTMSLOW_CLOCK 0x00010000 /* Clock Enable */
1439 -#define SBTMSLOW_FGC 0x00020000 /* Force Gated Clocks On */
1440 -#define SBTMSLOW_PE 0x40000000 /* Power Management Enable */
1441 -#define SBTMSLOW_BE 0x80000000 /* BIST Enable */
1442 -#define B44_SBTMSHIGH 0x0F9CUL /* SB Target State High */
1443 -#define SBTMSHIGH_SERR 0x00000001 /* S-error */
1444 -#define SBTMSHIGH_INT 0x00000002 /* Interrupt */
1445 -#define SBTMSHIGH_BUSY 0x00000004 /* Busy */
1446 -#define SBTMSHIGH_GCR 0x20000000 /* Gated Clock Request */
1447 -#define SBTMSHIGH_BISTF 0x40000000 /* BIST Failed */
1448 -#define SBTMSHIGH_BISTD 0x80000000 /* BIST Done */
1449 -#define B44_SBIDHIGH 0x0FFCUL /* SB Identification High */
1450 -#define SBIDHIGH_RC_MASK 0x0000000f /* Revision Code */
1451 -#define SBIDHIGH_CC_MASK 0x0000fff0 /* Core Code */
1452 -#define SBIDHIGH_CC_SHIFT 4
1453 -#define SBIDHIGH_VC_MASK 0xffff0000 /* Vendor Code */
1454 -#define SBIDHIGH_VC_SHIFT 16
1456 -/* SSB PCI config space registers. */
1457 -#define SSB_PMCSR 0x44
1458 -#define SSB_PE 0x100
1459 -#define SSB_BAR0_WIN 0x80
1460 -#define SSB_BAR1_WIN 0x84
1461 -#define SSB_SPROM_CONTROL 0x88
1462 -#define SSB_BAR1_CONTROL 0x8c
1464 -/* SSB core and host control registers. */
1465 -#define SSB_CONTROL 0x0000UL
1466 -#define SSB_ARBCONTROL 0x0010UL
1467 -#define SSB_ISTAT 0x0020UL
1468 -#define SSB_IMASK 0x0024UL
1469 -#define SSB_MBOX 0x0028UL
1470 -#define SSB_BCAST_ADDR 0x0050UL
1471 -#define SSB_BCAST_DATA 0x0054UL
1472 -#define SSB_PCI_TRANS_0 0x0100UL
1473 -#define SSB_PCI_TRANS_1 0x0104UL
1474 -#define SSB_PCI_TRANS_2 0x0108UL
1475 -#define SSB_SPROM 0x0800UL
1477 -#define SSB_PCI_MEM 0x00000000
1478 -#define SSB_PCI_IO 0x00000001
1479 -#define SSB_PCI_CFG0 0x00000002
1480 -#define SSB_PCI_CFG1 0x00000003
1481 -#define SSB_PCI_PREF 0x00000004
1482 -#define SSB_PCI_BURST 0x00000008
1483 -#define SSB_PCI_MASK0 0xfc000000
1484 -#define SSB_PCI_MASK1 0xfc000000
1485 -#define SSB_PCI_MASK2 0xc0000000
1486 +#define br32(bp, REG) ssb_read32((bp)->sdev, (REG))
1487 +#define bw32(bp, REG, VAL) ssb_write32((bp)->sdev, (REG), (VAL))
1488 +#define atoi(str) simple_strtoul((((str) != NULL) ? (str) : ""), NULL, 0)
1490 /* 4400 PHY registers */
1491 #define B44_MII_AUXCTRL 24 /* Auxiliary Control */
1492 @@ -346,10 +281,12 @@
1495 struct sk_buff *skb;
1496 - DECLARE_PCI_UNMAP_ADDR(mapping);
1497 + dma_addr_t mapping;
1500 #define B44_MCAST_TABLE_SIZE 32
1501 +#define B44_PHY_ADDR_NO_PHY 30
1502 +#define B44_MDC_RATIO 5000000
1504 #define B44_STAT_REG_DECLARE \
1505 _B44(tx_good_octets) \
1506 @@ -425,9 +362,10 @@
1510 -#define B44_FLAG_B0_ANDLATER 0x00000001
1511 +#define B44_FLAG_INIT_COMPLETE 0x00000001
1512 #define B44_FLAG_BUGGY_TXPTR 0x00000002
1513 #define B44_FLAG_REORDER_BUG 0x00000004
1514 +#define B44_FLAG_B0_ANDLATER 0x00000008
1515 #define B44_FLAG_PAUSE_AUTO 0x00008000
1516 #define B44_FLAG_FULL_DUPLEX 0x00010000
1517 #define B44_FLAG_100_BASE_T 0x00020000
1519 struct net_device_stats stats;
1520 struct b44_hw_stats hw_stats;
1522 - void __iomem *regs;
1523 - struct pci_dev *pdev;
1524 + struct ssb_device *sdev;
1525 struct net_device *dev;
1527 dma_addr_t rx_ring_dma, tx_ring_dma;
1528 --- a/drivers/net/Kconfig
1529 +++ b/drivers/net/Kconfig
1530 @@ -1577,7 +1577,7 @@
1533 tristate "Broadcom 4400 ethernet support"
1534 - depends on NET_PCI && PCI
1535 + depends on SSB && EXPERIMENTAL
1538 If you have a network (Ethernet) controller of this type, say Y and