openwrt.git: target/linux/brcm47xx-2.6/patches/120-b44_ssb_support.patch
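
For orientation before the hunks: this patch drops the b44 driver's PCI device table and registers it with the generic SSB (Sonics Silicon Backplane) bus layer instead, reading the MAC and PHY address from the SPROM rather than the PCI EEPROM window. The stripped-down skeleton below is only an illustration of that registration pattern, assuming the SSB layer shipped alongside this patch; the names are taken from the hunks themselves and the probe/remove bodies are elided, so the diff below remains the authoritative version.

#include <linux/module.h>
#include <linux/ssb/ssb.h>

/* Match the Broadcom ethernet core on the backplane, any revision,
 * instead of the old BCM4401 PCI IDs. */
static struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	SSB_DEVTABLE_END
};

static int b44_init_one(struct ssb_device *sdev, const struct ssb_device_id *ent)
{
	/* Full version (in the patch): alloc_etherdev(), read the MAC and
	 * PHY address from sdev->bus->sprom, ssb_set_drvdata(), reset the
	 * core via ssb_device_enable(), then register_netdev(). */
	return 0;
}

static void b44_remove_one(struct ssb_device *sdev)
{
	/* Full version: unregister_netdev(), free_netdev(),
	 * ssb_set_drvdata(sdev, NULL). */
}

static struct ssb_driver b44_driver = {
	.name		= "b44",
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= b44_remove_one,
};

static int __init b44_init(void)
{
	return ssb_driver_register(&b44_driver);
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);
MODULE_LICENSE("GPL");
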
1 diff -urN linux.old/drivers/net/b44.c linux.dev/drivers/net/b44.c
2 --- linux.old/drivers/net/b44.c 2006-12-11 20:32:53.000000000 +0100
3 +++ linux.dev/drivers/net/b44.c 2007-01-03 02:26:02.000000000 +0100
4 @@ -1,7 +1,9 @@
5 -/* b44.c: Broadcom 4400 device driver.
6 +/* b44.c: Broadcom 4400/47xx device driver.
7 *
8 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
9 - * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
10 + * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
11 + * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
12 + * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
13 * Copyright (C) 2006 Broadcom Corporation.
14 *
15 * Distribute under GPL.
16 @@ -20,11 +22,13 @@
17 #include <linux/delay.h>
18 #include <linux/init.h>
19 #include <linux/dma-mapping.h>
20 +#include <linux/ssb/ssb.h>
21
22 #include <asm/uaccess.h>
23 #include <asm/io.h>
24 #include <asm/irq.h>
25
26 +
27 #include "b44.h"
28
29 #define DRV_MODULE_NAME "b44"
30 @@ -86,8 +90,8 @@
31 static char version[] __devinitdata =
32 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
33
34 -MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
35 -MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
36 +MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
37 +MODULE_DESCRIPTION("Broadcom 4400/47xx 10/100 PCI ethernet driver");
38 MODULE_LICENSE("GPL");
39 MODULE_VERSION(DRV_MODULE_VERSION);
40
41 @@ -95,18 +99,11 @@
42 module_param(b44_debug, int, 0);
43 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
44
45 -static struct pci_device_id b44_pci_tbl[] = {
46 - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
47 - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
48 - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
49 - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
50 - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
51 - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
52 - { } /* terminate list with empty entry */
53 +static struct ssb_device_id b44_ssb_tbl[] = {
54 + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
55 + SSB_DEVTABLE_END
56 };
57
58 -MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
59 -
60 static void b44_halt(struct b44 *);
61 static void b44_init_rings(struct b44 *);
62
63 @@ -118,6 +115,7 @@
64
65 static int dma_desc_align_mask;
66 static int dma_desc_sync_size;
67 +static int instance;
68
69 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
70 #define _B44(x...) # x,
71 @@ -125,35 +123,24 @@
72 #undef _B44
73 };
74
75 -static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
76 - dma_addr_t dma_base,
77 - unsigned long offset,
78 - enum dma_data_direction dir)
79 -{
80 - dma_sync_single_range_for_device(&pdev->dev, dma_base,
81 - offset & dma_desc_align_mask,
82 - dma_desc_sync_size, dir);
83 -}
84 -
85 -static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
86 - dma_addr_t dma_base,
87 - unsigned long offset,
88 - enum dma_data_direction dir)
89 -{
90 - dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
91 - offset & dma_desc_align_mask,
92 - dma_desc_sync_size, dir);
93 -}
94 -
95 -static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
96 -{
97 - return readl(bp->regs + reg);
98 -}
99 -
100 -static inline void bw32(const struct b44 *bp,
101 - unsigned long reg, unsigned long val)
102 -{
103 - writel(val, bp->regs + reg);
104 +static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
105 + dma_addr_t dma_base,
106 + unsigned long offset,
107 + enum dma_data_direction dir)
108 +{
109 + dma_sync_single_range_for_device(&sdev->dev, dma_base,
110 + offset & dma_desc_align_mask,
111 + dma_desc_sync_size, dir);
112 +}
113 +
114 +static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
115 + dma_addr_t dma_base,
116 + unsigned long offset,
117 + enum dma_data_direction dir)
118 +{
119 + dma_sync_single_range_for_cpu(&sdev->dev, dma_base,
120 + offset & dma_desc_align_mask,
121 + dma_desc_sync_size, dir);
122 }
123
124 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
125 @@ -181,117 +168,29 @@
126 return 0;
127 }
128
129 -/* Sonics SiliconBackplane support routines. ROFL, you should see all the
130 - * buzz words used on this company's website :-)
131 - *
132 - * All of these routines must be invoked with bp->lock held and
133 - * interrupts disabled.
134 - */
135 -
136 -#define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
137 -#define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
138 -
139 -static u32 ssb_get_core_rev(struct b44 *bp)
140 -{
141 - return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
142 -}
143 -
144 -static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
145 -{
146 - u32 bar_orig, pci_rev, val;
147 -
148 - pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
149 - pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
150 - pci_rev = ssb_get_core_rev(bp);
151 -
152 - val = br32(bp, B44_SBINTVEC);
153 - val |= cores;
154 - bw32(bp, B44_SBINTVEC, val);
155 -
156 - val = br32(bp, SSB_PCI_TRANS_2);
157 - val |= SSB_PCI_PREF | SSB_PCI_BURST;
158 - bw32(bp, SSB_PCI_TRANS_2, val);
159 -
160 - pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
161 -
162 - return pci_rev;
163 -}
164 -
165 -static void ssb_core_disable(struct b44 *bp)
166 -{
167 - if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
168 - return;
169 -
170 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
171 - b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
172 - b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
173 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
174 - SBTMSLOW_REJECT | SBTMSLOW_RESET));
175 - br32(bp, B44_SBTMSLOW);
176 - udelay(1);
177 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
178 - br32(bp, B44_SBTMSLOW);
179 - udelay(1);
180 -}
181 -
182 -static void ssb_core_reset(struct b44 *bp)
183 +static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
184 {
185 u32 val;
186
187 - ssb_core_disable(bp);
188 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
189 - br32(bp, B44_SBTMSLOW);
190 - udelay(1);
191 -
192 - /* Clear SERR if set, this is a hw bug workaround. */
193 - if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
194 - bw32(bp, B44_SBTMSHIGH, 0);
195 -
196 - val = br32(bp, B44_SBIMSTATE);
197 - if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
198 - bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
199 -
200 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
201 - br32(bp, B44_SBTMSLOW);
202 - udelay(1);
203 -
204 - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
205 - br32(bp, B44_SBTMSLOW);
206 - udelay(1);
207 -}
208 + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
209 + (index << CAM_CTRL_INDEX_SHIFT)));
210
211 -static int ssb_core_unit(struct b44 *bp)
212 -{
213 -#if 0
214 - u32 val = br32(bp, B44_SBADMATCH0);
215 - u32 base;
216 + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
217
218 - type = val & SBADMATCH0_TYPE_MASK;
219 - switch (type) {
220 - case 0:
221 - base = val & SBADMATCH0_BS0_MASK;
222 - break;
223 + val = br32(bp, B44_CAM_DATA_LO);
224
225 - case 1:
226 - base = val & SBADMATCH0_BS1_MASK;
227 - break;
228 + data[2] = (val >> 24) & 0xFF;
229 + data[3] = (val >> 16) & 0xFF;
230 + data[4] = (val >> 8) & 0xFF;
231 + data[5] = (val >> 0) & 0xFF;
232
233 - case 2:
234 - default:
235 - base = val & SBADMATCH0_BS2_MASK;
236 - break;
237 - };
238 -#endif
239 - return 0;
240 -}
241 + val = br32(bp, B44_CAM_DATA_HI);
242
243 -static int ssb_is_core_up(struct b44 *bp)
244 -{
245 - return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
246 - == SBTMSLOW_CLOCK);
247 + data[0] = (val >> 8) & 0xFF;
248 + data[1] = (val >> 0) & 0xFF;
249 }
250
251 -static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
252 +static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
253 {
254 u32 val;
255
256 @@ -327,14 +226,14 @@
257 bw32(bp, B44_IMASK, bp->imask);
258 }
259
260 -static int b44_readphy(struct b44 *bp, int reg, u32 *val)
261 +static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
262 {
263 int err;
264
265 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
266 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
267 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
268 - (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
269 + (phy_addr << MDIO_DATA_PMD_SHIFT) |
270 (reg << MDIO_DATA_RA_SHIFT) |
271 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
272 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
273 @@ -343,18 +242,34 @@
274 return err;
275 }
276
277 -static int b44_writephy(struct b44 *bp, int reg, u32 val)
278 +static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
279 {
280 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
281 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
282 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
283 - (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
284 + (phy_addr << MDIO_DATA_PMD_SHIFT) |
285 (reg << MDIO_DATA_RA_SHIFT) |
286 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
287 (val & MDIO_DATA_DATA)));
288 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
289 }
290
291 +static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
292 +{
293 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
294 + return 0;
295 +
296 + return __b44_readphy(bp, bp->phy_addr, reg, val);
297 +}
298 +
299 +static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
300 +{
301 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
302 + return 0;
303 +
304 + return __b44_writephy(bp, bp->phy_addr, reg, val);
305 +}
306 +
307 /* miilib interface */
308 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
309 * due to code existing before miilib use was added to this driver.
310 @@ -383,6 +298,8 @@
311 u32 val;
312 int err;
313
314 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
315 + return 0;
316 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
317 if (err)
318 return err;
319 @@ -441,11 +358,27 @@
320 __b44_set_flow_ctrl(bp, pause_enab);
321 }
322
323 +
324 +extern char *nvram_get(char *name); //FIXME: move elsewhere
325 static int b44_setup_phy(struct b44 *bp)
326 {
327 u32 val;
328 int err;
329
330 + /*
331 + * workaround for bad hardware design in Linksys WAP54G v1.0
332 + * see https://dev.openwrt.org/ticket/146
333 + * check and reset bit "isolate"
334 + */
335 + if ((atoi(nvram_get("boardnum")) == 2) &&
336 + (__b44_readphy(bp, 0, MII_BMCR, &val) == 0) &&
337 + (val & BMCR_ISOLATE) &&
338 + (__b44_writephy(bp, 0, MII_BMCR, val & ~BMCR_ISOLATE) != 0)) {
339 + printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
340 + }
341 +
342 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
343 + return 0;
344 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
345 goto out;
346 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
347 @@ -541,6 +474,19 @@
348 {
349 u32 bmsr, aux;
350
351 + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
352 + bp->flags |= B44_FLAG_100_BASE_T;
353 + bp->flags |= B44_FLAG_FULL_DUPLEX;
354 + if (!netif_carrier_ok(bp->dev)) {
355 + u32 val = br32(bp, B44_TX_CTRL);
356 + val |= TX_CTRL_DUPLEX;
357 + bw32(bp, B44_TX_CTRL, val);
358 + netif_carrier_on(bp->dev);
359 + b44_link_report(bp);
360 + }
361 + return;
362 + }
363 +
364 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
365 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
366 (bmsr != 0xffff)) {
367 @@ -617,10 +563,10 @@
368
369 BUG_ON(skb == NULL);
370
371 - pci_unmap_single(bp->pdev,
372 + dma_unmap_single(&bp->sdev->dev,
373 pci_unmap_addr(rp, mapping),
374 skb->len,
375 - PCI_DMA_TODEVICE);
376 + DMA_TO_DEVICE);
377 rp->skb = NULL;
378 dev_kfree_skb_irq(skb);
379 }
380 @@ -656,10 +602,10 @@
381 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
382 if (skb == NULL)
383 return -ENOMEM;
384 -
385 - mapping = pci_map_single(bp->pdev, skb->data,
386 +
387 + mapping = dma_map_single(&bp->sdev->dev, skb->data,
388 RX_PKT_BUF_SZ,
389 - PCI_DMA_FROMDEVICE);
390 + DMA_FROM_DEVICE);
391
392 /* Hardware bug work-around, the chip is unable to do PCI DMA
393 to/from anything above 1GB :-( */
394 @@ -667,18 +613,18 @@
395 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
396 /* Sigh... */
397 if (!dma_mapping_error(mapping))
398 - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
399 + dma_unmap_single(&bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
400 dev_kfree_skb_any(skb);
401 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
402 if (skb == NULL)
403 return -ENOMEM;
404 - mapping = pci_map_single(bp->pdev, skb->data,
405 + mapping = dma_map_single(&bp->sdev->dev, skb->data,
406 RX_PKT_BUF_SZ,
407 - PCI_DMA_FROMDEVICE);
408 + DMA_FROM_DEVICE);
409 if (dma_mapping_error(mapping) ||
410 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
411 if (!dma_mapping_error(mapping))
412 - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
413 + dma_unmap_single(&bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
414 dev_kfree_skb_any(skb);
415 return -ENOMEM;
416 }
417 @@ -707,9 +653,9 @@
418 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
419
420 if (bp->flags & B44_FLAG_RX_RING_HACK)
421 - b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
422 - dest_idx * sizeof(dp),
423 - DMA_BIDIRECTIONAL);
424 + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
425 + dest_idx * sizeof(dp),
426 + DMA_BIDIRECTIONAL);
427
428 return RX_PKT_BUF_SZ;
429 }
430 @@ -736,9 +682,9 @@
431 pci_unmap_addr(src_map, mapping));
432
433 if (bp->flags & B44_FLAG_RX_RING_HACK)
434 - b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
435 - src_idx * sizeof(src_desc),
436 - DMA_BIDIRECTIONAL);
437 + b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
438 + src_idx * sizeof(src_desc),
439 + DMA_BIDIRECTIONAL);
440
441 ctrl = src_desc->ctrl;
442 if (dest_idx == (B44_RX_RING_SIZE - 1))
443 @@ -752,13 +698,13 @@
444 src_map->skb = NULL;
445
446 if (bp->flags & B44_FLAG_RX_RING_HACK)
447 - b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
448 - dest_idx * sizeof(dest_desc),
449 - DMA_BIDIRECTIONAL);
450 + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
451 + dest_idx * sizeof(dest_desc),
452 + DMA_BIDIRECTIONAL);
453
454 - pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
455 + dma_sync_single_for_device(&bp->sdev->dev, le32_to_cpu(src_desc->addr),
456 RX_PKT_BUF_SZ,
457 - PCI_DMA_FROMDEVICE);
458 + DMA_FROM_DEVICE);
459 }
460
461 static int b44_rx(struct b44 *bp, int budget)
462 @@ -778,9 +724,9 @@
463 struct rx_header *rh;
464 u16 len;
465
466 - pci_dma_sync_single_for_cpu(bp->pdev, map,
467 + dma_sync_single_for_cpu(&bp->sdev->dev, map,
468 RX_PKT_BUF_SZ,
469 - PCI_DMA_FROMDEVICE);
470 + DMA_FROM_DEVICE);
471 rh = (struct rx_header *) skb->data;
472 len = le16_to_cpu(rh->len);
473 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
474 @@ -812,11 +758,11 @@
475 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
476 if (skb_size < 0)
477 goto drop_it;
478 - pci_unmap_single(bp->pdev, map,
479 - skb_size, PCI_DMA_FROMDEVICE);
480 + dma_unmap_single(&bp->sdev->dev, map,
481 + skb_size, DMA_FROM_DEVICE);
482 /* Leave out rx_header */
483 - skb_put(skb, len+bp->rx_offset);
484 - skb_pull(skb,bp->rx_offset);
485 + skb_put(skb, len+bp->rx_offset);
486 + skb_pull(skb,bp->rx_offset);
487 } else {
488 struct sk_buff *copy_skb;
489
490 @@ -986,23 +932,23 @@
491 goto err_out;
492 }
493
494 - mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
495 + mapping = dma_map_single(&bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
496 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
497 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
498 if (!dma_mapping_error(mapping))
499 - pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
500 + dma_unmap_single(&bp->sdev->dev, mapping, len, DMA_TO_DEVICE);
501
502 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
503 GFP_ATOMIC|GFP_DMA);
504 if (!bounce_skb)
505 goto err_out;
506
507 - mapping = pci_map_single(bp->pdev, bounce_skb->data,
508 - len, PCI_DMA_TODEVICE);
509 + mapping = dma_map_single(&bp->sdev->dev, bounce_skb->data,
510 + len, DMA_TO_DEVICE);
511 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
512 if (!dma_mapping_error(mapping))
513 - pci_unmap_single(bp->pdev, mapping,
514 - len, PCI_DMA_TODEVICE);
515 + dma_unmap_single(&bp->sdev->dev, mapping,
516 + len, DMA_TO_DEVICE);
517 dev_kfree_skb_any(bounce_skb);
518 goto err_out;
519 }
520 @@ -1025,9 +971,9 @@
521 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
522
523 if (bp->flags & B44_FLAG_TX_RING_HACK)
524 - b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
525 - entry * sizeof(bp->tx_ring[0]),
526 - DMA_TO_DEVICE);
527 + b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
528 + entry * sizeof(bp->tx_ring[0]),
529 + DMA_TO_DEVICE);
530
531 entry = NEXT_TX(entry);
532
533 @@ -1100,10 +1046,10 @@
534
535 if (rp->skb == NULL)
536 continue;
537 - pci_unmap_single(bp->pdev,
538 + dma_unmap_single(&bp->sdev->dev,
539 pci_unmap_addr(rp, mapping),
540 RX_PKT_BUF_SZ,
541 - PCI_DMA_FROMDEVICE);
542 + DMA_FROM_DEVICE);
543 dev_kfree_skb_any(rp->skb);
544 rp->skb = NULL;
545 }
546 @@ -1114,10 +1060,10 @@
547
548 if (rp->skb == NULL)
549 continue;
550 - pci_unmap_single(bp->pdev,
551 + dma_unmap_single(&bp->sdev->dev,
552 pci_unmap_addr(rp, mapping),
553 rp->skb->len,
554 - PCI_DMA_TODEVICE);
555 + DMA_TO_DEVICE);
556 dev_kfree_skb_any(rp->skb);
557 rp->skb = NULL;
558 }
559 @@ -1139,14 +1085,14 @@
560 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
561
562 if (bp->flags & B44_FLAG_RX_RING_HACK)
563 - dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
564 - DMA_TABLE_BYTES,
565 - PCI_DMA_BIDIRECTIONAL);
566 + dma_sync_single_for_device(&bp->sdev->dev, bp->rx_ring_dma,
567 + DMA_TABLE_BYTES,
568 + DMA_BIDIRECTIONAL);
569
570 if (bp->flags & B44_FLAG_TX_RING_HACK)
571 - dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
572 - DMA_TABLE_BYTES,
573 - PCI_DMA_TODEVICE);
574 + dma_sync_single_for_device(&bp->sdev->dev, bp->tx_ring_dma,
575 + DMA_TABLE_BYTES,
576 + DMA_TO_DEVICE);
577
578 for (i = 0; i < bp->rx_pending; i++) {
579 if (b44_alloc_rx_skb(bp, -1, i) < 0)
580 @@ -1166,24 +1112,24 @@
581 bp->tx_buffers = NULL;
582 if (bp->rx_ring) {
583 if (bp->flags & B44_FLAG_RX_RING_HACK) {
584 - dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
585 - DMA_TABLE_BYTES,
586 - DMA_BIDIRECTIONAL);
587 + dma_unmap_single(&bp->sdev->dev, bp->rx_ring_dma,
588 + DMA_TABLE_BYTES,
589 + DMA_BIDIRECTIONAL);
590 kfree(bp->rx_ring);
591 } else
592 - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
593 + dma_free_coherent(&bp->sdev->dev, DMA_TABLE_BYTES,
594 bp->rx_ring, bp->rx_ring_dma);
595 bp->rx_ring = NULL;
596 bp->flags &= ~B44_FLAG_RX_RING_HACK;
597 }
598 if (bp->tx_ring) {
599 if (bp->flags & B44_FLAG_TX_RING_HACK) {
600 - dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
601 - DMA_TABLE_BYTES,
602 - DMA_TO_DEVICE);
603 + dma_unmap_single(&bp->sdev->dev, bp->tx_ring_dma,
604 + DMA_TABLE_BYTES,
605 + DMA_TO_DEVICE);
606 kfree(bp->tx_ring);
607 } else
608 - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
609 + dma_free_coherent(&bp->sdev->dev, DMA_TABLE_BYTES,
610 bp->tx_ring, bp->tx_ring_dma);
611 bp->tx_ring = NULL;
612 bp->flags &= ~B44_FLAG_TX_RING_HACK;
613 @@ -1209,7 +1155,7 @@
614 goto out_err;
615
616 size = DMA_TABLE_BYTES;
617 - bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
618 + bp->rx_ring = dma_alloc_coherent(&bp->sdev->dev, size, &bp->rx_ring_dma, GFP_ATOMIC);
619 if (!bp->rx_ring) {
620 /* Allocation may have failed due to pci_alloc_consistent
621 insisting on use of GFP_DMA, which is more restrictive
622 @@ -1221,9 +1167,9 @@
623 if (!rx_ring)
624 goto out_err;
625
626 - rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
627 - DMA_TABLE_BYTES,
628 - DMA_BIDIRECTIONAL);
629 + rx_ring_dma = dma_map_single(&bp->sdev->dev, rx_ring,
630 + DMA_TABLE_BYTES,
631 + DMA_BIDIRECTIONAL);
632
633 if (dma_mapping_error(rx_ring_dma) ||
634 rx_ring_dma + size > DMA_30BIT_MASK) {
635 @@ -1236,9 +1182,9 @@
636 bp->flags |= B44_FLAG_RX_RING_HACK;
637 }
638
639 - bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
640 + bp->tx_ring = dma_alloc_coherent(&bp->sdev->dev, size, &bp->tx_ring_dma, GFP_ATOMIC);
641 if (!bp->tx_ring) {
642 - /* Allocation may have failed due to pci_alloc_consistent
643 + /* Allocation may have failed due to dma_alloc_coherent
644 insisting on use of GFP_DMA, which is more restrictive
645 than necessary... */
646 struct dma_desc *tx_ring;
647 @@ -1248,9 +1194,9 @@
648 if (!tx_ring)
649 goto out_err;
650
651 - tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
652 - DMA_TABLE_BYTES,
653 - DMA_TO_DEVICE);
654 + tx_ring_dma = dma_map_single(&bp->sdev->dev, tx_ring,
655 + DMA_TABLE_BYTES,
656 + DMA_TO_DEVICE);
657
658 if (dma_mapping_error(tx_ring_dma) ||
659 tx_ring_dma + size > DMA_30BIT_MASK) {
660 @@ -1285,7 +1231,9 @@
661 /* bp->lock is held. */
662 static void b44_chip_reset(struct b44 *bp)
663 {
664 - if (ssb_is_core_up(bp)) {
665 + struct ssb_device *sdev = bp->sdev;
666 +
667 + if (ssb_device_is_enabled(bp->sdev)) {
668 bw32(bp, B44_RCV_LAZY, 0);
669 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
670 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
671 @@ -1297,19 +1245,23 @@
672 }
673 bw32(bp, B44_DMARX_CTRL, 0);
674 bp->rx_prod = bp->rx_cons = 0;
675 - } else {
676 - ssb_pci_setup(bp, (bp->core_unit == 0 ?
677 - SBINTVEC_ENET0 :
678 - SBINTVEC_ENET1));
679 }
680
681 - ssb_core_reset(bp);
682 -
683 + ssb_device_enable(bp->sdev, 0);
684 b44_clear_stats(bp);
685
686 - /* Make PHY accessible. */
687 - bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
688 + switch (sdev->bus->bustype) {
689 + case SSB_BUSTYPE_SSB:
690 + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
691 + (((ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO)
692 + & MDIO_CTRL_MAXF_MASK)));
693 + break;
694 + case SSB_BUSTYPE_PCI:
695 + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
696 (0x0d & MDIO_CTRL_MAXF_MASK)));
697 + break;
698 + }
699 +
700 br32(bp, B44_MDIO_CTRL);
701
702 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
703 @@ -1352,6 +1304,7 @@
704 {
705 struct b44 *bp = netdev_priv(dev);
706 struct sockaddr *addr = p;
707 + u32 val;
708
709 if (netif_running(dev))
710 return -EBUSY;
711 @@ -1362,7 +1315,11 @@
712 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
713
714 spin_lock_irq(&bp->lock);
715 - __b44_set_mac_addr(bp);
716 +
717 + val = br32(bp, B44_RXCONFIG);
718 + if (!(val & RXCONFIG_CAM_ABSENT))
719 + __b44_set_mac_addr(bp);
720 +
721 spin_unlock_irq(&bp->lock);
722
723 return 0;
724 @@ -1448,18 +1405,6 @@
725 return err;
726 }
727
728 -#if 0
729 -/*static*/ void b44_dump_state(struct b44 *bp)
730 -{
731 - u32 val32, val32_2, val32_3, val32_4, val32_5;
732 - u16 val16;
733 -
734 - pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
735 - printk("DEBUG: PCI status [%04x] \n", val16);
736 -
737 -}
738 -#endif
739 -
740 #ifdef CONFIG_NET_POLL_CONTROLLER
741 /*
742 * Polling receive - used by netconsole and other diagnostic tools
743 @@ -1574,7 +1519,6 @@
744 static void b44_setup_wol(struct b44 *bp)
745 {
746 u32 val;
747 - u16 pmval;
748
749 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
750
751 @@ -1598,13 +1542,6 @@
752 } else {
753 b44_setup_pseudo_magicp(bp);
754 }
755 -
756 - val = br32(bp, B44_SBTMSLOW);
757 - bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
758 -
759 - pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
760 - pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
761 -
762 }
763
764 static int b44_close(struct net_device *dev)
765 @@ -1704,7 +1641,7 @@
766
767 val = br32(bp, B44_RXCONFIG);
768 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
769 - if (dev->flags & IFF_PROMISC) {
770 + if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
771 val |= RXCONFIG_PROMISC;
772 bw32(bp, B44_RXCONFIG, val);
773 } else {
774 @@ -1751,12 +1688,8 @@
775
776 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
777 {
778 - struct b44 *bp = netdev_priv(dev);
779 - struct pci_dev *pci_dev = bp->pdev;
780 -
781 strcpy (info->driver, DRV_MODULE_NAME);
782 strcpy (info->version, DRV_MODULE_VERSION);
783 - strcpy (info->bus_info, pci_name(pci_dev));
784 }
785
786 static int b44_nway_reset(struct net_device *dev)
787 @@ -2040,6 +1973,245 @@
788 .get_perm_addr = ethtool_op_get_perm_addr,
789 };
790
791 +static int b44_ethtool_ioctl (struct net_device *dev, void __user *useraddr)
792 +{
793 + struct b44 *bp = dev->priv;
794 + u32 ethcmd;
795 +
796 + if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
797 + return -EFAULT;
798 +
799 + switch (ethcmd) {
800 + case ETHTOOL_GDRVINFO: {
801 + struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
802 + strcpy (info.driver, DRV_MODULE_NAME);
803 + strcpy (info.version, DRV_MODULE_VERSION);
804 + memset(&info.fw_version, 0, sizeof(info.fw_version));
805 + info.eedump_len = 0;
806 + info.regdump_len = 0;
807 + if (copy_to_user (useraddr, &info, sizeof (info)))
808 + return -EFAULT;
809 + return 0;
810 + }
811 +
812 + case ETHTOOL_GSET: {
813 + struct ethtool_cmd cmd = { ETHTOOL_GSET };
814 +
815 + if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
816 + return -EAGAIN;
817 + cmd.supported = (SUPPORTED_Autoneg);
818 + cmd.supported |= (SUPPORTED_100baseT_Half |
819 + SUPPORTED_100baseT_Full |
820 + SUPPORTED_10baseT_Half |
821 + SUPPORTED_10baseT_Full |
822 + SUPPORTED_MII);
823 +
824 + cmd.advertising = 0;
825 + if (bp->flags & B44_FLAG_ADV_10HALF)
826 + cmd.advertising |= ADVERTISE_10HALF;
827 + if (bp->flags & B44_FLAG_ADV_10FULL)
828 + cmd.advertising |= ADVERTISE_10FULL;
829 + if (bp->flags & B44_FLAG_ADV_100HALF)
830 + cmd.advertising |= ADVERTISE_100HALF;
831 + if (bp->flags & B44_FLAG_ADV_100FULL)
832 + cmd.advertising |= ADVERTISE_100FULL;
833 + cmd.advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
834 + cmd.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
835 + SPEED_100 : SPEED_10;
836 + cmd.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
837 + DUPLEX_FULL : DUPLEX_HALF;
838 + cmd.port = 0;
839 + cmd.phy_address = bp->phy_addr;
840 + cmd.transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
841 + XCVR_INTERNAL : XCVR_EXTERNAL;
842 + cmd.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
843 + AUTONEG_DISABLE : AUTONEG_ENABLE;
844 + cmd.maxtxpkt = 0;
845 + cmd.maxrxpkt = 0;
846 + if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
847 + return -EFAULT;
848 + return 0;
849 + }
850 + case ETHTOOL_SSET: {
851 + struct ethtool_cmd cmd;
852 +
853 + if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
854 + return -EAGAIN;
855 +
856 + if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
857 + return -EFAULT;
858 +
859 + /* We do not support gigabit. */
860 + if (cmd.autoneg == AUTONEG_ENABLE) {
861 + if (cmd.advertising &
862 + (ADVERTISED_1000baseT_Half |
863 + ADVERTISED_1000baseT_Full))
864 + return -EINVAL;
865 + } else if ((cmd.speed != SPEED_100 &&
866 + cmd.speed != SPEED_10) ||
867 + (cmd.duplex != DUPLEX_HALF &&
868 + cmd.duplex != DUPLEX_FULL)) {
869 + return -EINVAL;
870 + }
871 +
872 + spin_lock_irq(&bp->lock);
873 +
874 + if (cmd.autoneg == AUTONEG_ENABLE) {
875 + bp->flags &= ~B44_FLAG_FORCE_LINK;
876 + bp->flags &= ~(B44_FLAG_ADV_10HALF |
877 + B44_FLAG_ADV_10FULL |
878 + B44_FLAG_ADV_100HALF |
879 + B44_FLAG_ADV_100FULL);
880 + if (cmd.advertising & ADVERTISE_10HALF)
881 + bp->flags |= B44_FLAG_ADV_10HALF;
882 + if (cmd.advertising & ADVERTISE_10FULL)
883 + bp->flags |= B44_FLAG_ADV_10FULL;
884 + if (cmd.advertising & ADVERTISE_100HALF)
885 + bp->flags |= B44_FLAG_ADV_100HALF;
886 + if (cmd.advertising & ADVERTISE_100FULL)
887 + bp->flags |= B44_FLAG_ADV_100FULL;
888 + } else {
889 + bp->flags |= B44_FLAG_FORCE_LINK;
890 + if (cmd.speed == SPEED_100)
891 + bp->flags |= B44_FLAG_100_BASE_T;
892 + if (cmd.duplex == DUPLEX_FULL)
893 + bp->flags |= B44_FLAG_FULL_DUPLEX;
894 + }
895 +
896 + b44_setup_phy(bp);
897 +
898 + spin_unlock_irq(&bp->lock);
899 +
900 + return 0;
901 + }
902 +
903 + case ETHTOOL_GMSGLVL: {
904 + struct ethtool_value edata = { ETHTOOL_GMSGLVL };
905 + edata.data = bp->msg_enable;
906 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
907 + return -EFAULT;
908 + return 0;
909 + }
910 + case ETHTOOL_SMSGLVL: {
911 + struct ethtool_value edata;
912 + if (copy_from_user(&edata, useraddr, sizeof(edata)))
913 + return -EFAULT;
914 + bp->msg_enable = edata.data;
915 + return 0;
916 + }
917 + case ETHTOOL_NWAY_RST: {
918 + u32 bmcr;
919 + int r;
920 +
921 + spin_lock_irq(&bp->lock);
922 + b44_readphy(bp, MII_BMCR, &bmcr);
923 + b44_readphy(bp, MII_BMCR, &bmcr);
924 + r = -EINVAL;
925 + if (bmcr & BMCR_ANENABLE) {
926 + b44_writephy(bp, MII_BMCR,
927 + bmcr | BMCR_ANRESTART);
928 + r = 0;
929 + }
930 + spin_unlock_irq(&bp->lock);
931 +
932 + return r;
933 + }
934 + case ETHTOOL_GLINK: {
935 + struct ethtool_value edata = { ETHTOOL_GLINK };
936 + edata.data = netif_carrier_ok(bp->dev) ? 1 : 0;
937 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
938 + return -EFAULT;
939 + return 0;
940 + }
941 + case ETHTOOL_GRINGPARAM: {
942 + struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
943 +
944 + ering.rx_max_pending = B44_RX_RING_SIZE - 1;
945 + ering.rx_pending = bp->rx_pending;
946 +
947 + /* XXX ethtool lacks a tx_max_pending, oops... */
948 +
949 + if (copy_to_user(useraddr, &ering, sizeof(ering)))
950 + return -EFAULT;
951 + return 0;
952 + }
953 + case ETHTOOL_SRINGPARAM: {
954 + struct ethtool_ringparam ering;
955 +
956 + if (copy_from_user(&ering, useraddr, sizeof(ering)))
957 + return -EFAULT;
958 +
959 + if ((ering.rx_pending > B44_RX_RING_SIZE - 1) ||
960 + (ering.rx_mini_pending != 0) ||
961 + (ering.rx_jumbo_pending != 0) ||
962 + (ering.tx_pending > B44_TX_RING_SIZE - 1))
963 + return -EINVAL;
964 +
965 + spin_lock_irq(&bp->lock);
966 +
967 + bp->rx_pending = ering.rx_pending;
968 + bp->tx_pending = ering.tx_pending;
969 +
970 + b44_halt(bp);
971 + b44_init_rings(bp);
972 + b44_init_hw(bp, 1);
973 + netif_wake_queue(bp->dev);
974 + spin_unlock_irq(&bp->lock);
975 +
976 + b44_enable_ints(bp);
977 +
978 + return 0;
979 + }
980 + case ETHTOOL_GPAUSEPARAM: {
981 + struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
982 +
983 + epause.autoneg =
984 + (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
985 + epause.rx_pause =
986 + (bp->flags & B44_FLAG_RX_PAUSE) != 0;
987 + epause.tx_pause =
988 + (bp->flags & B44_FLAG_TX_PAUSE) != 0;
989 + if (copy_to_user(useraddr, &epause, sizeof(epause)))
990 + return -EFAULT;
991 + return 0;
992 + }
993 + case ETHTOOL_SPAUSEPARAM: {
994 + struct ethtool_pauseparam epause;
995 +
996 + if (copy_from_user(&epause, useraddr, sizeof(epause)))
997 + return -EFAULT;
998 +
999 + spin_lock_irq(&bp->lock);
1000 + if (epause.autoneg)
1001 + bp->flags |= B44_FLAG_PAUSE_AUTO;
1002 + else
1003 + bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1004 + if (epause.rx_pause)
1005 + bp->flags |= B44_FLAG_RX_PAUSE;
1006 + else
1007 + bp->flags &= ~B44_FLAG_RX_PAUSE;
1008 + if (epause.tx_pause)
1009 + bp->flags |= B44_FLAG_TX_PAUSE;
1010 + else
1011 + bp->flags &= ~B44_FLAG_TX_PAUSE;
1012 + if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1013 + b44_halt(bp);
1014 + b44_init_rings(bp);
1015 + b44_init_hw(bp, 1);
1016 + } else {
1017 + __b44_set_flow_ctrl(bp, bp->flags);
1018 + }
1019 + spin_unlock_irq(&bp->lock);
1020 +
1021 + b44_enable_ints(bp);
1022 +
1023 + return 0;
1024 + }
1025 + };
1026 +
1027 + return -EOPNOTSUPP;
1028 +}
1029 +
1030 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1031 {
1032 struct mii_ioctl_data *data = if_mii(ifr);
1033 @@ -2049,40 +2221,64 @@
1034 if (!netif_running(dev))
1035 goto out;
1036
1037 - spin_lock_irq(&bp->lock);
1038 - err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1039 - spin_unlock_irq(&bp->lock);
1040 -out:
1041 - return err;
1042 -}
1043 + switch (cmd) {
1044 + case SIOCETHTOOL:
1045 + return b44_ethtool_ioctl(dev, (void __user*) ifr->ifr_data);
1046
1047 -/* Read 128-bytes of EEPROM. */
1048 -static int b44_read_eeprom(struct b44 *bp, u8 *data)
1049 -{
1050 - long i;
1051 - __le16 *ptr = (__le16 *) data;
1052 + case SIOCGMIIPHY:
1053 + data->phy_id = bp->phy_addr;
1054
1055 - for (i = 0; i < 128; i += 2)
1056 - ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
1057 + /* fallthru */
1058 + case SIOCGMIIREG: {
1059 + u32 mii_regval;
1060 + spin_lock_irq(&bp->lock);
1061 + err = __b44_readphy(bp, data->phy_id & 0x1f, data->reg_num & 0x1f, &mii_regval);
1062 + spin_unlock_irq(&bp->lock);
1063
1064 - return 0;
1065 + data->val_out = mii_regval;
1066 +
1067 + return err;
1068 + }
1069 +
1070 + case SIOCSMIIREG:
1071 + if (!capable(CAP_NET_ADMIN))
1072 + return -EPERM;
1073 +
1074 + spin_lock_irq(&bp->lock);
1075 + err = __b44_writephy(bp, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1076 + spin_unlock_irq(&bp->lock);
1077 +
1078 + return err;
1079 +
1080 + default:
1081 + break;
1082 + };
1083 + return -EOPNOTSUPP;
1084 +
1085 +out:
1086 + return err;
1087 }
1088
1089 static int __devinit b44_get_invariants(struct b44 *bp)
1090 {
1091 - u8 eeprom[128];
1092 - int err;
1093 + struct ssb_device *sdev = bp->sdev;
1094 + int err = 0;
1095 + u8 *addr;
1096
1097 - err = b44_read_eeprom(bp, &eeprom[0]);
1098 - if (err)
1099 - goto out;
1100 + bp->dma_offset = ssb_dma_translation(sdev);
1101
1102 - bp->dev->dev_addr[0] = eeprom[79];
1103 - bp->dev->dev_addr[1] = eeprom[78];
1104 - bp->dev->dev_addr[2] = eeprom[81];
1105 - bp->dev->dev_addr[3] = eeprom[80];
1106 - bp->dev->dev_addr[4] = eeprom[83];
1107 - bp->dev->dev_addr[5] = eeprom[82];
1108 + switch (instance) {
1109 + case 1:
1110 + addr = sdev->bus->sprom.r1.et0mac;
1111 + bp->phy_addr = sdev->bus->sprom.r1.et0phyaddr;
1112 + break;
1113 + default:
1114 + addr = sdev->bus->sprom.r1.et1mac;
1115 + bp->phy_addr = sdev->bus->sprom.r1.et1phyaddr;
1116 + break;
1117 + }
1118 +
1119 + memcpy(bp->dev->dev_addr, addr, 6);
1120
1121 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
1122 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
1123 @@ -2091,108 +2287,52 @@
1124
1125 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1126
1127 - bp->phy_addr = eeprom[90] & 0x1f;
1128 -
1129 /* With this, plus the rx_header prepended to the data by the
1130 * hardware, we'll land the ethernet header on a 2-byte boundary.
1131 */
1132 bp->rx_offset = 30;
1133 -
1134 bp->imask = IMASK_DEF;
1135 -
1136 - bp->core_unit = ssb_core_unit(bp);
1137 - bp->dma_offset = SB_PCI_DMA;
1138 -
1139 /* XXX - really required?
1140 bp->flags |= B44_FLAG_BUGGY_TXPTR;
1141 - */
1142 + */
1143
1144 - if (ssb_get_core_rev(bp) >= 7)
1145 - bp->flags |= B44_FLAG_B0_ANDLATER;
1146 -
1147 -out:
1148 return err;
1149 }
1150
1151 -static int __devinit b44_init_one(struct pci_dev *pdev,
1152 - const struct pci_device_id *ent)
1153 +static int __devinit b44_init_one(struct ssb_device *sdev,
1154 + const struct ssb_device_id *ent)
1155 {
1156 static int b44_version_printed = 0;
1157 - unsigned long b44reg_base, b44reg_len;
1158 struct net_device *dev;
1159 struct b44 *bp;
1160 int err, i;
1161
1162 + instance++;
1163 +
1164 if (b44_version_printed++ == 0)
1165 printk(KERN_INFO "%s", version);
1166
1167 - err = pci_enable_device(pdev);
1168 - if (err) {
1169 - dev_err(&pdev->dev, "Cannot enable PCI device, "
1170 - "aborting.\n");
1171 - return err;
1172 - }
1173 -
1174 - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1175 - dev_err(&pdev->dev,
1176 - "Cannot find proper PCI device "
1177 - "base address, aborting.\n");
1178 - err = -ENODEV;
1179 - goto err_out_disable_pdev;
1180 - }
1181 -
1182 - err = pci_request_regions(pdev, DRV_MODULE_NAME);
1183 - if (err) {
1184 - dev_err(&pdev->dev,
1185 - "Cannot obtain PCI resources, aborting.\n");
1186 - goto err_out_disable_pdev;
1187 - }
1188 -
1189 - pci_set_master(pdev);
1190 -
1191 - err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
1192 - if (err) {
1193 - dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
1194 - goto err_out_free_res;
1195 - }
1196 -
1197 - err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
1198 - if (err) {
1199 - dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
1200 - goto err_out_free_res;
1201 - }
1202 -
1203 - b44reg_base = pci_resource_start(pdev, 0);
1204 - b44reg_len = pci_resource_len(pdev, 0);
1205 -
1206 dev = alloc_etherdev(sizeof(*bp));
1207 if (!dev) {
1208 - dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
1209 + dev_err(&sdev->dev, "Etherdev alloc failed, aborting.\n");
1210 err = -ENOMEM;
1211 - goto err_out_free_res;
1212 + goto out;
1213 }
1214
1215 SET_MODULE_OWNER(dev);
1216 - SET_NETDEV_DEV(dev,&pdev->dev);
1217 + SET_NETDEV_DEV(dev,&sdev->dev);
1218
1219 /* No interesting netdevice features in this card... */
1220 dev->features |= 0;
1221
1222 bp = netdev_priv(dev);
1223 - bp->pdev = pdev;
1224 + bp->sdev = sdev;
1225 bp->dev = dev;
1226
1227 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1228
1229 spin_lock_init(&bp->lock);
1230
1231 - bp->regs = ioremap(b44reg_base, b44reg_len);
1232 - if (bp->regs == 0UL) {
1233 - dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
1234 - err = -ENOMEM;
1235 - goto err_out_free_dev;
1236 - }
1237 -
1238 bp->rx_pending = B44_DEF_RX_RING_PENDING;
1239 bp->tx_pending = B44_DEF_TX_RING_PENDING;
1240
1241 @@ -2211,16 +2351,16 @@
1242 dev->poll_controller = b44_poll_controller;
1243 #endif
1244 dev->change_mtu = b44_change_mtu;
1245 - dev->irq = pdev->irq;
1246 + dev->irq = sdev->irq;
1247 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
1248
1249 netif_carrier_off(dev);
1250
1251 err = b44_get_invariants(bp);
1252 if (err) {
1253 - dev_err(&pdev->dev,
1254 + dev_err(&sdev->dev,
1255 "Problem fetching invariants of chip, aborting.\n");
1256 - goto err_out_iounmap;
1257 + goto err_out_free_dev;
1258 }
1259
1260 bp->mii_if.dev = dev;
1261 @@ -2239,61 +2379,52 @@
1262
1263 err = register_netdev(dev);
1264 if (err) {
1265 - dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1266 - goto err_out_iounmap;
1267 + dev_err(&sdev->dev, "Cannot register net device, aborting.\n");
1268 + goto out;
1269 }
1270
1271 - pci_set_drvdata(pdev, dev);
1272 -
1273 - pci_save_state(bp->pdev);
1274 + ssb_set_drvdata(sdev, dev);
1275
1276 /* Chip reset provides power to the b44 MAC & PCI cores, which
1277 * is necessary for MAC register access.
1278 */
1279 b44_chip_reset(bp);
1280
1281 - printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
1282 + printk(KERN_INFO "%s: Broadcom 10/100BaseT Ethernet ", dev->name);
1283 for (i = 0; i < 6; i++)
1284 printk("%2.2x%c", dev->dev_addr[i],
1285 i == 5 ? '\n' : ':');
1286
1287 - return 0;
1288 + /* Initialize phy */
1289 + spin_lock_irq(&bp->lock);
1290 + b44_chip_reset(bp);
1291 + spin_unlock_irq(&bp->lock);
1292
1293 -err_out_iounmap:
1294 - iounmap(bp->regs);
1295 + return 0;
1296
1297 err_out_free_dev:
1298 free_netdev(dev);
1299
1300 -err_out_free_res:
1301 - pci_release_regions(pdev);
1302 -
1303 -err_out_disable_pdev:
1304 - pci_disable_device(pdev);
1305 - pci_set_drvdata(pdev, NULL);
1306 +out:
1307 return err;
1308 }
1309
1310 -static void __devexit b44_remove_one(struct pci_dev *pdev)
1311 +static void __devexit b44_remove_one(struct ssb_device *pdev)
1312 {
1313 - struct net_device *dev = pci_get_drvdata(pdev);
1314 - struct b44 *bp = netdev_priv(dev);
1315 + struct net_device *dev = ssb_get_drvdata(pdev);
1316
1317 unregister_netdev(dev);
1318 - iounmap(bp->regs);
1319 free_netdev(dev);
1320 - pci_release_regions(pdev);
1321 - pci_disable_device(pdev);
1322 - pci_set_drvdata(pdev, NULL);
1323 + ssb_set_drvdata(pdev, NULL);
1324 }
1325
1326 -static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
1327 +static int b44_suspend(struct ssb_device *pdev, pm_message_t state)
1328 {
1329 - struct net_device *dev = pci_get_drvdata(pdev);
1330 + struct net_device *dev = ssb_get_drvdata(pdev);
1331 struct b44 *bp = netdev_priv(dev);
1332
1333 if (!netif_running(dev))
1334 - return 0;
1335 + return 0;
1336
1337 del_timer_sync(&bp->timer);
1338
1339 @@ -2311,26 +2442,16 @@
1340 b44_init_hw(bp, B44_PARTIAL_RESET);
1341 b44_setup_wol(bp);
1342 }
1343 - pci_disable_device(pdev);
1344 +
1345 return 0;
1346 }
1347
1348 -static int b44_resume(struct pci_dev *pdev)
1349 +static int b44_resume(struct ssb_device *pdev)
1350 {
1351 - struct net_device *dev = pci_get_drvdata(pdev);
1352 + struct net_device *dev = ssb_get_drvdata(pdev);
1353 struct b44 *bp = netdev_priv(dev);
1354 int rc = 0;
1355
1356 - pci_restore_state(pdev);
1357 - rc = pci_enable_device(pdev);
1358 - if (rc) {
1359 - printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
1360 - dev->name);
1361 - return rc;
1362 - }
1363 -
1364 - pci_set_master(pdev);
1365 -
1366 if (!netif_running(dev))
1367 return 0;
1368
1369 @@ -2356,29 +2477,31 @@
1370 return 0;
1371 }
1372
1373 -static struct pci_driver b44_driver = {
1374 +static struct ssb_driver b44_driver = {
1375 .name = DRV_MODULE_NAME,
1376 - .id_table = b44_pci_tbl,
1377 + .id_table = b44_ssb_tbl,
1378 .probe = b44_init_one,
1379 .remove = __devexit_p(b44_remove_one),
1380 - .suspend = b44_suspend,
1381 - .resume = b44_resume,
1382 + .suspend = b44_suspend,
1383 + .resume = b44_resume,
1384 };
1385
1386 static int __init b44_init(void)
1387 {
1388 unsigned int dma_desc_align_size = dma_get_cache_alignment();
1389
1390 + instance = 0;
1391 +
1392 /* Setup paramaters for syncing RX/TX DMA descriptors */
1393 dma_desc_align_mask = ~(dma_desc_align_size - 1);
1394 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
1395
1396 - return pci_register_driver(&b44_driver);
1397 + return ssb_driver_register(&b44_driver);
1398 }
1399
1400 static void __exit b44_cleanup(void)
1401 {
1402 - pci_unregister_driver(&b44_driver);
1403 + ssb_driver_unregister(&b44_driver);
1404 }
1405
1406 module_init(b44_init);
1407 diff -urN linux.old/drivers/net/b44.h linux.dev/drivers/net/b44.h
1408 --- linux.old/drivers/net/b44.h 2006-12-11 20:32:53.000000000 +0100
1409 +++ linux.dev/drivers/net/b44.h 2007-01-03 02:26:02.000000000 +0100
1410 @@ -129,6 +129,7 @@
1411 #define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */
1412 #define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */
1413 #define RXCONFIG_RFILT 0x00000080 /* Reject Filter */
1414 +#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */
1415 #define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */
1416 #define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */
1417 #define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */
1418 @@ -227,75 +228,9 @@
1419 #define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */
1420 #define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */
1421
1422 -/* Silicon backplane register definitions */
1423 -#define B44_SBIMSTATE 0x0F90UL /* SB Initiator Agent State */
1424 -#define SBIMSTATE_PC 0x0000000f /* Pipe Count */
1425 -#define SBIMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */
1426 -#define SBIMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */
1427 -#define SBIMSTATE_AP_TS 0x00000010 /* Use timeslices only */
1428 -#define SBIMSTATE_AP_TK 0x00000020 /* Use token only */
1429 -#define SBIMSTATE_AP_RSV 0x00000030 /* Reserved */
1430 -#define SBIMSTATE_IBE 0x00020000 /* In Band Error */
1431 -#define SBIMSTATE_TO 0x00040000 /* Timeout */
1432 -#define B44_SBINTVEC 0x0F94UL /* SB Interrupt Mask */
1433 -#define SBINTVEC_PCI 0x00000001 /* Enable interrupts for PCI */
1434 -#define SBINTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */
1435 -#define SBINTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */
1436 -#define SBINTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */
1437 -#define SBINTVEC_USB 0x00000010 /* Enable interrupts for usb */
1438 -#define SBINTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */
1439 -#define SBINTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */
1440 -#define B44_SBTMSLOW 0x0F98UL /* SB Target State Low */
1441 -#define SBTMSLOW_RESET 0x00000001 /* Reset */
1442 -#define SBTMSLOW_REJECT 0x00000002 /* Reject */
1443 -#define SBTMSLOW_CLOCK 0x00010000 /* Clock Enable */
1444 -#define SBTMSLOW_FGC 0x00020000 /* Force Gated Clocks On */
1445 -#define SBTMSLOW_PE 0x40000000 /* Power Management Enable */
1446 -#define SBTMSLOW_BE 0x80000000 /* BIST Enable */
1447 -#define B44_SBTMSHIGH 0x0F9CUL /* SB Target State High */
1448 -#define SBTMSHIGH_SERR 0x00000001 /* S-error */
1449 -#define SBTMSHIGH_INT 0x00000002 /* Interrupt */
1450 -#define SBTMSHIGH_BUSY 0x00000004 /* Busy */
1451 -#define SBTMSHIGH_GCR 0x20000000 /* Gated Clock Request */
1452 -#define SBTMSHIGH_BISTF 0x40000000 /* BIST Failed */
1453 -#define SBTMSHIGH_BISTD 0x80000000 /* BIST Done */
1454 -#define B44_SBIDHIGH 0x0FFCUL /* SB Identification High */
1455 -#define SBIDHIGH_RC_MASK 0x0000000f /* Revision Code */
1456 -#define SBIDHIGH_CC_MASK 0x0000fff0 /* Core Code */
1457 -#define SBIDHIGH_CC_SHIFT 4
1458 -#define SBIDHIGH_VC_MASK 0xffff0000 /* Vendor Code */
1459 -#define SBIDHIGH_VC_SHIFT 16
1460 -
1461 -/* SSB PCI config space registers. */
1462 -#define SSB_PMCSR 0x44
1463 -#define SSB_PE 0x100
1464 -#define SSB_BAR0_WIN 0x80
1465 -#define SSB_BAR1_WIN 0x84
1466 -#define SSB_SPROM_CONTROL 0x88
1467 -#define SSB_BAR1_CONTROL 0x8c
1468 -
1469 -/* SSB core and host control registers. */
1470 -#define SSB_CONTROL 0x0000UL
1471 -#define SSB_ARBCONTROL 0x0010UL
1472 -#define SSB_ISTAT 0x0020UL
1473 -#define SSB_IMASK 0x0024UL
1474 -#define SSB_MBOX 0x0028UL
1475 -#define SSB_BCAST_ADDR 0x0050UL
1476 -#define SSB_BCAST_DATA 0x0054UL
1477 -#define SSB_PCI_TRANS_0 0x0100UL
1478 -#define SSB_PCI_TRANS_1 0x0104UL
1479 -#define SSB_PCI_TRANS_2 0x0108UL
1480 -#define SSB_SPROM 0x0800UL
1481 -
1482 -#define SSB_PCI_MEM 0x00000000
1483 -#define SSB_PCI_IO 0x00000001
1484 -#define SSB_PCI_CFG0 0x00000002
1485 -#define SSB_PCI_CFG1 0x00000003
1486 -#define SSB_PCI_PREF 0x00000004
1487 -#define SSB_PCI_BURST 0x00000008
1488 -#define SSB_PCI_MASK0 0xfc000000
1489 -#define SSB_PCI_MASK1 0xfc000000
1490 -#define SSB_PCI_MASK2 0xc0000000
1491 +#define br32(bp, REG) ssb_read32((bp)->sdev, (REG))
1492 +#define bw32(bp, REG, VAL) ssb_write32((bp)->sdev, (REG), (VAL))
1493 +#define atoi(str) simple_strtoul(((str != NULL) ? str : ""), NULL, 0)
1494
1495 /* 4400 PHY registers */
1496 #define B44_MII_AUXCTRL 24 /* Auxiliary Control */
1497 @@ -346,10 +281,12 @@
1498
1499 struct ring_info {
1500 struct sk_buff *skb;
1501 - DECLARE_PCI_UNMAP_ADDR(mapping);
1502 + dma_addr_t mapping;
1503 };
1504
1505 #define B44_MCAST_TABLE_SIZE 32
1506 +#define B44_PHY_ADDR_NO_PHY 30
1507 +#define B44_MDC_RATIO 5000000
1508
1509 #define B44_STAT_REG_DECLARE \
1510 _B44(tx_good_octets) \
1511 @@ -425,9 +362,10 @@
1512
1513 u32 dma_offset;
1514 u32 flags;
1515 -#define B44_FLAG_B0_ANDLATER 0x00000001
1516 +#define B44_FLAG_INIT_COMPLETE 0x00000001
1517 #define B44_FLAG_BUGGY_TXPTR 0x00000002
1518 #define B44_FLAG_REORDER_BUG 0x00000004
1519 +#define B44_FLAG_B0_ANDLATER 0x00000008
1520 #define B44_FLAG_PAUSE_AUTO 0x00008000
1521 #define B44_FLAG_FULL_DUPLEX 0x00010000
1522 #define B44_FLAG_100_BASE_T 0x00020000
1523 @@ -452,8 +390,7 @@
1524 struct net_device_stats stats;
1525 struct b44_hw_stats hw_stats;
1526
1527 - void __iomem *regs;
1528 - struct pci_dev *pdev;
1529 + struct ssb_device *sdev;
1530 struct net_device *dev;
1531
1532 dma_addr_t rx_ring_dma, tx_ring_dma;
1533 diff -urN linux.old/drivers/net/Kconfig linux.dev/drivers/net/Kconfig
1534 --- linux.old/drivers/net/Kconfig 2007-01-03 02:25:09.000000000 +0100
1535 +++ linux.dev/drivers/net/Kconfig 2007-01-03 02:26:02.000000000 +0100
1536 @@ -1511,7 +1511,7 @@
1537
1538 config B44
1539 tristate "Broadcom 4400 ethernet support"
1540 - depends on NET_PCI && PCI
1541 + depends on SSB && EXPERIMENTAL
1542 select MII
1543 help
1544 If you have a network (Ethernet) controller of this type, say Y and
1545