[brcm63xx] add experimental support for 2.6.28.10, tested on bcm6338
[openwrt.git] / target / linux / brcm63xx / files / drivers / net / bcm63xx_enet.c
1 /*
2 * Driver for BCM963xx builtin Ethernet mac
3 *
4 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/clk.h>
23 #include <linux/etherdevice.h>
24 #include <linux/delay.h>
25 #include <linux/ethtool.h>
26 #include <linux/crc32.h>
27 #include <linux/err.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/platform_device.h>
30 #include <linux/if_vlan.h>
31 #include <linux/version.h>
32
33 #include <bcm63xx_dev_enet.h>
34 #include "bcm63xx_enet.h"
35
36 static char bcm_enet_driver_name[] = "bcm63xx_enet";
37 static char bcm_enet_driver_version[] = "1.0";
38
39 static int copybreak __read_mostly = 128;
40 module_param(copybreak, int, 0);
41 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
42
43 /* io memory shared between all devices */
44 static void __iomem *bcm_enet_shared_base;
45
46 /*
47 * io helpers to access mac registers
48 */
49 static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
50 {
51 return bcm_readl(priv->base + off);
52 }
53
54 static inline void enet_writel(struct bcm_enet_priv *priv,
55 u32 val, u32 off)
56 {
57 bcm_writel(val, priv->base + off);
58 }
59
60 /*
61 * io helpers to access shared registers
62 */
63 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
64 {
65 return bcm_readl(bcm_enet_shared_base + off);
66 }
67
68 static inline void enet_dma_writel(struct bcm_enet_priv *priv,
69 u32 val, u32 off)
70 {
71 bcm_writel(val, bcm_enet_shared_base + off);
72 }
73
74 /*
75 * write given data into mii register and wait for transfer to end
76 * with timeout (average measured transfer time is 25us)
77 */
78 static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
79 {
80 int limit;
81
82 /* make sure mii interrupt status is cleared */
83 enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
84
85 enet_writel(priv, data, ENET_MIIDATA_REG);
86 wmb();
87
88 /* busy wait on mii interrupt bit, with timeout */
89 limit = 1000;
90 do {
91 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
92 break;
93 udelay(1);
94 } while (limit-- >= 0);
95
96 return (limit < 0) ? 1 : 0;
97 }
98
99 /*
100 * MII internal read callback
101 */
102 static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
103 int regnum)
104 {
105 u32 tmp, val;
106
107 tmp = regnum << ENET_MIIDATA_REG_SHIFT;
108 tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
109 tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
110 tmp |= ENET_MIIDATA_OP_READ_MASK;
111
112 if (do_mdio_op(priv, tmp))
113 return -1;
114
115 val = enet_readl(priv, ENET_MIIDATA_REG);
116 val &= 0xffff;
117 return val;
118 }
119
120 /*
121 * MII internal write callback
122 */
123 static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
124 int regnum, u16 value)
125 {
126 u32 tmp;
127
128 tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
129 tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
130 tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
131 tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
132 tmp |= ENET_MIIDATA_OP_WRITE_MASK;
133
134 (void)do_mdio_op(priv, tmp);
135 return 0;
136 }
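/*
 * The helpers above build the full MII management frame in
 * ENET_MIIDATA_REG (16-bit data, 0x2 turnaround bits, register number,
 * PHY address and the read/write opcode); do_mdio_op() then busy-waits
 * up to ~1ms for the ENET_IR_MII completion bit.  A timed-out read is
 * reported to the caller as -1, a timed-out write is silently ignored.
 */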
137
138 /*
139 * MII read callback from phylib
140 */
141 static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
142 int regnum)
143 {
144 return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
145 }
146
147 /*
148 * MII write callback from phylib
149 */
150 static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
151 int regnum, u16 value)
152 {
153 return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
154 }
155
156 /*
157 * MII read callback from mii core
158 */
159 static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
160 int regnum)
161 {
162 return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
163 }
164
165 /*
166 * MII write callback from mii core
167 */
168 static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
169 int regnum, int value)
170 {
171 bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
172 }
173
174 /*
175 * refill rx queue
176 */
177 static int bcm_enet_refill_rx(struct net_device *dev)
178 {
179 struct bcm_enet_priv *priv;
180
181 priv = netdev_priv(dev);
182
183 while (priv->rx_desc_count < priv->rx_ring_size) {
184 struct bcm_enet_desc *desc;
185 struct sk_buff *skb;
186 dma_addr_t p;
187 int desc_idx;
188 u32 len_stat;
189
190 desc_idx = priv->rx_dirty_desc;
191 desc = &priv->rx_desc_cpu[desc_idx];
192
193 if (!priv->rx_skb[desc_idx]) {
194 skb = netdev_alloc_skb(dev, priv->rx_skb_size);
195 if (!skb)
196 break;
197 priv->rx_skb[desc_idx] = skb;
198
199 p = dma_map_single(&priv->pdev->dev, skb->data,
200 priv->rx_skb_size,
201 DMA_FROM_DEVICE);
202 desc->address = p;
203 }
204
205 len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
206 len_stat |= DMADESC_OWNER_MASK;
207 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
208 len_stat |= DMADESC_WRAP_MASK;
209 priv->rx_dirty_desc = 0;
210 } else {
211 priv->rx_dirty_desc++;
212 }
213 wmb();
214 desc->len_stat = len_stat;
215
216 priv->rx_desc_count++;
217
218 /* tell dma engine we allocated one buffer */
219 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
220 }
221
222 /* If rx ring is still empty, set a timer to try allocating
223 * again at a later time. */
224 if (priv->rx_desc_count == 0 && netif_running(dev)) {
225 dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
226 priv->rx_timeout.expires = jiffies + HZ;
227 add_timer(&priv->rx_timeout);
228 }
229
230 return 0;
231 }
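/*
 * Refill bookkeeping above: rx_dirty_desc is the next descriptor to
 * hand back to the hardware.  Each refilled descriptor gets the buffer
 * length, the OWNER bit (hardware owns it) and, on the last ring entry,
 * the WRAP bit.  The write to ENETDMA_BUFALLOC_REG tells the DMA engine
 * one more buffer is available; if the ring could not be refilled at
 * all, a timer retries one second later.
 */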
232
233 /*
234 * timer callback to defer refill rx queue in case we're OOM
235 */
236 static void bcm_enet_refill_rx_timer(unsigned long data)
237 {
238 struct net_device *dev;
239 struct bcm_enet_priv *priv;
240
241 dev = (struct net_device *)data;
242 priv = netdev_priv(dev);
243
244 spin_lock(&priv->rx_lock);
245 bcm_enet_refill_rx((struct net_device *)data);
246 spin_unlock(&priv->rx_lock);
247 }
248
249 /*
250 * extract packet from rx queue
251 */
252 static int bcm_enet_receive_queue(struct net_device *dev, int budget)
253 {
254 struct bcm_enet_priv *priv;
255 struct device *kdev;
256 int processed;
257
258 priv = netdev_priv(dev);
259 kdev = &priv->pdev->dev;
260 processed = 0;
261
262 /* don't scan ring further than number of refilled
263 * descriptors */
264 if (budget > priv->rx_desc_count)
265 budget = priv->rx_desc_count;
266
267 do {
268 struct bcm_enet_desc *desc;
269 struct sk_buff *skb;
270 int desc_idx;
271 u32 len_stat;
272 unsigned int len;
273
274 desc_idx = priv->rx_curr_desc;
275 desc = &priv->rx_desc_cpu[desc_idx];
276
277 /* make sure we actually read the descriptor status at
278 * each loop iteration */
279 rmb();
280
281 len_stat = desc->len_stat;
282
283 /* break if dma ownership belongs to hw */
284 if (len_stat & DMADESC_OWNER_MASK)
285 break;
286
287 processed++;
288 priv->rx_curr_desc++;
289 if (priv->rx_curr_desc == priv->rx_ring_size)
290 priv->rx_curr_desc = 0;
291 priv->rx_desc_count--;
292
293 /* if the packet does not have start of packet _and_
294 * end of packet flag set, then just recycle it */
295 if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
296 priv->stats.rx_dropped++;
297 continue;
298 }
299
300 /* recycle packet if it's marked as bad */
301 if (unlikely(len_stat & DMADESC_ERR_MASK)) {
302 priv->stats.rx_errors++;
303
304 if (len_stat & DMADESC_OVSIZE_MASK)
305 priv->stats.rx_length_errors++;
306 if (len_stat & DMADESC_CRC_MASK)
307 priv->stats.rx_crc_errors++;
308 if (len_stat & DMADESC_UNDER_MASK)
309 priv->stats.rx_frame_errors++;
310 if (len_stat & DMADESC_OV_MASK)
311 priv->stats.rx_fifo_errors++;
312 continue;
313 }
314
315 /* valid packet */
316 skb = priv->rx_skb[desc_idx];
317 len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
318 /* don't include FCS */
319 len -= 4;
320
321 if (len < copybreak) {
322 struct sk_buff *nskb;
323
324 nskb = netdev_alloc_skb(dev, len + 2);
325 if (!nskb) {
326 /* forget packet, just rearm desc */
327 priv->stats.rx_dropped++;
328 continue;
329 }
330
331 /* since we're copying the data, we can align
332 * them properly */
333 skb_reserve(nskb, NET_IP_ALIGN);
334 dma_sync_single_for_cpu(kdev, desc->address,
335 len, DMA_FROM_DEVICE);
336 memcpy(nskb->data, skb->data, len);
337 dma_sync_single_for_device(kdev, desc->address,
338 len, DMA_FROM_DEVICE);
339 skb = nskb;
340 } else {
341 dma_unmap_single(&priv->pdev->dev, desc->address,
342 priv->rx_skb_size, DMA_FROM_DEVICE);
343 priv->rx_skb[desc_idx] = NULL;
344 }
345
346 skb_put(skb, len);
347 skb->dev = dev;
348 skb->protocol = eth_type_trans(skb, dev);
349 priv->stats.rx_packets++;
350 priv->stats.rx_bytes += len;
351 dev->last_rx = jiffies;
352 netif_receive_skb(skb);
353
354 } while (--budget > 0);
355
356 if (processed || !priv->rx_desc_count) {
357 bcm_enet_refill_rx(dev);
358
359 /* kick rx dma */
360 enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
361 ENETDMA_CHANCFG_REG(priv->rx_chan));
362 }
363
364 return processed;
365 }
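/*
 * Copybreak strategy used above: frames shorter than the copybreak
 * module parameter are copied into a freshly allocated skb (reserving
 * NET_IP_ALIGN so the IP header ends up aligned) while the original
 * DMA buffer stays mapped and attached to the ring.  Larger frames are
 * unmapped and passed up directly, and bcm_enet_refill_rx() later
 * allocates a replacement buffer for that descriptor.
 */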
366
367
368 /*
369 * try to or force reclaim of transmitted buffers
370 */
371 static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
372 {
373 struct bcm_enet_priv *priv;
374 int released;
375
376 priv = netdev_priv(dev);
377 released = 0;
378
379 while (priv->tx_desc_count < priv->tx_ring_size) {
380 struct bcm_enet_desc *desc;
381 struct sk_buff *skb;
382
383 /* We run in a bh and fight against start_xmit, which
384 * is called with bh disabled */
385 spin_lock(&priv->tx_lock);
386
387 desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
388
389 if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
390 spin_unlock(&priv->tx_lock);
391 break;
392 }
393
394 /* ensure other fields of the descriptor were not read
395 * before we checked ownership */
396 rmb();
397
398 skb = priv->tx_skb[priv->tx_dirty_desc];
399 priv->tx_skb[priv->tx_dirty_desc] = NULL;
400 dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
401 DMA_TO_DEVICE);
402
403 priv->tx_dirty_desc++;
404 if (priv->tx_dirty_desc == priv->tx_ring_size)
405 priv->tx_dirty_desc = 0;
406 priv->tx_desc_count++;
407
408 spin_unlock(&priv->tx_lock);
409
410 if (desc->len_stat & DMADESC_UNDER_MASK)
411 priv->stats.tx_errors++;
412
413 dev_kfree_skb(skb);
414 released++;
415 }
416
417 if (netif_queue_stopped(dev) && released)
418 netif_wake_queue(dev);
419
420 return released;
421 }
422
423 /*
424 * poll func, called by network core
425 */
426 static int bcm_enet_poll(struct napi_struct *napi, int budget)
427 {
428 struct bcm_enet_priv *priv;
429 struct net_device *dev;
430 int tx_work_done, rx_work_done;
431
432 priv = container_of(napi, struct bcm_enet_priv, napi);
433 dev = priv->net_dev;
434
435 /* ack interrupts */
436 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
437 ENETDMA_IR_REG(priv->rx_chan));
438 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
439 ENETDMA_IR_REG(priv->tx_chan));
440
441 /* reclaim sent skb */
442 tx_work_done = bcm_enet_tx_reclaim(dev, 0);
443
444 spin_lock(&priv->rx_lock);
445 rx_work_done = bcm_enet_receive_queue(dev, budget);
446 spin_unlock(&priv->rx_lock);
447
448 if (rx_work_done >= budget || tx_work_done > 0) {
449 /* rx/tx queue is not yet empty/clean */
450 return rx_work_done;
451 }
452
453 /* no more packets in rx/tx queue, remove device from poll
454 * queue */
455 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
456 netif_rx_complete(dev, napi);
457 #else
458 napi_complete(napi);
459 #endif
460
461 /* restore rx/tx interrupt */
462 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
463 ENETDMA_IRMASK_REG(priv->rx_chan));
464 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
465 ENETDMA_IRMASK_REG(priv->tx_chan));
466
467 return rx_work_done;
468 }
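/*
 * NAPI flow: bcm_enet_isr_dma() masks the rx/tx channel interrupts and
 * schedules this poll function.  The poll acks the pending PKTDONE
 * interrupts, reclaims transmitted skbs, receives at most 'budget'
 * frames, and only when the rx budget was not exhausted and no tx work
 * was done does it complete NAPI and unmask the rx/tx interrupts again.
 */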
469
470 /*
471 * mac interrupt handler
472 */
473 static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
474 {
475 struct net_device *dev;
476 struct bcm_enet_priv *priv;
477 u32 stat;
478
479 dev = dev_id;
480 priv = netdev_priv(dev);
481
482 stat = enet_readl(priv, ENET_IR_REG);
483 if (!(stat & ENET_IR_MIB))
484 return IRQ_NONE;
485
486 /* clear & mask interrupt */
487 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
488 enet_writel(priv, 0, ENET_IRMASK_REG);
489
490 /* read mib registers in workqueue */
491 schedule_work(&priv->mib_update_task);
492
493 return IRQ_HANDLED;
494 }
495
496 /*
497 * rx/tx dma interrupt handler
498 */
499 static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
500 {
501 struct net_device *dev;
502 struct bcm_enet_priv *priv;
503
504 dev = dev_id;
505 priv = netdev_priv(dev);
506
507 /* mask rx/tx interrupts */
508 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
509 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
510
511 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
512 netif_rx_schedule(dev, &priv->napi);
513 #else
514 napi_schedule(&priv->napi);
515 #endif
516
517 return IRQ_HANDLED;
518 }
519
520 /*
521 * tx request callback
522 */
523 static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
524 {
525 struct bcm_enet_priv *priv;
526 struct bcm_enet_desc *desc;
527 u32 len_stat;
528 int ret;
529
530 priv = netdev_priv(dev);
531
532 /* lock against tx reclaim */
533 spin_lock(&priv->tx_lock);
534
535 /* make sure the tx hw queue is not full, should not happen
536 * since we stop the queue before that happens */
537 if (unlikely(!priv->tx_desc_count)) {
538 netif_stop_queue(dev);
539 dev_err(&priv->pdev->dev, "xmit called with no tx desc "
540 "available?\n");
541 ret = NETDEV_TX_BUSY;
542 goto out_unlock;
543 }
544
545 /* point to the next available desc */
546 desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
547 priv->tx_skb[priv->tx_curr_desc] = skb;
548
549 /* fill descriptor */
550 desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
551 DMA_TO_DEVICE);
552
553 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
554 len_stat |= DMADESC_ESOP_MASK |
555 DMADESC_APPEND_CRC |
556 DMADESC_OWNER_MASK;
557
558 priv->tx_curr_desc++;
559 if (priv->tx_curr_desc == priv->tx_ring_size) {
560 priv->tx_curr_desc = 0;
561 len_stat |= DMADESC_WRAP_MASK;
562 }
563 priv->tx_desc_count--;
564
565 /* dma might be already polling, make sure we update desc
566 * fields in correct order */
567 wmb();
568 desc->len_stat = len_stat;
569 wmb();
570
571 /* kick tx dma */
572 enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
573 ENETDMA_CHANCFG_REG(priv->tx_chan));
574
575 /* stop queue if no more desc available */
576 if (!priv->tx_desc_count)
577 netif_stop_queue(dev);
578
579 priv->stats.tx_bytes += skb->len;
580 priv->stats.tx_packets++;
581 dev->trans_start = jiffies;
582 ret = NETDEV_TX_OK;
583
584 out_unlock:
585 spin_unlock(&priv->tx_lock);
586 return ret;
587 }
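/*
 * Ordering in start_xmit: the descriptor body (address, length) must be
 * visible before the OWNER bit is set in len_stat, hence the first
 * wmb(); the second wmb() makes sure len_stat itself has reached memory
 * before the channel-enable write kicks the DMA engine.
 */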
588
589 /*
590 * Change the interface's mac address.
591 */
592 static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
593 {
594 struct bcm_enet_priv *priv;
595 struct sockaddr *addr = p;
596 u32 val;
597
598 priv = netdev_priv(dev);
599 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
600
601 /* use perfect match register 0 to store my mac address */
602 val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
603 (dev->dev_addr[4] << 8) | dev->dev_addr[5];
604 enet_writel(priv, val, ENET_PML_REG(0));
605
606 val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
607 val |= ENET_PMH_DATAVALID_MASK;
608 enet_writel(priv, val, ENET_PMH_REG(0));
609
610 return 0;
611 }
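/*
 * Perfect match registers come in pairs: ENET_PML_REG() holds the low
 * four bytes of the MAC address, ENET_PMH_REG() the top two bytes plus
 * the "entry valid" bit.  Entry 0 is reserved for the interface address
 * written above; entries 1-3 are used for multicast filtering in
 * bcm_enet_set_multicast_list() below.
 */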
612
613 /*
614 * Change rx mode (promiscuous/allmulti) and update multicast list
615 */
616 static void bcm_enet_set_multicast_list(struct net_device *dev)
617 {
618 struct bcm_enet_priv *priv;
619 struct dev_mc_list *mc_list;
620 u32 val;
621 int i;
622
623 priv = netdev_priv(dev);
624
625 val = enet_readl(priv, ENET_RXCFG_REG);
626
627 if (dev->flags & IFF_PROMISC)
628 val |= ENET_RXCFG_PROMISC_MASK;
629 else
630 val &= ~ENET_RXCFG_PROMISC_MASK;
631
632 /* only 3 perfect match registers left, first one is used for
633 * own mac address */
634 if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
635 val |= ENET_RXCFG_ALLMCAST_MASK;
636 else
637 val &= ~ENET_RXCFG_ALLMCAST_MASK;
638
639 /* no need to set perfect match registers if we catch all
640 * multicast */
641 if (val & ENET_RXCFG_ALLMCAST_MASK) {
642 enet_writel(priv, val, ENET_RXCFG_REG);
643 return;
644 }
645
646 for (i = 0, mc_list = dev->mc_list;
647 (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
648 i++, mc_list = mc_list->next) {
649 u8 *dmi_addr;
650 u32 tmp;
651
652 /* filter out non-ethernet addresses */
653 if (mc_list->dmi_addrlen != 6)
654 continue;
655
656 /* update perfect match registers */
657 dmi_addr = mc_list->dmi_addr;
658 tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
659 (dmi_addr[4] << 8) | dmi_addr[5];
660 enet_writel(priv, tmp, ENET_PML_REG(i + 1));
661
662 tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
663 tmp |= ENET_PMH_DATAVALID_MASK;
664 enet_writel(priv, tmp, ENET_PMH_REG(i + 1));
665 }
666
667 for (; i < 3; i++) {
668 enet_writel(priv, 0, ENET_PML_REG(i + 1));
669 enet_writel(priv, 0, ENET_PMH_REG(i + 1));
670 }
671
672 enet_writel(priv, val, ENET_RXCFG_REG);
673 }
674
675 /*
676 * set mac duplex parameters
677 */
678 static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
679 {
680 u32 val;
681
682 val = enet_readl(priv, ENET_TXCTL_REG);
683 if (fullduplex)
684 val |= ENET_TXCTL_FD_MASK;
685 else
686 val &= ~ENET_TXCTL_FD_MASK;
687 enet_writel(priv, val, ENET_TXCTL_REG);
688 }
689
690 /*
691 * set mac flow control parameters
692 */
693 static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
694 {
695 u32 val;
696
697 /* rx flow control (pause frame handling) */
698 val = enet_readl(priv, ENET_RXCFG_REG);
699 if (rx_en)
700 val |= ENET_RXCFG_ENFLOW_MASK;
701 else
702 val &= ~ENET_RXCFG_ENFLOW_MASK;
703 enet_writel(priv, val, ENET_RXCFG_REG);
704
705 /* tx flow control (pause frame generation) */
706 val = enet_dma_readl(priv, ENETDMA_CFG_REG);
707 if (tx_en)
708 val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
709 else
710 val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
711 enet_dma_writel(priv, val, ENETDMA_CFG_REG);
712 }
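/*
 * Flow control is split across two blocks: pause frame handling is a
 * MAC rx configuration bit (ENET_RXCFG_ENFLOW), while pause frame
 * generation is enabled per DMA channel through the shared ENETDMA_CFG
 * register, keyed on the rx channel of this MAC.
 */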
713
714 /*
715 * link changed callback (from phylib)
716 */
717 static void bcm_enet_adjust_phy_link(struct net_device *dev)
718 {
719 struct bcm_enet_priv *priv;
720 struct phy_device *phydev;
721 int status_changed;
722
723 priv = netdev_priv(dev);
724 phydev = priv->phydev;
725 status_changed = 0;
726
727 if (priv->old_link != phydev->link) {
728 status_changed = 1;
729 priv->old_link = phydev->link;
730 }
731
732 /* reflect duplex change in mac configuration */
733 if (phydev->link && phydev->duplex != priv->old_duplex) {
734 bcm_enet_set_duplex(priv,
735 (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
736 status_changed = 1;
737 priv->old_duplex = phydev->duplex;
738 }
739
740 /* enable flow control if remote advertises it (trust phylib to
741 * check that duplex is full) */
742 if (phydev->link && phydev->pause != priv->old_pause) {
743 int rx_pause_en, tx_pause_en;
744
745 if (phydev->pause) {
746 /* pause was advertised by lpa and us */
747 rx_pause_en = 1;
748 tx_pause_en = 1;
749 } else if (!priv->pause_auto) {
750 /* pause setting overridden by user */
751 rx_pause_en = priv->pause_rx;
752 tx_pause_en = priv->pause_tx;
753 } else {
754 rx_pause_en = 0;
755 tx_pause_en = 0;
756 }
757
758 bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
759 status_changed = 1;
760 priv->old_pause = phydev->pause;
761 }
762
763 if (status_changed) {
764 pr_info("%s: link %s", dev->name, phydev->link ?
765 "UP" : "DOWN");
766 if (phydev->link)
767 printk(" - %d/%s - flow control %s", phydev->speed,
768 DUPLEX_FULL == phydev->duplex ? "full" : "half",
769 phydev->pause == 1 ? "rx&tx" : "off");
770
771 printk("\n");
772 }
773 }
774
775 /*
776 * link changed callback (if phylib is not used)
777 */
778 static void bcm_enet_adjust_link(struct net_device *dev)
779 {
780 struct bcm_enet_priv *priv;
781
782 priv = netdev_priv(dev);
783 bcm_enet_set_duplex(priv, priv->force_duplex_full);
784 bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
785
786 pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
787 dev->name,
788 priv->force_speed_100 ? 100 : 10,
789 priv->force_duplex_full ? "full" : "half",
790 priv->pause_rx ? "rx" : "off",
791 priv->pause_tx ? "tx" : "off");
792 }
793
794 /*
795 * open callback, allocate dma rings & buffers and start rx operation
796 */
797 static int bcm_enet_open(struct net_device *dev)
798 {
799 struct bcm_enet_priv *priv;
800 struct sockaddr addr;
801 struct device *kdev;
802 struct phy_device *phydev;
803 int irq_requested, i, ret;
804 unsigned int size;
805 char phy_id[BUS_ID_SIZE];
806 void *p;
807 u32 val;
808
809 priv = netdev_priv(dev);
810 priv->rx_desc_cpu = priv->tx_desc_cpu = NULL;
811 priv->rx_skb = priv->tx_skb = NULL;
812
813 kdev = &priv->pdev->dev;
814
815 if (priv->has_phy) {
816 /* connect to PHY */
817 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
818 priv->mac_id ? "1" : "0", priv->phy_id);
819
820 phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
821 PHY_INTERFACE_MODE_MII);
822
823 if (IS_ERR(phydev)) {
824 dev_err(kdev, "could not attach to PHY\n");
825 return PTR_ERR(phydev);
826 }
827
828 /* mask with MAC supported features */
829 phydev->supported &= (SUPPORTED_10baseT_Half |
830 SUPPORTED_10baseT_Full |
831 SUPPORTED_100baseT_Half |
832 SUPPORTED_100baseT_Full |
833 SUPPORTED_Autoneg |
834 SUPPORTED_Pause |
835 SUPPORTED_MII);
836 phydev->advertising = phydev->supported;
837
838 if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
839 phydev->advertising |= SUPPORTED_Pause;
840 else
841 phydev->advertising &= ~SUPPORTED_Pause;
842
843 dev_info(kdev, "attached PHY at address %d [%s]\n",
844 phydev->addr, phydev->drv->name);
845
846 priv->old_link = 0;
847 priv->old_duplex = -1;
848 priv->old_pause = -1;
849 priv->phydev = phydev;
850 }
851
852 /* mask all interrupts and request them */
853 enet_writel(priv, 0, ENET_IRMASK_REG);
854 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
855 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
856
857 irq_requested = 0;
858 ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
859 if (ret)
860 goto out;
861 irq_requested++;
862
863 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
864 IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
865 if (ret)
866 goto out;
867 irq_requested++;
868
869 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
870 IRQF_DISABLED, dev->name, dev);
871 if (ret)
872 goto out;
873 irq_requested++;
874
875 /* initialize perfect match registers */
876 for (i = 0; i < 4; i++) {
877 enet_writel(priv, 0, ENET_PML_REG(i));
878 enet_writel(priv, 0, ENET_PMH_REG(i));
879 }
880
881 /* write device mac address */
882 memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
883 bcm_enet_set_mac_address(dev, &addr);
884
885 /* allocate rx dma ring */
886 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
887 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
888 if (!p) {
889 dev_err(kdev, "cannot allocate rx ring %u\n", size);
890 ret = -ENOMEM;
891 goto out;
892 }
893
894 memset(p, 0, size);
895 priv->rx_desc_alloc_size = size;
896 priv->rx_desc_cpu = p;
897
898 /* allocate tx dma ring */
899 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
900 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
901 if (!p) {
902 dev_err(kdev, "cannot allocate tx ring\n");
903 ret = -ENOMEM;
904 goto out;
905 }
906
907 memset(p, 0, size);
908 priv->tx_desc_alloc_size = size;
909 priv->tx_desc_cpu = p;
910
911 priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
912 GFP_KERNEL);
913 if (!priv->tx_skb) {
914 dev_err(kdev, "cannot allocate tx skb queue\n");
915 ret = -ENOMEM;
916 goto out;
917 }
918
919 priv->tx_desc_count = priv->tx_ring_size;
920 priv->tx_dirty_desc = 0;
921 priv->tx_curr_desc = 0;
922 spin_lock_init(&priv->tx_lock);
923
924 /* init & fill rx ring with skbs */
925 priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
926 GFP_KERNEL);
927 if (!priv->rx_skb) {
928 dev_err(kdev, "cannot allocate rx skb queue\n");
929 ret = -ENOMEM;
930 goto out;
931 }
932
933 priv->rx_desc_count = 0;
934 priv->rx_dirty_desc = 0;
935 priv->rx_curr_desc = 0;
936
937 /* initialize flow control buffer allocation */
938 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
939 ENETDMA_BUFALLOC_REG(priv->rx_chan));
940
941 if (bcm_enet_refill_rx(dev)) {
942 dev_err(kdev, "cannot allocate rx skb queue\n");
943 ret = -ENOMEM;
944 goto out;
945 }
946
947 /* write rx & tx ring addresses */
948 enet_dma_writel(priv, priv->rx_desc_dma,
949 ENETDMA_RSTART_REG(priv->rx_chan));
950 enet_dma_writel(priv, priv->tx_desc_dma,
951 ENETDMA_RSTART_REG(priv->tx_chan));
952
953 /* clear remaining state ram for rx & tx channel */
954 enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
955 enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
956 enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
957 enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
958 enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
959 enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
960
961 /* set max rx/tx length */
962 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
963 enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
964
965 /* set dma maximum burst len */
966 enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
967 ENETDMA_MAXBURST_REG(priv->rx_chan));
968 enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
969 ENETDMA_MAXBURST_REG(priv->tx_chan));
970
971 /* set correct transmit fifo watermark */
972 enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
973
974 /* set flow control low/high threshold to 1/3 / 2/3 */
975 val = priv->rx_ring_size / 3;
976 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
977 val = (priv->rx_ring_size * 2) / 3;
978 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
979
980 /* all set, enable mac and interrupts, start dma engine and
981 * kick rx dma channel */
982 wmb();
983 enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
984 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
985 enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
986 ENETDMA_CHANCFG_REG(priv->rx_chan));
987
988 /* watch "mib counters about to overflow" interrupt */
989 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
990 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
991
992 /* watch "packet transferred" interrupt in rx and tx */
993 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
994 ENETDMA_IR_REG(priv->rx_chan));
995 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
996 ENETDMA_IR_REG(priv->tx_chan));
997
998 /* make sure we enable napi before rx interrupt */
999 napi_enable(&priv->napi);
1000
1001 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1002 ENETDMA_IRMASK_REG(priv->rx_chan));
1003 enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1004 ENETDMA_IRMASK_REG(priv->tx_chan));
1005
1006 if (priv->has_phy)
1007 phy_start(priv->phydev);
1008 else
1009 bcm_enet_adjust_link(dev);
1010
1011 netif_start_queue(dev);
1012 return 0;
1013
1014 out:
1015 if (priv->has_phy) phy_disconnect(priv->phydev);
1016 if (irq_requested > 2)
1017 free_irq(priv->irq_tx, dev);
1018 if (irq_requested > 1)
1019 free_irq(priv->irq_rx, dev);
1020 if (irq_requested > 0)
1021 free_irq(dev->irq, dev);
1022 for (i = 0; priv->rx_skb && i < priv->rx_ring_size; i++) {
1023 struct bcm_enet_desc *desc;
1024
1025 if (!priv->rx_skb[i])
1026 continue;
1027
1028 desc = &priv->rx_desc_cpu[i];
1029 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1030 DMA_FROM_DEVICE);
1031 kfree_skb(priv->rx_skb[i]);
1032 }
1033 if (priv->rx_desc_cpu)
1034 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1035 priv->rx_desc_cpu, priv->rx_desc_dma);
1036 if (priv->tx_desc_cpu)
1037 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1038 priv->tx_desc_cpu, priv->tx_desc_dma);
1039 kfree(priv->rx_skb);
1040 kfree(priv->tx_skb);
1041 return ret;
1042 }
1043
1044 /*
1045 * disable mac
1046 */
1047 static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1048 {
1049 int limit;
1050 u32 val;
1051
1052 val = enet_readl(priv, ENET_CTL_REG);
1053 val |= ENET_CTL_DISABLE_MASK;
1054 enet_writel(priv, val, ENET_CTL_REG);
1055
1056 limit = 1000;
1057 do {
1058 u32 val;
1059
1060 val = enet_readl(priv, ENET_CTL_REG);
1061 if (!(val & ENET_CTL_DISABLE_MASK))
1062 break;
1063 udelay(1);
1064 } while (limit--);
1065 }
1066
1067 /*
1068 * disable dma in given channel
1069 */
1070 static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1071 {
1072 int limit;
1073
1074 enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
1075
1076 limit = 1000;
1077 do {
1078 u32 val;
1079
1080 val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
1081 if (!(val & ENETDMA_CHANCFG_EN_MASK))
1082 break;
1083 udelay(1);
1084 } while (limit--);
1085 }
1086
1087 /*
1088 * stop callback
1089 */
1090 static int bcm_enet_stop(struct net_device *dev)
1091 {
1092 struct bcm_enet_priv *priv;
1093 struct device *kdev;
1094 int i;
1095
1096 priv = netdev_priv(dev);
1097 kdev = &priv->pdev->dev;
1098
1099 netif_stop_queue(dev);
1100 napi_disable(&priv->napi);
1101 if (priv->has_phy)
1102 phy_stop(priv->phydev);
1103 del_timer_sync(&priv->rx_timeout);
1104
1105 /* mask all interrupts */
1106 enet_writel(priv, 0, ENET_IRMASK_REG);
1107 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
1108 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
1109
1110 /* make sure no mib update is scheduled */
1111 flush_scheduled_work();
1112
1113 /* disable dma & mac */
1114 bcm_enet_disable_dma(priv, priv->tx_chan);
1115 bcm_enet_disable_dma(priv, priv->rx_chan);
1116 bcm_enet_disable_mac(priv);
1117
1118 /* force reclaim of all tx buffers */
1119 bcm_enet_tx_reclaim(dev, 1);
1120
1121 /* free the rx skb ring */
1122 for (i = 0; i < priv->rx_ring_size; i++) {
1123 struct bcm_enet_desc *desc;
1124
1125 if (!priv->rx_skb[i])
1126 continue;
1127
1128 desc = &priv->rx_desc_cpu[i];
1129 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1130 DMA_FROM_DEVICE);
1131 kfree_skb(priv->rx_skb[i]);
1132 }
1133
1134 /* free remaining allocated memory */
1135 kfree(priv->rx_skb);
1136 kfree(priv->tx_skb);
1137 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1138 priv->rx_desc_cpu, priv->rx_desc_dma);
1139 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1140 priv->tx_desc_cpu, priv->tx_desc_dma);
1141 free_irq(priv->irq_tx, dev);
1142 free_irq(priv->irq_rx, dev);
1143 free_irq(dev->irq, dev);
1144
1145 /* release phy */
1146 if (priv->has_phy) {
1147 phy_disconnect(priv->phydev);
1148 priv->phydev = NULL;
1149 }
1150
1151 return 0;
1152 }
1153
1154 /*
1155 * core request to return device rx/tx stats
1156 */
1157 static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
1158 {
1159 struct bcm_enet_priv *priv;
1160
1161 priv = netdev_priv(dev);
1162 return &priv->stats;
1163 }
1164
1165 /*
1166 * ethtool callbacks
1167 */
1168 struct bcm_enet_stats {
1169 char stat_string[ETH_GSTRING_LEN];
1170 int sizeof_stat;
1171 int stat_offset;
1172 int mib_reg;
1173 };
1174
1175 #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
1176 offsetof(struct bcm_enet_priv, m)
1177
1178 static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1179 { "rx_packets", GEN_STAT(stats.rx_packets), -1 },
1180 { "tx_packets", GEN_STAT(stats.tx_packets), -1 },
1181 { "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
1182 { "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
1183 { "rx_errors", GEN_STAT(stats.rx_errors), -1 },
1184 { "tx_errors", GEN_STAT(stats.tx_errors), -1 },
1185 { "rx_dropped", GEN_STAT(stats.rx_dropped), -1 },
1186 { "tx_dropped", GEN_STAT(stats.tx_dropped), -1 },
1187
1188 { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1189 { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1190 { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1191 { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1192 { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1193 { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1194 { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1195 { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1196 { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1197 { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1198 { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1199 { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1200 { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1201 { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1202 { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1203 { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1204 { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1205 { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1206 { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1207 { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1208 { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1209
1210 { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1211 { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1212 { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1213 { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1214 { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1215 { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1216 { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1217 { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1218 { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1219 { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1220 { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1221 { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1222 { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1223 { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1224 { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1225 { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1226 { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1227 { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1228 { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1229 { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1230 { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1231 { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1232
1233 };
1234
1235 #define BCM_ENET_STATS_LEN \
1236 (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
1237
1238 static const u32 unused_mib_regs[] = {
1239 ETH_MIB_TX_ALL_OCTETS,
1240 ETH_MIB_TX_ALL_PKTS,
1241 ETH_MIB_RX_ALL_OCTETS,
1242 ETH_MIB_RX_ALL_PKTS,
1243 };
1244
1245
1246 static void bcm_enet_get_drvinfo(struct net_device *netdev,
1247 struct ethtool_drvinfo *drvinfo)
1248 {
1249 strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
1250 strncpy(drvinfo->version, bcm_enet_driver_version, 32);
1251 strncpy(drvinfo->fw_version, "N/A", 32);
1252 strncpy(drvinfo->bus_info, "bcm63xx", 32);
1253 drvinfo->n_stats = BCM_ENET_STATS_LEN;
1254 }
1255
1256 static int bcm_enet_get_stats_count(struct net_device *netdev)
1257 {
1258 return BCM_ENET_STATS_LEN;
1259 }
1260
1261 static void bcm_enet_get_strings(struct net_device *netdev,
1262 u32 stringset, u8 *data)
1263 {
1264 int i;
1265
1266 switch (stringset) {
1267 case ETH_SS_STATS:
1268 for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1269 memcpy(data + i * ETH_GSTRING_LEN,
1270 bcm_enet_gstrings_stats[i].stat_string,
1271 ETH_GSTRING_LEN);
1272 }
1273 break;
1274 }
1275 }
1276
1277 static void update_mib_counters(struct bcm_enet_priv *priv)
1278 {
1279 int i;
1280
1281 for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1282 const struct bcm_enet_stats *s;
1283 u32 val;
1284 char *p;
1285
1286 s = &bcm_enet_gstrings_stats[i];
1287 if (s->mib_reg == -1)
1288 continue;
1289
1290 val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1291 p = (char *)priv + s->stat_offset;
1292
1293 if (s->sizeof_stat == sizeof(u64))
1294 *(u64 *)p += val;
1295 else
1296 *(u32 *)p += val;
1297 }
1298
1299 /* also empty unused mib counters to make sure mib counter
1300 * overflow interrupt is cleared */
1301 for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1302 (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1303 }
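/*
 * The MIB registers are configured as read-to-clear in
 * bcm_enet_hw_preinit(), so every pass above accumulates the delta
 * since the previous read into the software counters, and reading the
 * unused registers is enough to clear their overflow state as well.
 */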
1304
1305 static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1306 {
1307 struct bcm_enet_priv *priv;
1308
1309 priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1310 mutex_lock(&priv->mib_update_lock);
1311 update_mib_counters(priv);
1312 mutex_unlock(&priv->mib_update_lock);
1313
1314 /* reenable mib interrupt */
1315 if (netif_running(priv->net_dev))
1316 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1317 }
1318
1319 static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1320 struct ethtool_stats *stats,
1321 u64 *data)
1322 {
1323 struct bcm_enet_priv *priv;
1324 int i;
1325
1326 priv = netdev_priv(netdev);
1327
1328 mutex_lock(&priv->mib_update_lock);
1329 update_mib_counters(priv);
1330
1331 for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1332 const struct bcm_enet_stats *s;
1333 char *p;
1334
1335 s = &bcm_enet_gstrings_stats[i];
1336 p = (char *)priv + s->stat_offset;
1337 data[i] = (s->sizeof_stat == sizeof(u64)) ?
1338 *(u64 *)p : *(u32 *)p;
1339 }
1340 mutex_unlock(&priv->mib_update_lock);
1341 }
1342
1343 static int bcm_enet_get_settings(struct net_device *dev,
1344 struct ethtool_cmd *cmd)
1345 {
1346 struct bcm_enet_priv *priv;
1347
1348 priv = netdev_priv(dev);
1349
1350 cmd->maxrxpkt = 0;
1351 cmd->maxtxpkt = 0;
1352
1353 if (priv->has_phy) {
1354 if (!priv->phydev)
1355 return -ENODEV;
1356 return phy_ethtool_gset(priv->phydev, cmd);
1357 } else {
1358 cmd->autoneg = 0;
1359 cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
1360 cmd->duplex = (priv->force_duplex_full) ?
1361 DUPLEX_FULL : DUPLEX_HALF;
1362 cmd->supported = ADVERTISED_10baseT_Half |
1363 ADVERTISED_10baseT_Full |
1364 ADVERTISED_100baseT_Half |
1365 ADVERTISED_100baseT_Full;
1366 cmd->advertising = 0;
1367 cmd->port = PORT_MII;
1368 cmd->transceiver = XCVR_EXTERNAL;
1369 }
1370 return 0;
1371 }
1372
1373 static int bcm_enet_set_settings(struct net_device *dev,
1374 struct ethtool_cmd *cmd)
1375 {
1376 struct bcm_enet_priv *priv;
1377
1378 priv = netdev_priv(dev);
1379 if (priv->has_phy) {
1380 if (!priv->phydev)
1381 return -ENODEV;
1382 return phy_ethtool_sset(priv->phydev, cmd);
1383 } else {
1384
1385 if (cmd->autoneg ||
1386 (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
1387 cmd->port != PORT_MII)
1388 return -EINVAL;
1389
1390 priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
1391 priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
1392
1393 if (netif_running(dev))
1394 bcm_enet_adjust_link(dev);
1395 return 0;
1396 }
1397 }
1398
1399 static void bcm_enet_get_ringparam(struct net_device *dev,
1400 struct ethtool_ringparam *ering)
1401 {
1402 struct bcm_enet_priv *priv;
1403
1404 priv = netdev_priv(dev);
1405
1406 /* rx/tx ring is actually only limited by memory */
1407 ering->rx_max_pending = 8192;
1408 ering->tx_max_pending = 8192;
1409 ering->rx_mini_max_pending = 0;
1410 ering->rx_jumbo_max_pending = 0;
1411 ering->rx_pending = priv->rx_ring_size;
1412 ering->tx_pending = priv->tx_ring_size;
1413 }
1414
1415 static int bcm_enet_set_ringparam(struct net_device *dev,
1416 struct ethtool_ringparam *ering)
1417 {
1418 struct bcm_enet_priv *priv;
1419 int was_running;
1420
1421 priv = netdev_priv(dev);
1422
1423 was_running = 0;
1424 if (netif_running(dev)) {
1425 bcm_enet_stop(dev);
1426 was_running = 1;
1427 }
1428
1429 priv->rx_ring_size = ering->rx_pending;
1430 priv->tx_ring_size = ering->tx_pending;
1431
1432 if (was_running) {
1433 int err;
1434
1435 err = bcm_enet_open(dev);
1436 if (err)
1437 dev_close(dev);
1438 else
1439 bcm_enet_set_multicast_list(dev);
1440 }
1441 return 0;
1442 }
1443
1444 static void bcm_enet_get_pauseparam(struct net_device *dev,
1445 struct ethtool_pauseparam *ecmd)
1446 {
1447 struct bcm_enet_priv *priv;
1448
1449 priv = netdev_priv(dev);
1450 ecmd->autoneg = priv->pause_auto;
1451 ecmd->rx_pause = priv->pause_rx;
1452 ecmd->tx_pause = priv->pause_tx;
1453 }
1454
1455 static int bcm_enet_set_pauseparam(struct net_device *dev,
1456 struct ethtool_pauseparam *ecmd)
1457 {
1458 struct bcm_enet_priv *priv;
1459
1460 priv = netdev_priv(dev);
1461
1462 if (priv->has_phy) {
1463 if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1464 * asymmetric pause mode not supported,
1465 * actually possible but integrated PHY has RO
1466 * asym_pause bit */
1467 return -EINVAL;
1468 }
1469 } else {
1470 /* no pause autoneg on direct mii connection */
1471 if (ecmd->autoneg)
1472 return -EINVAL;
1473 }
1474
1475 priv->pause_auto = ecmd->autoneg;
1476 priv->pause_rx = ecmd->rx_pause;
1477 priv->pause_tx = ecmd->tx_pause;
1478
1479 return 0;
1480 }
1481
1482 static struct ethtool_ops bcm_enet_ethtool_ops = {
1483 .get_strings = bcm_enet_get_strings,
1484 .get_stats_count = bcm_enet_get_stats_count,
1485 .get_ethtool_stats = bcm_enet_get_ethtool_stats,
1486 .get_settings = bcm_enet_get_settings,
1487 .set_settings = bcm_enet_set_settings,
1488 .get_drvinfo = bcm_enet_get_drvinfo,
1489 .get_link = ethtool_op_get_link,
1490 .get_ringparam = bcm_enet_get_ringparam,
1491 .set_ringparam = bcm_enet_set_ringparam,
1492 .get_pauseparam = bcm_enet_get_pauseparam,
1493 .set_pauseparam = bcm_enet_set_pauseparam,
1494 };
1495
1496 static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1497 {
1498 struct bcm_enet_priv *priv;
1499
1500 priv = netdev_priv(dev);
1501 if (priv->has_phy) {
1502 if (!priv->phydev)
1503 return -ENODEV;
1504 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
1505 } else {
1506 struct mii_if_info mii;
1507
1508 mii.dev = dev;
1509 mii.mdio_read = bcm_enet_mdio_read_mii;
1510 mii.mdio_write = bcm_enet_mdio_write_mii;
1511 mii.phy_id = 0;
1512 mii.phy_id_mask = 0x3f;
1513 mii.reg_num_mask = 0x1f;
1514 return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1515 }
1516 }
1517
1518 /*
1519 * calculate actual hardware mtu
1520 */
1521 static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
1522 {
1523 int actual_mtu;
1524
1525 actual_mtu = mtu;
1526
1527 /* add ethernet header + vlan tag size */
1528 actual_mtu += VLAN_ETH_HLEN;
1529
1530 if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
1531 return -EINVAL;
1532
1533 /*
1534 * setup maximum size before we get overflow mark in
1535 * descriptor, note that this will not prevent reception of
1536 * big frames, they will be split into multiple buffers
1537 * anyway
1538 */
1539 priv->hw_mtu = actual_mtu;
1540
1541 /*
1542 * align rx buffer size to dma burst len, account for FCS since
1543 * it's appended
1544 */
1545 priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1546 BCMENET_DMA_MAXBURST * 4);
1547 return 0;
1548 }
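/*
 * Worked example (assuming BCMENET_DMA_MAXBURST is 16, the usual value
 * in bcm63xx_enet.h): for the default 1500 byte MTU, hw_mtu becomes
 * 1500 + VLAN_ETH_HLEN (18) = 1518, and rx_skb_size is
 * ALIGN(1518 + ETH_FCS_LEN (4), 16 * 4) = ALIGN(1522, 64) = 1536 bytes
 * per receive buffer.
 */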
1549
1550 /*
1551 * adjust mtu, can't be called while device is running
1552 */
1553 static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1554 {
1555 int ret;
1556
1557 if (netif_running(dev))
1558 return -EBUSY;
1559
1560 ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
1561 if (ret)
1562 return ret;
1563 dev->mtu = new_mtu;
1564 return 0;
1565 }
1566
1567 /*
1568 * preinit hardware to allow mii operation while device is down
1569 */
1570 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1571 {
1572 u32 val;
1573 int limit;
1574
1575 /* make sure mac is disabled */
1576 bcm_enet_disable_mac(priv);
1577
1578 /* soft reset mac */
1579 val = ENET_CTL_SRESET_MASK;
1580 enet_writel(priv, val, ENET_CTL_REG);
1581 wmb();
1582
1583 limit = 1000;
1584 do {
1585 val = enet_readl(priv, ENET_CTL_REG);
1586 if (!(val & ENET_CTL_SRESET_MASK))
1587 break;
1588 udelay(1);
1589 } while (limit--);
1590
1591 /* select correct mii interface */
1592 val = enet_readl(priv, ENET_CTL_REG);
1593 if (priv->use_external_mii)
1594 val |= ENET_CTL_EPHYSEL_MASK;
1595 else
1596 val &= ~ENET_CTL_EPHYSEL_MASK;
1597 enet_writel(priv, val, ENET_CTL_REG);
1598
1599 /* turn on mdc clock */
1600 enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1601 ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1602
1603 /* set mib counters to self-clear when read */
1604 val = enet_readl(priv, ENET_MIBCTL_REG);
1605 val |= ENET_MIBCTL_RDCLEAR_MASK;
1606 enet_writel(priv, val, ENET_MIBCTL_REG);
1607 }
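/*
 * Summary of the preinit steps above: disable and soft-reset the MAC,
 * select the internal or external MII interface, turn on the MDC clock
 * (with preamble enabled) and set the MIB counters to clear on read,
 * so that MDIO access works while the interface is still down.
 */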
1608
1609 /*
1610 * allocate netdevice, request register memory and register device.
1611 */
1612 static int __devinit bcm_enet_probe(struct platform_device *pdev)
1613 {
1614 struct bcm_enet_priv *priv;
1615 struct net_device *dev;
1616 struct bcm63xx_enet_platform_data *pd;
1617 struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1618 struct mii_bus *bus;
1619 const char *clk_name;
1620 unsigned int iomem_size;
1621 int i, ret, mdio_registered, mem_requested;
1622
1623 /* stop if shared driver failed, assume driver->probe will be
1624 * called in the same order we register devices (correct ?) */
1625 if (!bcm_enet_shared_base)
1626 return -ENODEV;
1627
1628 mdio_registered = mem_requested = 0;
1629
1630 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1631 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1632 res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1633 res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1634 if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
1635 return -ENODEV;
1636
1637 ret = 0;
1638 dev = alloc_etherdev(sizeof(*priv));
1639 if (!dev)
1640 return -ENOMEM;
1641 priv = netdev_priv(dev);
1642 memset(priv, 0, sizeof(*priv));
1643
1644 ret = compute_hw_mtu(priv, dev->mtu);
1645 if (ret)
1646 goto out;
1647
1648 iomem_size = res_mem->end - res_mem->start + 1;
1649 if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
1650 ret = -EBUSY;
1651 goto err;
1652 }
1653 mem_requested = 1;
1654
1655 priv->base = ioremap(res_mem->start, iomem_size);
1656 if (priv->base == NULL) {
1657 ret = -ENOMEM;
1658 goto err;
1659 }
1660 dev->irq = priv->irq = res_irq->start;
1661 priv->irq_rx = res_irq_rx->start;
1662 priv->irq_tx = res_irq_tx->start;
1663 priv->mac_id = pdev->id;
1664
1665 /* get rx & tx dma channel id for this mac */
1666 if (priv->mac_id == 0) {
1667 priv->rx_chan = 0;
1668 priv->tx_chan = 1;
1669 clk_name = "enet0";
1670 } else {
1671 priv->rx_chan = 2;
1672 priv->tx_chan = 3;
1673 clk_name = "enet1";
1674 }
1675
1676 priv->mac_clk = clk_get(&pdev->dev, clk_name);
1677 if (IS_ERR(priv->mac_clk)) {
1678 ret = PTR_ERR(priv->mac_clk);
1679 priv->mac_clk = NULL;
1680 goto err;
1681 }
1682 clk_enable(priv->mac_clk);
1683
1684 /* initialize default and fetch platform data */
1685 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1686 priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1687
1688 pd = pdev->dev.platform_data;
1689 if (pd) {
1690 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1691 priv->has_phy = pd->has_phy;
1692 priv->phy_id = pd->phy_id;
1693 priv->has_phy_interrupt = pd->has_phy_interrupt;
1694 priv->phy_interrupt = pd->phy_interrupt;
1695 priv->use_external_mii = !pd->use_internal_phy;
1696 priv->pause_auto = pd->pause_auto;
1697 priv->pause_rx = pd->pause_rx;
1698 priv->pause_tx = pd->pause_tx;
1699 priv->force_duplex_full = pd->force_duplex_full;
1700 priv->force_speed_100 = pd->force_speed_100;
1701 }
1702
1703 if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
1704 /* using internal PHY, enable clock */
1705 priv->phy_clk = clk_get(&pdev->dev, "ephy");
1706 if (IS_ERR(priv->phy_clk)) {
1707 ret = PTR_ERR(priv->phy_clk);
1708 priv->phy_clk = NULL;
1709 goto err;
1710 }
1711 clk_enable(priv->phy_clk);
1712 }
1713
1714 /* do minimal hardware init to be able to probe mii bus */
1715 bcm_enet_hw_preinit(priv);
1716
1717 /* MII bus registration */
1718 if (priv->has_phy) {
1719 bus = &priv->mii_bus;
1720 bus->name = "bcm63xx_enet MII bus";
1721 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
1722 bus->dev = &pdev->dev;
1723 #else
1724 bus->parent = &pdev->dev;
1725 #endif
1726 bus->priv = priv;
1727 bus->read = bcm_enet_mdio_read_phylib;
1728 bus->write = bcm_enet_mdio_write_phylib;
1729 sprintf(bus->id, "%d", priv->mac_id);
1730
1731 /* only probe bus where we think the PHY is, because
1732 * the mdio read operation returns 0 instead of 0xffff
1733 * if a slave is not present on hw */
1734 bus->phy_mask = ~(1 << priv->phy_id);
1735
1736 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1737 if (!bus->irq) {
1738 ret = -ENOMEM;
1739 goto err;
1740 }
1741
1742 if (priv->has_phy_interrupt)
1743 bus->irq[priv->phy_id] = priv->phy_interrupt;
1744 else
1745 bus->irq[priv->phy_id] = PHY_POLL;
1746
1747 ret = mdiobus_register(bus);
1748 if (ret) {
1749 dev_err(&pdev->dev, "unable to register mdio bus\n");
1750 goto err;
1751 }
1752 mdio_registered = 1;
1753 } else {
1754
1755 /* run platform code to initialize PHY device */
1756 if (pd->mii_config &&
1757 pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1758 bcm_enet_mdio_write_mii)) {
1759 dev_err(&pdev->dev, "unable to configure mdio bus\n");
1760 goto err;
1761 }
1762 }
1763
1764 spin_lock_init(&priv->rx_lock);
1765
1766 /* init rx timeout (used for oom) */
1767 init_timer(&priv->rx_timeout);
1768 priv->rx_timeout.function = bcm_enet_refill_rx_timer;
1769 priv->rx_timeout.data = (unsigned long)dev;
1770
1771 /* init the mib update lock&work */
1772 mutex_init(&priv->mib_update_lock);
1773 INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1774
1775 /* zero mib counters */
1776 for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1777 enet_writel(priv, 0, ENET_MIB_REG(i));
1778
1779 /* register netdevice */
1780 dev->open = bcm_enet_open;
1781 dev->stop = bcm_enet_stop;
1782 dev->hard_start_xmit = bcm_enet_start_xmit;
1783 dev->get_stats = bcm_enet_get_stats;
1784 dev->set_mac_address = bcm_enet_set_mac_address;
1785 dev->set_multicast_list = bcm_enet_set_multicast_list;
1786 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1787 dev->do_ioctl = bcm_enet_ioctl;
1788 #ifdef CONFIG_NET_POLL_CONTROLLER
1789 dev->poll_controller = bcm_enet_netpoll;
1790 #endif
1791 dev->change_mtu = bcm_enet_change_mtu;
1792
1793 SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
1794 SET_NETDEV_DEV(dev, &pdev->dev);
1795
1796 ret = register_netdev(dev);
1797 if (ret)
1798 goto err;
1799
1800 platform_set_drvdata(pdev, dev);
1801 priv->pdev = pdev;
1802 priv->net_dev = dev;
1803
1804 return 0;
1805
1806 err:
1807 if (mem_requested)
1808 release_mem_region(res_mem->start, iomem_size);
1809 if (mdio_registered)
1810 mdiobus_unregister(&priv->mii_bus);
1811 kfree(priv->mii_bus.irq);
1812 if (priv->mac_clk) {
1813 clk_disable(priv->mac_clk);
1814 clk_put(priv->mac_clk);
1815 }
1816 if (priv->phy_clk) {
1817 clk_disable(priv->phy_clk);
1818 clk_put(priv->phy_clk);
1819 }
1820 if (priv->base) {
1821 /* turn off mdc clock */
1822 enet_writel(priv, 0, ENET_MIISC_REG);
1823 iounmap(priv->base);
1824 }
1825 out:
1826 free_netdev(dev);
1827 return ret;
1828 }
1829
1830
1831 /*
1832 * exit func, stops hardware and unregisters netdevice
1833 */
1834 static int __devexit bcm_enet_remove(struct platform_device *pdev)
1835 {
1836 struct bcm_enet_priv *priv;
1837 struct net_device *dev;
1838 struct resource *res;
1839
1840 /* stop netdevice */
1841 dev = platform_get_drvdata(pdev);
1842 priv = netdev_priv(dev);
1843 unregister_netdev(dev);
1844
1845 /* turn off mdc clock */
1846 enet_writel(priv, 0, ENET_MIISC_REG);
1847
1848 if (priv->has_phy) {
1849 mdiobus_unregister(&priv->mii_bus);
1850 kfree(priv->mii_bus.irq);
1851 } else {
1852 struct bcm63xx_enet_platform_data *pd;
1853
1854 pd = pdev->dev.platform_data;
1855 if (pd && pd->mii_config)
1856 pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1857 bcm_enet_mdio_write_mii);
1858 }
1859
1860 /* release device resources */
1861 iounmap(priv->base);
1862 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1863 release_mem_region(res->start, res->end - res->start + 1);
1864
1865 /* disable hw block clocks */
1866 if (priv->phy_clk) {
1867 clk_disable(priv->phy_clk);
1868 clk_put(priv->phy_clk);
1869 }
1870 clk_disable(priv->mac_clk);
1871 clk_put(priv->mac_clk);
1872
1873 platform_set_drvdata(pdev, NULL);
1874 free_netdev(dev);
1875 return 0;
1876 }
1877
1878 struct platform_driver bcm63xx_enet_driver = {
1879 .probe = bcm_enet_probe,
1880 .remove = __devexit_p(bcm_enet_remove),
1881 .driver = {
1882 .name = "bcm63xx_enet",
1883 .owner = THIS_MODULE,
1884 },
1885 };
1886
1887 /*
1888 * reserve & remap memory space shared between all macs
1889 */
1890 static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
1891 {
1892 struct resource *res;
1893 unsigned int iomem_size;
1894
1895 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1896 if (!res)
1897 return -ENODEV;
1898
1899 iomem_size = res->end - res->start + 1;
1900 if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
1901 return -EBUSY;
1902
1903 bcm_enet_shared_base = ioremap(res->start, iomem_size);
1904 if (!bcm_enet_shared_base) {
1905 release_mem_region(res->start, iomem_size);
1906 return -ENOMEM;
1907 }
1908 return 0;
1909 }
1910
1911 static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
1912 {
1913 struct resource *res;
1914
1915 iounmap(bcm_enet_shared_base);
1916 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1917 release_mem_region(res->start, res->end - res->start + 1);
1918 return 0;
1919 }
1920
1921 /*
1922 * this "shared" driver is needed because both macs share a single
1923 * address space
1924 */
1925 struct platform_driver bcm63xx_enet_shared_driver = {
1926 .probe = bcm_enet_shared_probe,
1927 .remove = __devexit_p(bcm_enet_shared_remove),
1928 .driver = {
1929 .name = "bcm63xx_enet_shared",
1930 .owner = THIS_MODULE,
1931 },
1932 };
1933
1934 /*
1935 * entry point
1936 */
1937 static int __init bcm_enet_init(void)
1938 {
1939 int ret;
1940
1941 ret = platform_driver_register(&bcm63xx_enet_shared_driver);
1942 if (ret)
1943 return ret;
1944
1945 ret = platform_driver_register(&bcm63xx_enet_driver);
1946 if (ret)
1947 platform_driver_unregister(&bcm63xx_enet_shared_driver);
1948
1949 return ret;
1950 }
1951
1952 static void __exit bcm_enet_exit(void)
1953 {
1954 platform_driver_unregister(&bcm63xx_enet_driver);
1955 platform_driver_unregister(&bcm63xx_enet_shared_driver);
1956 }
1957
1958
1959 module_init(bcm_enet_init);
1960 module_exit(bcm_enet_exit);
1961
1962 MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
1963 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
1964 MODULE_LICENSE("GPL");