1 From 49aa7ffcd9bd2d9a0af99fced7b8511160dbf345 Mon Sep 17 00:00:00 2001
2 From: Maxime Bizon <mbizon@freebox.fr>
3 Date: Sun, 21 Sep 2008 03:43:26 +0200
4 Subject: [PATCH] [MIPS] BCM63XX: Add integrated ethernet mac support.
6 Signed-off-by: Maxime Bizon <mbizon@freebox.fr>
7 ---
8 arch/mips/bcm63xx/Makefile | 1 +
9 arch/mips/bcm63xx/dev-enet.c | 158 ++
10 drivers/net/Kconfig | 9 +
11 drivers/net/Makefile | 1 +
12 drivers/net/bcm63xx_enet.c | 1894 ++++++++++++++++++++++
13 drivers/net/bcm63xx_enet.h | 294 ++++
14 include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h | 45 +
15 7 files changed, 2402 insertions(+), 0 deletions(-)
16 create mode 100644 arch/mips/bcm63xx/dev-enet.c
17 create mode 100644 drivers/net/bcm63xx_enet.c
18 create mode 100644 drivers/net/bcm63xx_enet.h
19 create mode 100644 include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h
21 diff --git a/arch/mips/bcm63xx/Makefile b/arch/mips/bcm63xx/Makefile
22 index 99e335d..5358093 100644
23 --- a/arch/mips/bcm63xx/Makefile
24 +++ b/arch/mips/bcm63xx/Makefile
25 @@ -3,4 +3,5 @@ obj-y += dev-uart.o
27 obj-y += dev-usb-ohci.o
28 obj-y += dev-usb-ehci.o
29 +obj-y += dev-enet.o
30 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
31 diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
32 new file mode 100644
33 index 0000000..c6e472e
34 --- /dev/null
35 +++ b/arch/mips/bcm63xx/dev-enet.c
36 @@ -0,0 +1,158 @@
37 +/*
38 + * This file is subject to the terms and conditions of the GNU General Public
39 + * License. See the file "COPYING" in the main directory of this archive
40 + * for more details.
41 + *
42 + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
45 +#include <linux/init.h>
46 +#include <linux/kernel.h>
47 +#include <linux/platform_device.h>
48 +#include <bcm63xx_dev_enet.h>
49 +#include <bcm63xx_io.h>
50 +#include <bcm63xx_regs.h>
52 +static struct resource shared_res[] = {
54 + .start = -1, /* filled at runtime */
55 + .end = -1, /* filled at runtime */
56 + .flags = IORESOURCE_MEM,
60 +static struct platform_device bcm63xx_enet_shared_device = {
61 + .name = "bcm63xx_enet_shared",
63 + .num_resources = ARRAY_SIZE(shared_res),
64 + .resource = shared_res,
67 +static int shared_device_registered = 0;
69 +static struct resource enet0_res[] = {
71 + .start = -1, /* filled at runtime */
72 + .end = -1, /* filled at runtime */
73 + .flags = IORESOURCE_MEM,
76 + .start = -1, /* filled at runtime */
77 + .flags = IORESOURCE_IRQ,
80 + .start = -1, /* filled at runtime */
82 + .flags = IORESOURCE_IRQ,
85 + .start = -1, /* filled at runtime */
87 + .flags = IORESOURCE_IRQ,
91 +static struct bcm63xx_enet_platform_data enet0_pd;
93 +static struct platform_device bcm63xx_enet0_device = {
94 + .name = "bcm63xx_enet",
96 + .num_resources = ARRAY_SIZE(enet0_res),
97 + .resource = enet0_res,
99 + .platform_data = &enet0_pd,
103 +static struct resource enet1_res[] = {
105 + .start = -1, /* filled at runtime */
106 + .end = -1, /* filled at runtime */
107 + .flags = IORESOURCE_MEM,
110 + .start = -1, /* filled at runtime */
111 + .flags = IORESOURCE_IRQ,
114 + .start = -1, /* filled at runtime */
115 + .flags = IORESOURCE_IRQ,
118 + .start = -1, /* filled at runtime */
119 + .flags = IORESOURCE_IRQ,
123 +static struct bcm63xx_enet_platform_data enet1_pd;
125 +static struct platform_device bcm63xx_enet1_device = {
126 + .name = "bcm63xx_enet",
128 + .num_resources = ARRAY_SIZE(enet1_res),
129 + .resource = enet1_res,
131 + .platform_data = &enet1_pd,
135 +int __init bcm63xx_enet_register(int unit,
136 + const struct bcm63xx_enet_platform_data *pd)
138 + struct platform_device *pdev;
139 + struct bcm63xx_enet_platform_data *dpd;
140 + int ret;
145 + if (!shared_device_registered) {
146 + shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
147 + shared_res[0].end = shared_res[0].start;
148 + shared_res[0].end += RSET_ENETDMA_SIZE - 1;
150 + ret = platform_device_register(&bcm63xx_enet_shared_device);
151 + if (ret)
152 + return ret;
153 + shared_device_registered = 1;
154 + }
156 + if (unit == 0) {
157 + enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
158 + enet0_res[0].end = enet0_res[0].start;
159 + enet0_res[0].end += RSET_ENET_SIZE - 1;
160 + enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
161 + enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
162 + enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
163 + pdev = &bcm63xx_enet0_device;
164 + } else {
165 + enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
166 + enet1_res[0].end = enet1_res[0].start;
167 + enet1_res[0].end += RSET_ENET_SIZE - 1;
168 + enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
169 + enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
170 + enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
171 + pdev = &bcm63xx_enet1_device;
172 + }
174 + /* copy given platform data */
175 + dpd = pdev->dev.platform_data;
176 + memcpy(dpd, pd, sizeof (*pd));
178 + /* adjust them in case internal phy is used */
179 + if (dpd->use_internal_phy) {
181 + /* internal phy only exists for enet0 */
186 + dpd->has_phy_interrupt = 1;
187 + dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
190 + ret = platform_device_register(pdev);
191 + if (ret)
192 + return ret;
193 + return 0;
194 +}
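For context, board code is expected to fill a bcm63xx_enet_platform_data and hand it to the function above. The following is a hypothetical sketch, not part of the patch: the MAC address is a placeholder, and the field values assume the enet0 + internal-PHY configuration that bcm63xx_enet_register() special-cases (phy_id is left for the register function to fix up, an assumption here).

#include <linux/init.h>
#include <bcm63xx_dev_enet.h>

/* hypothetical board setup code: register enet0 using the integrated
 * PHY; the mac address below is a placeholder */
static struct bcm63xx_enet_platform_data board_enet0_pd = {
	.mac_addr = { 0x00, 0x07, 0xcb, 0x00, 0x00, 0x01 },
	.has_phy = 1,
	.use_internal_phy = 1,
	.pause_auto = 1,
	.pause_rx = 1,
	.pause_tx = 1,
};

static int __init board_register_devices(void)
{
	return bcm63xx_enet_register(0, &board_enet0_pd);
}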
195 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
196 index bbd0e67..01f7e5a 100644
197 --- a/drivers/net/Kconfig
198 +++ b/drivers/net/Kconfig
199 @@ -1840,6 +1840,15 @@ config NE_H8300
200 Say Y here if you want to use the NE2000 compatible
201 controller on the Renesas H8/300 processor.
202 +
203 +config BCM63XX_ENET
204 + tristate "Broadcom 63xx internal mac support"
205 + depends on BCM63XX
206 + select MII
207 + select PHYLIB
208 + help
209 + This driver supports the ethernet MACs in the Broadcom 63xx
210 + MIPS chipset family (BCM63XX).
212 source "drivers/net/fs_enet/Kconfig"
215 diff --git a/drivers/net/Makefile b/drivers/net/Makefile
216 index 284ed83..ab22f32 100644
217 --- a/drivers/net/Makefile
218 +++ b/drivers/net/Makefile
219 @@ -123,6 +123,7 @@ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
220 obj-$(CONFIG_B44) += b44.o
221 obj-$(CONFIG_FORCEDETH) += forcedeth.o
222 obj-$(CONFIG_NE_H8300) += ne-h8300.o
223 +obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
224 obj-$(CONFIG_AX88796) += ax88796.o
226 obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
227 diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
228 new file mode 100644
229 index 0000000..40c2565
230 --- /dev/null
231 +++ b/drivers/net/bcm63xx_enet.c
232 @@ -0,0 +1,1894 @@
233 +/*
234 + * Driver for BCM963xx builtin Ethernet mac
236 + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
238 + * This program is free software; you can redistribute it and/or modify
239 + * it under the terms of the GNU General Public License as published by
240 + * the Free Software Foundation; either version 2 of the License, or
241 + * (at your option) any later version.
243 + * This program is distributed in the hope that it will be useful,
244 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
245 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
246 + * GNU General Public License for more details.
248 + * You should have received a copy of the GNU General Public License
249 + * along with this program; if not, write to the Free Software
250 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
252 +#include <linux/init.h>
253 +#include <linux/module.h>
254 +#include <linux/clk.h>
255 +#include <linux/etherdevice.h>
256 +#include <linux/delay.h>
257 +#include <linux/ethtool.h>
258 +#include <linux/crc32.h>
259 +#include <linux/err.h>
260 +#include <linux/dma-mapping.h>
261 +#include <linux/platform_device.h>
263 +#include <bcm63xx_dev_enet.h>
264 +#include "bcm63xx_enet.h"
266 +static char bcm_enet_driver_name[] = "bcm63xx_enet";
267 +static char bcm_enet_driver_version[] = "1.0";
269 +static int copybreak __read_mostly = 128;
270 +module_param(copybreak, int, 0);
271 +MODULE_PARM_DESC(copybreak, "Receive copy threshold");
273 +/* io memory shared between all devices */
274 +static void __iomem *bcm_enet_shared_base;
277 + * io helpers to access mac registers
279 +static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
281 + return bcm_readl(priv->base + off);
284 +static inline void enet_writel(struct bcm_enet_priv *priv,
287 + bcm_writel(val, priv->base + off);
291 + * io helpers to access shared registers
293 +static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
295 + return bcm_readl(bcm_enet_shared_base + off);
298 +static inline void enet_dma_writel(struct bcm_enet_priv *priv,
301 + bcm_writel(val, bcm_enet_shared_base + off);
305 + * write given data into mii register and wait for transfer to end
306 + * with timeout (average measured transfer time is 25us)
308 +static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
312 + /* make sure mii interrupt status is cleared */
313 + enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
315 + enet_writel(priv, data, ENET_MIIDATA_REG);
318 + /* busy wait on mii interrupt bit, with timeout */
321 + if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
324 + } while (limit-- >= 0);
326 + return (limit < 0) ? 1 : 0;
330 + * MII internal read callback
332 +static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
337 + tmp = regnum << ENET_MIIDATA_REG_SHIFT;
338 + tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
339 + tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
340 + tmp |= ENET_MIIDATA_OP_READ_MASK;
342 + if (do_mdio_op(priv, tmp))
345 + val = enet_readl(priv, ENET_MIIDATA_REG);
351 + * MII internal write callback
353 +static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
354 + int regnum, u16 value)
358 + tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
359 + tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
360 + tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
361 + tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
362 + tmp |= ENET_MIIDATA_OP_WRITE_MASK;
364 + (void)do_mdio_op(priv, tmp);
369 + * MII read callback from phylib
371 +static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
374 + return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
378 + * MII write callback from phylib
380 +static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
381 + int regnum, u16 value)
383 + return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
387 + * MII read callback from mii core
389 +static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
392 + return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
396 + * MII write callback from mii core
398 +static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
399 + int regnum, int value)
401 + bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
407 +static int bcm_enet_refill_rx(struct net_device *dev)
409 + struct bcm_enet_priv *priv;
411 + priv = netdev_priv(dev);
413 + while (priv->rx_desc_count < priv->rx_ring_size) {
414 + struct bcm_enet_desc *desc;
415 + struct sk_buff *skb;
416 + dma_addr_t p;
417 + u32 len_stat;
418 + int desc_idx;
420 + desc_idx = priv->rx_dirty_desc;
421 + desc = &priv->rx_desc_cpu[desc_idx];
423 + if (!priv->rx_skb[desc_idx]) {
424 + skb = netdev_alloc_skb(dev, BCMENET_MAX_RX_SIZE);
425 + if (!skb)
426 + break;
427 + priv->rx_skb[desc_idx] = skb;
429 + p = dma_map_single(&priv->pdev->dev, skb->data,
430 + BCMENET_MAX_RX_SIZE,
431 + DMA_FROM_DEVICE);
432 + desc->address = p;
435 + len_stat = BCMENET_MAX_RX_SIZE << DMADESC_LENGTH_SHIFT;
436 + len_stat |= DMADESC_OWNER_MASK;
437 + if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
438 + len_stat |= DMADESC_WRAP_MASK;
439 + priv->rx_dirty_desc = 0;
440 + } else {
441 + priv->rx_dirty_desc++;
442 + }
444 + desc->len_stat = len_stat;
446 + priv->rx_desc_count++;
448 + /* tell dma engine we allocated one buffer */
449 + enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
452 + /* If rx ring is still empty, set a timer to try allocating
453 + * again at a later time. */
454 + if (priv->rx_desc_count == 0 && netif_running(dev)) {
455 + dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
456 + priv->rx_timeout.expires = jiffies + HZ;
457 + add_timer(&priv->rx_timeout);
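The loop above is the producer half of a single-producer/single-consumer descriptor ring: software arms a buffer by setting OWNER (the hardware now owns the slot) and marks the last slot with WRAP so the engine walks back to slot 0. A standalone model of that arming pass, assuming nothing beyond the DMADESC_* bit layout declared in bcm63xx_enet.h:

#include <stdint.h>
#include <stdio.h>

/* bit layout mirrored from bcm63xx_enet.h */
#define DMADESC_LENGTH_SHIFT	16
#define DMADESC_OWNER_MASK	(1 << 15)
#define DMADESC_WRAP_MASK	(1 << 12)

#define RING_SIZE 4

struct desc { uint32_t len_stat; uint32_t address; };

int main(void)
{
	struct desc ring[RING_SIZE] = { { 0, 0 } };
	unsigned int i, dirty = 0, count = 0;

	/* arm every descriptor, tagging the last slot with WRAP */
	while (count < RING_SIZE) {
		unsigned int idx = dirty;
		uint32_t len_stat = 2048 << DMADESC_LENGTH_SHIFT;

		len_stat |= DMADESC_OWNER_MASK;
		if (idx == RING_SIZE - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			dirty = 0;
		} else {
			dirty++;
		}
		ring[idx].len_stat = len_stat;
		count++;
	}

	for (i = 0; i < RING_SIZE; i++)
		printf("desc %u: owner=%d wrap=%d\n", i,
		       !!(ring[i].len_stat & DMADESC_OWNER_MASK),
		       !!(ring[i].len_stat & DMADESC_WRAP_MASK));
	return 0;
}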
464 + * timer callback to defer rx queue refill when we are OOM
466 +static void bcm_enet_refill_rx_timer(unsigned long data)
468 + struct net_device *dev;
469 + struct bcm_enet_priv *priv;
471 + dev = (struct net_device *)data;
472 + priv = netdev_priv(dev);
474 + spin_lock(&priv->rx_lock);
475 + bcm_enet_refill_rx(dev);
476 + spin_unlock(&priv->rx_lock);
480 + * extract packet from rx queue
482 +static int bcm_enet_receive_queue(struct net_device *dev, int budget)
484 + struct bcm_enet_priv *priv;
485 + struct device *kdev;
486 + int processed;
488 + priv = netdev_priv(dev);
489 + kdev = &priv->pdev->dev;
490 + processed = 0;
492 + /* don't scan ring further than number of refilled
493 + * descriptors */
494 + if (budget > priv->rx_desc_count)
495 + budget = priv->rx_desc_count;
498 + struct bcm_enet_desc *desc;
499 + struct sk_buff *skb;
504 + desc_idx = priv->rx_curr_desc;
505 + desc = &priv->rx_desc_cpu[desc_idx];
507 + /* make sure we actually read the descriptor status at
508 + * each loop */
509 + rmb();
511 + len_stat = desc->len_stat;
513 + /* break if dma ownership belongs to hw */
514 + if (len_stat & DMADESC_OWNER_MASK)
515 + break;
517 + processed++;
518 + priv->rx_curr_desc++;
519 + if (priv->rx_curr_desc == priv->rx_ring_size)
520 + priv->rx_curr_desc = 0;
521 + priv->rx_desc_count--;
523 + /* if the packet does not have start of packet _and_
524 + * end of packet flag set, then just recycle it */
525 + if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
526 + priv->stats.rx_dropped++;
530 + /* recycle packet if it's marked as bad */
531 + if (unlikely(len_stat & DMADESC_ERR_MASK)) {
532 + priv->stats.rx_errors++;
534 + if (len_stat & DMADESC_OVSIZE_MASK)
535 + priv->stats.rx_length_errors++;
536 + if (len_stat & DMADESC_CRC_MASK)
537 + priv->stats.rx_crc_errors++;
538 + if (len_stat & DMADESC_UNDER_MASK)
539 + priv->stats.rx_frame_errors++;
540 + if (len_stat & DMADESC_OV_MASK)
541 + priv->stats.rx_fifo_errors++;
546 + skb = priv->rx_skb[desc_idx];
547 + len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
548 + /* don't include FCS */
549 + len -= 4;
551 + if (len < copybreak) {
552 + struct sk_buff *nskb;
554 + nskb = netdev_alloc_skb(dev, len + 2);
555 + if (!nskb) {
556 + /* forget packet, just rearm desc */
557 + priv->stats.rx_dropped++;
561 + /* since we're copying the data, we can align
562 + * it properly */
563 + skb_reserve(nskb, NET_IP_ALIGN);
564 + dma_sync_single_for_cpu(kdev, desc->address,
565 + len, DMA_FROM_DEVICE);
566 + memcpy(nskb->data, skb->data, len);
567 + dma_sync_single_for_device(kdev, desc->address,
568 + len, DMA_FROM_DEVICE);
569 + skb = nskb;
570 + } else {
571 + dma_unmap_single(&priv->pdev->dev, desc->address,
572 + BCMENET_MAX_RX_SIZE, DMA_FROM_DEVICE);
573 + priv->rx_skb[desc_idx] = NULL;
574 + }
576 + skb_put(skb, len);
578 + skb->protocol = eth_type_trans(skb, dev);
579 + priv->stats.rx_packets++;
580 + priv->stats.rx_bytes += len;
581 + dev->last_rx = jiffies;
582 + netif_receive_skb(skb);
584 + } while (--budget > 0);
586 + if (processed || !priv->rx_desc_count) {
587 + bcm_enet_refill_rx(dev);
590 + enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
591 + ENETDMA_CHANCFG_REG(priv->rx_chan));
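Two details of the copy path above are worth spelling out. Frames shorter than the copybreak module parameter (128 bytes by default, tunable at load time) are copied into a fresh skb so the large rx buffer can be rearmed immediately. The copy is also the only chance to fix alignment, hence skb_reserve(nskb, NET_IP_ALIGN); a quick arithmetic check, with nothing driver-specific assumed:

#include <stdio.h>

/* a 14-byte ethernet header leaves the ip header 2 bytes short of a
 * 4-byte boundary; reserving NET_IP_ALIGN (2) bytes fixes that */
int main(void)
{
	const int eth_hlen = 14, net_ip_align = 2;

	printf("ip header offset %% 4, no pad:   %d\n", eth_hlen % 4);
	printf("ip header offset %% 4, with pad: %d\n",
	       (net_ip_align + eth_hlen) % 4);
	return 0;
}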
599 + * try to reclaim transmitted buffers, forcibly if requested
601 +static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
603 + struct bcm_enet_priv *priv;
604 + int released;
606 + priv = netdev_priv(dev);
607 + released = 0;
609 + while (priv->tx_desc_count < priv->tx_ring_size) {
610 + struct bcm_enet_desc *desc;
611 + struct sk_buff *skb;
613 + /* We run in a bh and fight against start_xmit, which
614 + * is called with bh disabled */
615 + spin_lock(&priv->tx_lock);
617 + desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
619 + if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
620 + spin_unlock(&priv->tx_lock);
624 + /* ensure the other fields of the descriptor were not read
625 + * before we checked ownership */
628 + skb = priv->tx_skb[priv->tx_dirty_desc];
629 + priv->tx_skb[priv->tx_dirty_desc] = NULL;
630 + dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
633 + priv->tx_dirty_desc++;
634 + if (priv->tx_dirty_desc == priv->tx_ring_size)
635 + priv->tx_dirty_desc = 0;
636 + priv->tx_desc_count++;
638 + spin_unlock(&priv->tx_lock);
640 + if (desc->len_stat & DMADESC_UNDER_MASK)
641 + priv->stats.tx_errors++;
643 + dev_kfree_skb(skb);
644 + released++;
647 + if (netif_queue_stopped(dev) && released)
648 + netif_wake_queue(dev);
654 + * poll func, called by network core
656 +static int bcm_enet_poll(struct napi_struct *napi, int budget)
658 + struct bcm_enet_priv *priv;
659 + struct net_device *dev;
660 + int tx_work_done, rx_work_done;
662 + priv = container_of(napi, struct bcm_enet_priv, napi);
663 + dev = priv->net_dev;
665 + /* ack interrupts */
666 + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
667 + ENETDMA_IR_REG(priv->rx_chan));
668 + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
669 + ENETDMA_IR_REG(priv->tx_chan));
671 + /* reclaim sent skb */
672 + tx_work_done = bcm_enet_tx_reclaim(dev, 0);
674 + spin_lock(&priv->rx_lock);
675 + rx_work_done = bcm_enet_receive_queue(dev, budget);
676 + spin_unlock(&priv->rx_lock);
678 + if (rx_work_done >= budget || tx_work_done > 0) {
679 + /* rx/tx queue is not yet empty/clean */
680 + return rx_work_done;
683 + /* no more packets in rx/tx queue, remove device from poll
684 + * queue */
685 + __netif_rx_complete(dev, napi);
687 + /* restore rx/tx interrupt */
688 + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
689 + ENETDMA_IRMASK_REG(priv->rx_chan));
690 + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
691 + ENETDMA_IRMASK_REG(priv->tx_chan));
693 + return rx_work_done;
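The return-value contract matters here: reporting rx_work_done >= budget (or pending tx work) keeps the device on the poll list with interrupts masked, while returning less re-arms interrupts via the unmask writes above. A userspace model of that budget loop, with no kernel APIs assumed:

#include <stdio.h>

static int pending = 37;	/* packets waiting in the rx ring */

/* model of the poll callback: handle at most 'budget' packets */
static int poll(int budget)
{
	int done = pending < budget ? pending : budget;

	pending -= done;
	return done;
}

int main(void)
{
	int budget = 16, rounds = 0, done;

	do {
		done = poll(budget);
		rounds++;
		printf("round %d: processed %d\n", rounds, done);
	} while (done >= budget);	/* full budget: stay in polled mode */

	printf("work done < budget after %d rounds: unmask irqs\n", rounds);
	return 0;
}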
697 + * mac interrupt handler
699 +static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
701 + struct net_device *dev;
702 + struct bcm_enet_priv *priv;
703 + u32 stat;
705 + dev = dev_id;
706 + priv = netdev_priv(dev);
708 + stat = enet_readl(priv, ENET_IR_REG);
709 + if (!(stat & ENET_IR_MIB))
712 + /* clear & mask interrupt */
713 + enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
714 + enet_writel(priv, 0, ENET_IRMASK_REG);
716 + /* read mib registers in workqueue */
717 + schedule_work(&priv->mib_update_task);
719 + return IRQ_HANDLED;
723 + * rx/tx dma interrupt handler
725 +static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
727 + struct net_device *dev;
728 + struct bcm_enet_priv *priv;
730 + dev = dev_id;
731 + priv = netdev_priv(dev);
733 + /* mask rx/tx interrupts */
734 + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
735 + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
737 + netif_rx_schedule(dev, &priv->napi);
739 + return IRQ_HANDLED;
743 + * tx request callback
745 +static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
747 + struct bcm_enet_priv *priv;
748 + struct bcm_enet_desc *desc;
749 + u32 len_stat;
750 + int ret;
752 + priv = netdev_priv(dev);
754 + /* lock against tx reclaim */
755 + spin_lock(&priv->tx_lock);
757 + /* make sure the tx hw queue is not full; this should not happen
758 + * since we stop the queue before that is the case */
759 + if (unlikely(!priv->tx_desc_count)) {
760 + netif_stop_queue(dev);
761 + dev_err(&priv->pdev->dev, "xmit called with no tx desc "
763 + ret = NETDEV_TX_BUSY;
767 + /* point to the next available desc */
768 + desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
769 + priv->tx_skb[priv->tx_curr_desc] = skb;
771 + /* fill descriptor */
772 + desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
775 + len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
776 + len_stat |= DMADESC_ESOP_MASK |
777 + DMADESC_APPEND_CRC |
778 + DMADESC_OWNER_MASK;
780 + priv->tx_curr_desc++;
781 + if (priv->tx_curr_desc == priv->tx_ring_size) {
782 + priv->tx_curr_desc = 0;
783 + len_stat |= DMADESC_WRAP_MASK;
785 + priv->tx_desc_count--;
787 + /* dma might already be polling, make sure we update desc
788 + * fields in the correct order */
789 + wmb();
790 + desc->len_stat = len_stat;
794 + enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
795 + ENETDMA_CHANCFG_REG(priv->tx_chan));
797 + /* stop queue if no more desc available */
798 + if (!priv->tx_desc_count)
799 + netif_stop_queue(dev);
801 + priv->stats.tx_bytes += skb->len;
802 + priv->stats.tx_packets++;
803 + dev->trans_start = jiffies;
804 + ret = NETDEV_TX_OK;
807 + spin_unlock(&priv->tx_lock);
812 + * Change the interface's mac address.
814 +static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
816 + struct bcm_enet_priv *priv;
817 + struct sockaddr *addr = p;
820 + priv = netdev_priv(dev);
821 + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
823 + /* use perfect match register 0 to store my mac address */
824 + val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
825 + (dev->dev_addr[4] << 8) | dev->dev_addr[5];
826 + enet_writel(priv, val, ENET_PML_REG(0));
828 + val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
829 + val |= ENET_PMH_DATAVALID_MASK;
830 + enet_writel(priv, val, ENET_PMH_REG(0));
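The split above packs the six MAC bytes into the two 32-bit perfect-match registers: the low four bytes go into PML, the top two plus a valid flag into PMH. A standalone model; the exact bit position of the DATAVALID flag lives in bcm63xx_regs.h and the value used here (bit 16) is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

#define PMH_DATAVALID (1U << 16)	/* assumed; see bcm63xx_regs.h */

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x07, 0xcb, 0x00, 0x00, 0x01 };
	uint32_t pml, pmh;

	pml = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
	      (mac[4] << 8) | mac[5];
	pmh = (mac[0] << 8) | mac[1];
	pmh |= PMH_DATAVALID;

	printf("PML=0x%08x PMH=0x%08x\n", pml, pmh);
	return 0;
}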
836 + * Change rx mode (promiscuous/allmulti) and update multicast list
838 +static void bcm_enet_set_multicast_list(struct net_device *dev)
840 + struct bcm_enet_priv *priv;
841 + struct dev_mc_list *mc_list;
845 + priv = netdev_priv(dev);
847 + val = enet_readl(priv, ENET_RXCFG_REG);
849 + if (dev->flags & IFF_PROMISC)
850 + val |= ENET_RXCFG_PROMISC_MASK;
851 + else
852 + val &= ~ENET_RXCFG_PROMISC_MASK;
854 + /* only 3 perfect match registers left, the first one is used
855 + * for our own mac address */
856 + if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
857 + val |= ENET_RXCFG_ALLMCAST_MASK;
858 + else
859 + val &= ~ENET_RXCFG_ALLMCAST_MASK;
861 + /* no need to set perfect match registers if we catch all
862 + * multicast */
863 + if (val & ENET_RXCFG_ALLMCAST_MASK) {
864 + enet_writel(priv, val, ENET_RXCFG_REG);
868 + for (i = 0, mc_list = dev->mc_list;
869 + (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
870 + i++, mc_list = mc_list->next) {
874 + /* filter non-ethernet addresses */
875 + if (mc_list->dmi_addrlen != 6)
878 + /* update perfect match registers */
879 + dmi_addr = mc_list->dmi_addr;
880 + tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
881 + (dmi_addr[4] << 8) | dmi_addr[5];
882 + enet_writel(priv, tmp, ENET_PML_REG(i + 1));
884 + tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
885 + tmp |= ENET_PMH_DATAVALID_MASK;
886 + enet_writel(priv, tmp, ENET_PMH_REG(i + 1));
889 + for (; i < 3; i++) {
890 + enet_writel(priv, 0, ENET_PML_REG(i + 1));
891 + enet_writel(priv, 0, ENET_PMH_REG(i + 1));
894 + enet_writel(priv, val, ENET_RXCFG_REG);
898 + * set mac duplex parameters
900 +static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
904 + val = enet_readl(priv, ENET_TXCTL_REG);
905 + if (fullduplex)
906 + val |= ENET_TXCTL_FD_MASK;
907 + else
908 + val &= ~ENET_TXCTL_FD_MASK;
909 + enet_writel(priv, val, ENET_TXCTL_REG);
913 + * set mac flow control parameters
915 +static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
919 + /* rx flow control (pause frame handling) */
920 + val = enet_readl(priv, ENET_RXCFG_REG);
921 + if (rx_en)
922 + val |= ENET_RXCFG_ENFLOW_MASK;
923 + else
924 + val &= ~ENET_RXCFG_ENFLOW_MASK;
925 + enet_writel(priv, val, ENET_RXCFG_REG);
927 + /* tx flow control (pause frame generation) */
928 + val = enet_dma_readl(priv, ENETDMA_CFG_REG);
929 + if (tx_en)
930 + val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
931 + else
932 + val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
933 + enet_dma_writel(priv, val, ENETDMA_CFG_REG);
937 + * link changed callback (from phylib)
939 +static void bcm_enet_adjust_phy_link(struct net_device *dev)
941 + struct bcm_enet_priv *priv;
942 + struct phy_device *phydev;
943 + int status_changed;
945 + priv = netdev_priv(dev);
946 + phydev = priv->phydev;
947 + status_changed = 0;
949 + if (priv->old_link != phydev->link) {
950 + status_changed = 1;
951 + priv->old_link = phydev->link;
954 + /* reflect duplex change in mac configuration */
955 + if (phydev->link && phydev->duplex != priv->old_duplex) {
956 + bcm_enet_set_duplex(priv,
957 + (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
958 + status_changed = 1;
959 + priv->old_duplex = phydev->duplex;
962 + /* enable flow control if remote advertises it (trust phylib to
963 + * check that duplex is full) */
964 + if (phydev->link && phydev->pause != priv->old_pause) {
965 + int rx_pause_en, tx_pause_en;
967 + if (phydev->pause) {
968 + /* pause was advertised by lpa and us */
969 + rx_pause_en = 1;
970 + tx_pause_en = 1;
971 + } else if (!priv->pause_auto) {
972 + /* pause setting overridden by user */
973 + rx_pause_en = priv->pause_rx;
974 + tx_pause_en = priv->pause_tx;
975 + } else {
976 + rx_pause_en = 0;
977 + tx_pause_en = 0;
978 + }
980 + bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
981 + status_changed = 1;
982 + priv->old_pause = phydev->pause;
985 + if (status_changed) {
986 + pr_info("%s: link %s", dev->name, phydev->link ?
987 + "UP" : "DOWN");
988 + if (phydev->link)
989 + printk(" - %d/%s - flow control %s", phydev->speed,
990 + DUPLEX_FULL == phydev->duplex ? "full" : "half",
991 + phydev->pause == 1 ? "rx&tx" : "off");
998 + * link changed callback (if phylib is not used)
1000 +static void bcm_enet_adjust_link(struct net_device *dev)
1002 + struct bcm_enet_priv *priv;
1004 + priv = netdev_priv(dev);
1005 + bcm_enet_set_duplex(priv, priv->force_duplex_full);
1006 + bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
1008 + pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
1010 + priv->force_speed_100 ? 100 : 10,
1011 + priv->force_duplex_full ? "full" : "half",
1012 + priv->pause_rx ? "rx" : "off",
1013 + priv->pause_tx ? "tx" : "off");
1017 + * open callback, allocate dma rings & buffers and start rx operation
1019 +static int bcm_enet_open(struct net_device *dev)
1021 + struct bcm_enet_priv *priv;
1022 + struct sockaddr addr;
1023 + struct device *kdev;
1024 + struct phy_device *phydev;
1025 + int irq_requested, i, ret;
1026 + unsigned int size;
1027 + char phy_id[BUS_ID_SIZE];
1031 + priv = netdev_priv(dev);
1032 + priv->rx_desc_cpu = priv->tx_desc_cpu = NULL;
1033 + priv->rx_skb = priv->tx_skb = NULL;
1035 + kdev = &priv->pdev->dev;
1037 + if (priv->has_phy) {
1038 + /* connect to PHY */
1039 + snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
1040 + priv->mac_id ? "1" : "0", priv->phy_id);
1042 + phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
1043 + PHY_INTERFACE_MODE_MII);
1045 + if (IS_ERR(phydev)) {
1046 + dev_err(kdev, "could not attach to PHY\n");
1047 + return PTR_ERR(phydev);
1050 + /* mask with MAC supported features */
1051 + phydev->supported &= (SUPPORTED_10baseT_Half |
1052 + SUPPORTED_10baseT_Full |
1053 + SUPPORTED_100baseT_Half |
1054 + SUPPORTED_100baseT_Full |
1055 + SUPPORTED_Autoneg |
1056 + SUPPORTED_Pause |
1057 + SUPPORTED_MII);
1058 + phydev->advertising = phydev->supported;
1060 + if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
1061 + phydev->advertising |= SUPPORTED_Pause;
1062 + else
1063 + phydev->advertising &= ~SUPPORTED_Pause;
1065 + dev_info(kdev, "attached PHY at address %d [%s]\n",
1066 + phydev->addr, phydev->drv->name);
1068 + priv->old_link = 0;
1069 + priv->old_duplex = -1;
1070 + priv->old_pause = -1;
1071 + priv->phydev = phydev;
1074 + /* mask all interrupts and request them */
1075 + enet_writel(priv, 0, ENET_IRMASK_REG);
1076 + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
1077 + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
1079 + irq_requested = 0;
1080 + ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
1085 + ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
1086 + IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
1091 + ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
1092 + IRQF_DISABLED, dev->name, dev);
1097 + /* initialize perfect match registers */
1098 + for (i = 0; i < 4; i++) {
1099 + enet_writel(priv, 0, ENET_PML_REG(i));
1100 + enet_writel(priv, 0, ENET_PMH_REG(i));
1103 + /* write device mac address */
1104 + memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
1105 + bcm_enet_set_mac_address(dev, &addr);
1107 + /* allocate rx dma ring */
1108 + size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
1109 + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
1111 + dev_err(kdev, "cannot allocate rx ring %u\n", size);
1116 + memset(p, 0, size);
1117 + priv->rx_desc_alloc_size = size;
1118 + priv->rx_desc_cpu = p;
1120 + /* allocate tx dma ring */
1121 + size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
1122 + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
1124 + dev_err(kdev, "cannot allocate tx ring\n");
1129 + memset(p, 0, size);
1130 + priv->tx_desc_alloc_size = size;
1131 + priv->tx_desc_cpu = p;
1133 + priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
1135 + if (!priv->tx_skb) {
1136 + dev_err(kdev, "cannot allocate tx skb queue\n");
1141 + priv->tx_desc_count = priv->tx_ring_size;
1142 + priv->tx_dirty_desc = 0;
1143 + priv->tx_curr_desc = 0;
1144 + spin_lock_init(&priv->tx_lock);
1146 + /* init & fill rx ring with skbs */
1147 + priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
1149 + if (!priv->rx_skb) {
1150 + dev_err(kdev, "cannot allocate rx skb queue\n");
1155 + priv->rx_desc_count = 0;
1156 + priv->rx_dirty_desc = 0;
1157 + priv->rx_curr_desc = 0;
1159 + /* initialize flow control buffer allocation */
1160 + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1161 + ENETDMA_BUFALLOC_REG(priv->rx_chan));
1163 + if (bcm_enet_refill_rx(dev)) {
1164 + dev_err(kdev, "cannot allocate rx skb queue\n");
1169 + /* write rx & tx ring addresses */
1170 + enet_dma_writel(priv, priv->rx_desc_dma,
1171 + ENETDMA_RSTART_REG(priv->rx_chan));
1172 + enet_dma_writel(priv, priv->tx_desc_dma,
1173 + ENETDMA_RSTART_REG(priv->tx_chan));
1175 + /* clear remaining state ram for rx & tx channel */
1176 + enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
1177 + enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
1178 + enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
1179 + enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
1180 + enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
1181 + enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
1183 + /* set max rx/tx length */
1184 + enet_writel(priv, BCMENET_MAX_RX_SIZE, ENET_RXMAXLEN_REG);
1185 + enet_writel(priv, BCMENET_MAX_TX_SIZE, ENET_TXMAXLEN_REG);
1187 + /* set dma maximum burst len */
1188 + enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
1189 + ENETDMA_MAXBURST_REG(priv->rx_chan));
1190 + enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
1191 + ENETDMA_MAXBURST_REG(priv->tx_chan));
1193 + /* set correct transmit fifo watermark */
1194 + enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1196 + /* set flow control low/high threshold to 1/3 / 2/3 */
1197 + val = priv->rx_ring_size / 3;
1198 + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1199 + val = (priv->rx_ring_size * 2) / 3;
1200 + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1202 + /* all set, enable mac and interrupts, start dma engine and
1203 + * kick rx dma channel */
1205 + enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
1206 + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1207 + enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
1208 + ENETDMA_CHANCFG_REG(priv->rx_chan));
1210 + /* watch "mib counters about to overflow" interrupt */
1211 + enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1212 + enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1214 + /* watch "packet transferred" interrupt in rx and tx */
1215 + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1216 + ENETDMA_IR_REG(priv->rx_chan));
1217 + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1218 + ENETDMA_IR_REG(priv->tx_chan));
1220 + /* make sure we enable napi before rx interrupt */
1221 + napi_enable(&priv->napi);
1223 + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1224 + ENETDMA_IRMASK_REG(priv->rx_chan));
1225 + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
1226 + ENETDMA_IRMASK_REG(priv->tx_chan));
1228 + if (priv->has_phy)
1229 + phy_start(priv->phydev);
1231 + bcm_enet_adjust_link(dev);
1233 + netif_start_queue(dev);
1237 + phy_disconnect(priv->phydev);
1238 + if (irq_requested > 2)
1239 + free_irq(priv->irq_tx, dev);
1240 + if (irq_requested > 1)
1241 + free_irq(priv->irq_rx, dev);
1242 + if (irq_requested > 0)
1243 + free_irq(dev->irq, dev);
1244 + for (i = 0; i < priv->rx_ring_size; i++) {
1245 + struct bcm_enet_desc *desc;
1247 + if (!priv->rx_skb[i])
1250 + desc = &priv->rx_desc_cpu[i];
1251 + dma_unmap_single(kdev, desc->address, BCMENET_MAX_RX_SIZE,
1253 + kfree_skb(priv->rx_skb[i]);
1255 + if (priv->rx_desc_cpu)
1256 + dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1257 + priv->rx_desc_cpu, priv->rx_desc_dma);
1258 + if (priv->tx_desc_cpu)
1259 + dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1260 + priv->tx_desc_cpu, priv->tx_desc_dma);
1261 + kfree(priv->rx_skb);
1262 + kfree(priv->tx_skb);
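One knob in bcm_enet_open() deserves a number: the rx flow-control window is programmed at one third and two thirds of the ring size, so the default 64-descriptor rx ring gets marks of 21 and 42:

#include <stdio.h>

/* marks as programmed in bcm_enet_open() for the default ring */
int main(void)
{
	int ring = 64;	/* BCMENET_DEF_RX_DESC */

	printf("flow control low mark:  %d\n", ring / 3);
	printf("flow control high mark: %d\n", ring * 2 / 3);
	return 0;
}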
1269 +static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1274 + val = enet_readl(priv, ENET_CTL_REG);
1275 + val |= ENET_CTL_DISABLE_MASK;
1276 + enet_writel(priv, val, ENET_CTL_REG);
1282 + val = enet_readl(priv, ENET_CTL_REG);
1283 + if (!(val & ENET_CTL_DISABLE_MASK))
1286 + } while (limit--);
1290 + * disable dma in given channel
1292 +static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1296 + enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
1302 + val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
1303 + if (!(val & ENETDMA_CHANCFG_EN_MASK))
1306 + } while (limit--);
1312 +static int bcm_enet_stop(struct net_device *dev)
1314 + struct bcm_enet_priv *priv;
1315 + struct device *kdev;
1318 + priv = netdev_priv(dev);
1319 + kdev = &priv->pdev->dev;
1321 + netif_stop_queue(dev);
1322 + napi_disable(&priv->napi);
1323 + if (priv->has_phy)
1324 + phy_stop(priv->phydev);
1325 + del_timer_sync(&priv->rx_timeout);
1327 + /* mask all interrupts */
1328 + enet_writel(priv, 0, ENET_IRMASK_REG);
1329 + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
1330 + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
1332 + /* make sure no mib update is scheduled */
1333 + flush_scheduled_work();
1335 + /* disable dma & mac */
1336 + bcm_enet_disable_dma(priv, priv->tx_chan);
1337 + bcm_enet_disable_dma(priv, priv->rx_chan);
1338 + bcm_enet_disable_mac(priv);
1340 + /* force reclaim of all tx buffers */
1341 + bcm_enet_tx_reclaim(dev, 1);
1343 + /* free the rx skb ring */
1344 + for (i = 0; i < priv->rx_ring_size; i++) {
1345 + struct bcm_enet_desc *desc;
1347 + if (!priv->rx_skb[i])
1350 + desc = &priv->rx_desc_cpu[i];
1351 + dma_unmap_single(kdev, desc->address, BCMENET_MAX_RX_SIZE,
1353 + kfree_skb(priv->rx_skb[i]);
1356 + /* free remaining allocated memory */
1357 + kfree(priv->rx_skb);
1358 + kfree(priv->tx_skb);
1359 + dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1360 + priv->rx_desc_cpu, priv->rx_desc_dma);
1361 + dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1362 + priv->tx_desc_cpu, priv->tx_desc_dma);
1363 + free_irq(priv->irq_tx, dev);
1364 + free_irq(priv->irq_rx, dev);
1365 + free_irq(dev->irq, dev);
1368 + if (priv->has_phy) {
1369 + phy_disconnect(priv->phydev);
1370 + priv->phydev = NULL;
1377 + * core request to return device rx/tx stats
1379 +static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
1381 + struct bcm_enet_priv *priv;
1383 + priv = netdev_priv(dev);
1384 + return &priv->stats;
1388 + * ethtool callbacks
1390 +struct bcm_enet_stats {
1391 + char stat_string[ETH_GSTRING_LEN];
1397 +#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
1398 + offsetof(struct bcm_enet_priv, m)
1400 +static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1401 + { "rx_packets", GEN_STAT(stats.rx_packets), -1 },
1402 + { "tx_packets", GEN_STAT(stats.tx_packets), -1 },
1403 + { "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
1404 + { "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
1405 + { "rx_errors", GEN_STAT(stats.rx_errors), -1 },
1406 + { "tx_errors", GEN_STAT(stats.tx_errors), -1 },
1407 + { "rx_dropped", GEN_STAT(stats.rx_dropped), -1 },
1408 + { "tx_dropped", GEN_STAT(stats.tx_dropped), -1 },
1410 + { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1411 + { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1412 + { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1413 + { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1414 + { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1415 + { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1416 + { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1417 + { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1418 + { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1419 + { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1420 + { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1421 + { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1422 + { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1423 + { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1424 + { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1425 + { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1426 + { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1427 + { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1428 + { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1429 + { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1430 + { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1432 + { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1433 + { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1434 + { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1435 + { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1436 + { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1437 + { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1438 + { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1439 + { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1440 + { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1441 + { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1442 + { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1443 + { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1444 + { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1445 + { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1446 + { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1447 + { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1448 + { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1449 + { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1450 + { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1451 + { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1452 + { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1453 + { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1457 +#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)
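The GEN_STAT() trick above records, for each exported statistic, the member's size and byte offset inside bcm_enet_priv, which is what lets update_mib_counters() and bcm_enet_get_ethtool_stats() walk the table generically. The same idea in a standalone program:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats { uint32_t rx_packets; uint64_t rx_bytes; };

/* same pattern as the driver's GEN_STAT(): size, then offset */
#define GEN_STAT(m) sizeof(((struct stats *)0)->m), \
		    offsetof(struct stats, m)

int main(void)
{
	const struct { size_t size, off; } table[] = {
		{ GEN_STAT(rx_packets) },
		{ GEN_STAT(rx_bytes) },
	};
	struct stats s = { 5, 1234 };
	size_t i;

	for (i = 0; i < 2; i++) {
		char *p = (char *)&s + table[i].off;
		unsigned long long v = table[i].size == sizeof(uint64_t) ?
			*(uint64_t *)p : *(uint32_t *)p;

		printf("stat %zu: size=%zu offset=%zu value=%llu\n",
		       i, table[i].size, table[i].off, v);
	}
	return 0;
}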
1460 +static const u32 unused_mib_regs[] = {
1461 + ETH_MIB_TX_ALL_OCTETS,
1462 + ETH_MIB_TX_ALL_PKTS,
1463 + ETH_MIB_RX_ALL_OCTETS,
1464 + ETH_MIB_RX_ALL_PKTS,
1468 +static void bcm_enet_get_drvinfo(struct net_device *netdev,
1469 + struct ethtool_drvinfo *drvinfo)
1471 + strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
1472 + strncpy(drvinfo->version, bcm_enet_driver_version, 32);
1473 + strncpy(drvinfo->fw_version, "N/A", 32);
1474 + strncpy(drvinfo->bus_info, "bcm63xx", 32);
1475 + drvinfo->n_stats = BCM_ENET_STATS_LEN;
1478 +static int bcm_enet_get_stats_count(struct net_device *netdev)
1480 + return BCM_ENET_STATS_LEN;
1483 +static void bcm_enet_get_strings(struct net_device *netdev,
1484 + u32 stringset, u8 *data)
1488 + switch (stringset) {
1489 + case ETH_SS_STATS:
1490 + for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1491 + memcpy(data + i * ETH_GSTRING_LEN,
1492 + bcm_enet_gstrings_stats[i].stat_string,
1493 + ETH_GSTRING_LEN);
1499 +static void update_mib_counters(struct bcm_enet_priv *priv)
1503 + for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1504 + const struct bcm_enet_stats *s;
1508 + s = &bcm_enet_gstrings_stats[i];
1509 + if (s->mib_reg == -1)
1510 + continue;
1512 + val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1513 + p = (char *)priv + s->stat_offset;
1515 + if (s->sizeof_stat == sizeof(u64))
1516 + *(u64 *)p += val;
1517 + else
1518 + *(u32 *)p += val;
1521 + /* also empty unused mib counters to make sure mib counter
1522 + * overflow interrupt is cleared */
1523 + for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1524 + (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1527 +static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1529 + struct bcm_enet_priv *priv;
1531 + priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1532 + mutex_lock(&priv->mib_update_lock);
1533 + update_mib_counters(priv);
1534 + mutex_unlock(&priv->mib_update_lock);
1536 + /* reenable mib interrupt */
1537 + if (netif_running(priv->net_dev))
1538 + enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1541 +static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1542 + struct ethtool_stats *stats,
1545 + struct bcm_enet_priv *priv;
1548 + priv = netdev_priv(netdev);
1550 + mutex_lock(&priv->mib_update_lock);
1551 + update_mib_counters(priv);
1553 + for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1554 + const struct bcm_enet_stats *s;
1557 + s = &bcm_enet_gstrings_stats[i];
1558 + p = (char *)priv + s->stat_offset;
1559 + data[i] = (s->sizeof_stat == sizeof(u64)) ?
1560 + *(u64 *)p : *(u32 *)p;
1562 + mutex_unlock(&priv->mib_update_lock);
1565 +static int bcm_enet_get_settings(struct net_device *dev,
1566 + struct ethtool_cmd *cmd)
1568 + struct bcm_enet_priv *priv;
1570 + priv = netdev_priv(dev);
1572 + cmd->maxrxpkt = 0;
1573 + cmd->maxtxpkt = 0;
1575 + if (priv->has_phy) {
1576 + if (!priv->phydev)
1577 + return -ENODEV;
1578 + return phy_ethtool_gset(priv->phydev, cmd);
1581 + cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
1582 + cmd->duplex = (priv->force_duplex_full) ?
1583 + DUPLEX_FULL : DUPLEX_HALF;
1584 + cmd->supported = SUPPORTED_10baseT_Half |
1585 + SUPPORTED_10baseT_Full |
1586 + SUPPORTED_100baseT_Half |
1587 + SUPPORTED_100baseT_Full;
1588 + cmd->advertising = 0;
1589 + cmd->port = PORT_MII;
1590 + cmd->transceiver = XCVR_EXTERNAL;
1595 +static int bcm_enet_set_settings(struct net_device *dev,
1596 + struct ethtool_cmd *cmd)
1598 + struct bcm_enet_priv *priv;
1600 + priv = netdev_priv(dev);
1601 + if (priv->has_phy) {
1602 + if (!priv->phydev)
1603 + return -ENODEV;
1604 + return phy_ethtool_sset(priv->phydev, cmd);
1607 + if (cmd->autoneg ||
1608 + (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
1609 + cmd->port != PORT_MII)
1610 + return -EINVAL;
1612 + priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
1613 + priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
1615 + if (netif_running(dev))
1616 + bcm_enet_adjust_link(dev);
1621 +static void bcm_enet_get_ringparam(struct net_device *dev,
1622 + struct ethtool_ringparam *ering)
1624 + struct bcm_enet_priv *priv;
1626 + priv = netdev_priv(dev);
1628 + /* rx/tx ring is actually only limited by memory */
1629 + ering->rx_max_pending = 8192;
1630 + ering->tx_max_pending = 8192;
1631 + ering->rx_mini_max_pending = 0;
1632 + ering->rx_jumbo_max_pending = 0;
1633 + ering->rx_pending = priv->rx_ring_size;
1634 + ering->tx_pending = priv->tx_ring_size;
1637 +static int bcm_enet_set_ringparam(struct net_device *dev,
1638 + struct ethtool_ringparam *ering)
1640 + struct bcm_enet_priv *priv;
1641 + int was_running;
1643 + priv = netdev_priv(dev);
1644 + was_running = 0;
1646 + if (netif_running(dev)) {
1647 + bcm_enet_stop(dev);
1648 + was_running = 1;
1649 + }
1651 + priv->rx_ring_size = ering->rx_pending;
1652 + priv->tx_ring_size = ering->tx_pending;
1654 + if (was_running) {
1655 + int err;
1657 + err = bcm_enet_open(dev);
1661 + bcm_enet_set_multicast_list(dev);
1666 +static void bcm_enet_get_pauseparam(struct net_device *dev,
1667 + struct ethtool_pauseparam *ecmd)
1669 + struct bcm_enet_priv *priv;
1671 + priv = netdev_priv(dev);
1672 + ecmd->autoneg = priv->pause_auto;
1673 + ecmd->rx_pause = priv->pause_rx;
1674 + ecmd->tx_pause = priv->pause_tx;
1677 +static int bcm_enet_set_pauseparam(struct net_device *dev,
1678 + struct ethtool_pauseparam *ecmd)
1680 + struct bcm_enet_priv *priv;
1682 + priv = netdev_priv(dev);
1684 + if (priv->has_phy) {
1685 + if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1686 + /* asymmetric pause mode not supported,
1687 + * actually possible but integrated PHY has RO
1688 + * asym_pause bit */
1689 + return -EINVAL;
1692 + /* no pause autoneg on direct mii connection */
1693 + if (ecmd->autoneg)
1694 + return -EINVAL;
1697 + priv->pause_auto = ecmd->autoneg;
1698 + priv->pause_rx = ecmd->rx_pause;
1699 + priv->pause_tx = ecmd->tx_pause;
1704 +static struct ethtool_ops bcm_enet_ethtool_ops = {
1705 + .get_strings = bcm_enet_get_strings,
1706 + .get_stats_count = bcm_enet_get_stats_count,
1707 + .get_ethtool_stats = bcm_enet_get_ethtool_stats,
1708 + .get_settings = bcm_enet_get_settings,
1709 + .set_settings = bcm_enet_set_settings,
1710 + .get_drvinfo = bcm_enet_get_drvinfo,
1711 + .get_link = ethtool_op_get_link,
1712 + .get_ringparam = bcm_enet_get_ringparam,
1713 + .set_ringparam = bcm_enet_set_ringparam,
1714 + .get_pauseparam = bcm_enet_get_pauseparam,
1715 + .set_pauseparam = bcm_enet_set_pauseparam,
1718 +static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1720 + struct bcm_enet_priv *priv;
1722 + priv = netdev_priv(dev);
1723 + if (priv->has_phy) {
1724 + if (!priv->phydev)
1725 + return -ENODEV;
1726 + return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
1728 + struct mii_if_info mii;
1730 + mii.dev = dev;
1731 + mii.mdio_read = bcm_enet_mdio_read_mii;
1732 + mii.mdio_write = bcm_enet_mdio_write_mii;
1733 + mii.phy_id = 0;
1734 + mii.phy_id_mask = 0x3f;
1735 + mii.reg_num_mask = 0x1f;
1736 + return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
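Either branch above ends up servicing the standard MII ioctls, so the PHY can be poked from userspace. A small consumer using only the standard SIOCGMIIPHY/SIOCGMIIREG interface (error handling trimmed; "eth0" is a placeholder name):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mii.h>
#include <linux/sockios.h>

/* read the PHY's BMSR through the driver's do_ioctl hook */
int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (fd >= 0 && ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
		mii->reg_num = MII_BMSR;
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("phy %d BMSR=0x%04x\n", mii->phy_id,
			       mii->val_out);
	}
	if (fd >= 0)
		close(fd);
	return 0;
}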
1741 + * preinit hardware to allow mii operation while device is down
1743 +static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1748 + /* make sure mac is disabled */
1749 + bcm_enet_disable_mac(priv);
1751 + /* soft reset mac */
1752 + val = ENET_CTL_SRESET_MASK;
1753 + enet_writel(priv, val, ENET_CTL_REG);
1758 + val = enet_readl(priv, ENET_CTL_REG);
1759 + if (!(val & ENET_CTL_SRESET_MASK))
1762 + } while (limit--);
1764 + /* select correct mii interface */
1765 + val = enet_readl(priv, ENET_CTL_REG);
1766 + if (priv->use_external_mii)
1767 + val |= ENET_CTL_EPHYSEL_MASK;
1768 + else
1769 + val &= ~ENET_CTL_EPHYSEL_MASK;
1770 + enet_writel(priv, val, ENET_CTL_REG);
1772 + /* turn on mdc clock */
1773 + enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1774 + ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1776 + /* set mib counters to self-clear when read */
1777 + val = enet_readl(priv, ENET_MIBCTL_REG);
1778 + val |= ENET_MIBCTL_RDCLEAR_MASK;
1779 + enet_writel(priv, val, ENET_MIBCTL_REG);
1783 + * allocate netdevice, request register memory and register device.
1785 +static int __devinit bcm_enet_probe(struct platform_device *pdev)
1787 + struct bcm_enet_priv *priv;
1788 + struct net_device *dev;
1789 + struct bcm63xx_enet_platform_data *pd;
1790 + struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1791 + struct mii_bus *bus;
1792 + const char *clk_name;
1793 + unsigned int iomem_size;
1794 + int i, ret, mdio_registered, mem_requested;
1796 + /* stop if shared driver failed, assume driver->probe will be
1797 + * called in the same order we register devices (correct ?) */
1798 + if (!bcm_enet_shared_base)
1801 + mdio_registered = mem_requested = 0;
1803 + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1804 + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1805 + res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1806 + res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1807 + if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
1808 + return -ENODEV;
1811 + dev = alloc_etherdev(sizeof(*priv));
1812 + if (!dev)
1813 + return -ENOMEM;
1814 + priv = netdev_priv(dev);
1815 + memset(priv, 0, sizeof(*priv));
1817 + iomem_size = res_mem->end - res_mem->start + 1;
1818 + if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
1822 + mem_requested = 1;
1824 + priv->base = ioremap(res_mem->start, iomem_size);
1825 + if (priv->base == NULL) {
1829 + dev->irq = priv->irq = res_irq->start;
1830 + priv->irq_rx = res_irq_rx->start;
1831 + priv->irq_tx = res_irq_tx->start;
1832 + priv->mac_id = pdev->id;
1834 + /* get rx & tx dma channel id for this mac */
1835 + if (priv->mac_id == 0) {
1836 + priv->rx_chan = 0;
1837 + priv->tx_chan = 1;
1838 + clk_name = "enet0";
1840 + priv->rx_chan = 2;
1841 + priv->tx_chan = 3;
1842 + clk_name = "enet1";
1845 + priv->mac_clk = clk_get(&pdev->dev, clk_name);
1846 + if (IS_ERR(priv->mac_clk)) {
1847 + ret = PTR_ERR(priv->mac_clk);
1848 + priv->mac_clk = NULL;
1851 + clk_enable(priv->mac_clk);
1853 + /* initialize defaults and fetch platform data */
1854 + priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1855 + priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1857 + pd = pdev->dev.platform_data;
1859 + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1860 + priv->has_phy = pd->has_phy;
1861 + priv->phy_id = pd->phy_id;
1862 + priv->has_phy_interrupt = pd->has_phy_interrupt;
1863 + priv->phy_interrupt = pd->phy_interrupt;
1864 + priv->use_external_mii = !pd->use_internal_phy;
1865 + priv->pause_auto = pd->pause_auto;
1866 + priv->pause_rx = pd->pause_rx;
1867 + priv->pause_tx = pd->pause_tx;
1868 + priv->force_duplex_full = pd->force_duplex_full;
1869 + priv->force_speed_100 = pd->force_speed_100;
1872 + if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
1873 + /* using internal PHY, enable clock */
1874 + priv->phy_clk = clk_get(&pdev->dev, "ephy");
1875 + if (IS_ERR(priv->phy_clk)) {
1876 + ret = PTR_ERR(priv->phy_clk);
1877 + priv->phy_clk = NULL;
1880 + clk_enable(priv->phy_clk);
1883 + /* do minimal hardware init to be able to probe mii bus */
1884 + bcm_enet_hw_preinit(priv);
1886 + /* MII bus registration */
1887 + if (priv->has_phy) {
1888 + bus = &priv->mii_bus;
1889 + bus->name = "bcm63xx_enet MII bus";
1890 + bus->dev = &pdev->dev;
1892 + bus->read = bcm_enet_mdio_read_phylib;
1893 + bus->write = bcm_enet_mdio_write_phylib;
1894 + sprintf(bus->id, "%d", priv->mac_id);
1896 + /* only probe bus where we think the PHY is, because
1897 + * the mdio read operation returns 0 instead of 0xffff
1898 + * if a slave is not present on hw */
1899 + bus->phy_mask = ~(1 << priv->phy_id);
1901 + bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1907 + if (priv->has_phy_interrupt)
1908 + bus->irq[priv->phy_id] = priv->phy_interrupt;
1909 + else
1910 + bus->irq[priv->phy_id] = PHY_POLL;
1912 + ret = mdiobus_register(bus);
1914 + dev_err(&pdev->dev, "unable to register mdio bus\n");
1917 + mdio_registered = 1;
1920 + /* run platform code to initialize PHY device */
1921 + if (pd->mii_config &&
1922 + pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1923 + bcm_enet_mdio_write_mii)) {
1924 + dev_err(&pdev->dev, "unable to configure mdio bus\n");
1929 + spin_lock_init(&priv->rx_lock);
1931 + /* init rx timeout (used for oom) */
1932 + init_timer(&priv->rx_timeout);
1933 + priv->rx_timeout.function = bcm_enet_refill_rx_timer;
1934 + priv->rx_timeout.data = (unsigned long)dev;
1936 + /* init the mib update lock&work */
1937 + mutex_init(&priv->mib_update_lock);
1938 + INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1940 + /* zero mib counters */
1941 + for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1942 + enet_writel(priv, 0, ENET_MIB_REG(i));
1944 + /* register netdevice */
1945 + dev->open = bcm_enet_open;
1946 + dev->stop = bcm_enet_stop;
1947 + dev->hard_start_xmit = bcm_enet_start_xmit;
1948 + dev->get_stats = bcm_enet_get_stats;
1949 + dev->set_mac_address = bcm_enet_set_mac_address;
1950 + dev->set_multicast_list = bcm_enet_set_multicast_list;
1951 + netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1952 + dev->do_ioctl = bcm_enet_ioctl;
1953 +#ifdef CONFIG_NET_POLL_CONTROLLER
1954 + dev->poll_controller = bcm_enet_netpoll;
1955 +#endif
1957 + SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
1959 + ret = register_netdev(dev);
1963 + platform_set_drvdata(pdev, dev);
1964 + priv->pdev = pdev;
1965 + priv->net_dev = dev;
1966 + SET_NETDEV_DEV(dev, &pdev->dev);
1971 + if (mem_requested)
1972 + release_mem_region(res_mem->start, iomem_size);
1973 + if (mdio_registered)
1974 + mdiobus_unregister(&priv->mii_bus);
1975 + kfree(priv->mii_bus.irq);
1976 + if (priv->mac_clk) {
1977 + clk_disable(priv->mac_clk);
1978 + clk_put(priv->mac_clk);
1980 + if (priv->phy_clk) {
1981 + clk_disable(priv->phy_clk);
1982 + clk_put(priv->phy_clk);
1985 + /* turn off mdc clock */
1986 + enet_writel(priv, 0, ENET_MIISC_REG);
1987 + iounmap(priv->base);
1995 + * exit func, stops hardware and unregisters netdevice
1997 +static int __devexit bcm_enet_remove(struct platform_device *pdev)
1999 + struct bcm_enet_priv *priv;
2000 + struct net_device *dev;
2001 + struct resource *res;
2003 + /* stop netdevice */
2004 + dev = platform_get_drvdata(pdev);
2005 + priv = netdev_priv(dev);
2006 + unregister_netdev(dev);
2008 + /* turn off mdc clock */
2009 + enet_writel(priv, 0, ENET_MIISC_REG);
2011 + if (priv->has_phy) {
2012 + mdiobus_unregister(&priv->mii_bus);
2013 + kfree(priv->mii_bus.irq);
2015 + struct bcm63xx_enet_platform_data *pd;
2017 + pd = pdev->dev.platform_data;
2018 + if (pd && pd->mii_config)
2019 + pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
2020 + bcm_enet_mdio_write_mii);
2023 + /* release device resources */
2024 + iounmap(priv->base);
2025 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2026 + release_mem_region(res->start, res->end - res->start + 1);
2028 + /* disable hw block clocks */
2029 + if (priv->phy_clk) {
2030 + clk_disable(priv->phy_clk);
2031 + clk_put(priv->phy_clk);
2033 + clk_disable(priv->mac_clk);
2034 + clk_put(priv->mac_clk);
2040 +struct platform_driver bcm63xx_enet_driver = {
2041 + .probe = bcm_enet_probe,
2042 + .remove = __devexit_p(bcm_enet_remove),
2043 + .driver = {
2044 + .name = "bcm63xx_enet",
2045 + .owner = THIS_MODULE,
2050 + * reserve & remap memory space shared between all macs
2052 +static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
2054 + struct resource *res;
2055 + unsigned int iomem_size;
2057 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2061 + iomem_size = res->end - res->start + 1;
2062 + if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
2065 + bcm_enet_shared_base = ioremap(res->start, iomem_size);
2066 + if (!bcm_enet_shared_base) {
2067 + release_mem_region(res->start, iomem_size);
2073 +static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
2075 + struct resource *res;
2077 + iounmap(bcm_enet_shared_base);
2078 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2079 + release_mem_region(res->start, res->end - res->start + 1);
2084 + * this "shared" driver is needed because both macs share a single
2087 +struct platform_driver bcm63xx_enet_shared_driver = {
2088 + .probe = bcm_enet_shared_probe,
2089 + .remove = __devexit_p(bcm_enet_shared_remove),
2090 + .driver = {
2091 + .name = "bcm63xx_enet_shared",
2092 + .owner = THIS_MODULE,
2099 +static int __init bcm_enet_init(void)
2103 + ret = platform_driver_register(&bcm63xx_enet_shared_driver);
2104 + if (ret)
2105 + return ret;
2107 + ret = platform_driver_register(&bcm63xx_enet_driver);
2108 + if (ret)
2109 + platform_driver_unregister(&bcm63xx_enet_shared_driver);
2114 +static void __exit bcm_enet_exit(void)
2116 + platform_driver_unregister(&bcm63xx_enet_driver);
2117 + platform_driver_unregister(&bcm63xx_enet_shared_driver);
2121 +module_init(bcm_enet_init);
2122 +module_exit(bcm_enet_exit);
2124 +MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2125 +MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2126 +MODULE_LICENSE("GPL");
2127 diff --git a/drivers/net/bcm63xx_enet.h b/drivers/net/bcm63xx_enet.h
2128 new file mode 100644
2129 index 0000000..fe7ffc1
2130 --- /dev/null
2131 +++ b/drivers/net/bcm63xx_enet.h
2132 @@ -0,0 +1,294 @@
2133 +#ifndef BCM63XX_ENET_H_
2134 +#define BCM63XX_ENET_H_
2136 +#include <linux/types.h>
2137 +#include <linux/mii.h>
2138 +#include <linux/mutex.h>
2139 +#include <linux/phy.h>
2140 +#include <linux/platform_device.h>
2142 +#include <bcm63xx_regs.h>
2143 +#include <bcm63xx_irq.h>
2144 +#include <bcm63xx_io.h>
2146 +/* default number of descriptors */
2147 +#define BCMENET_DEF_RX_DESC 64
2148 +#define BCMENET_DEF_TX_DESC 32
2150 +/* maximum burst len for dma (4 bytes unit) */
2151 +#define BCMENET_DMA_MAXBURST 16
2153 +/* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value
2154 + * must be low enough so that a DMA transfer of the above burst length
2155 + * cannot overflow the fifo */
2156 +#define BCMENET_TX_FIFO_TRESH 32
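One way to read the constraint above can be checked by hand: with a 256-byte tx fifo, a watermark of 32 four-byte units (128 bytes) plus one maximum burst of 16 units (64 bytes) still fits:

#include <stdio.h>

int main(void)
{
	const int fifo = 256, unit = 4;
	const int wmark = 32 * unit;	/* BCMENET_TX_FIFO_TRESH */
	const int burst = 16 * unit;	/* BCMENET_DMA_MAXBURST */

	printf("%d + %d = %d bytes, fifo = %d: %s\n", wmark, burst,
	       wmark + burst, fifo,
	       wmark + burst <= fifo ? "ok" : "overflow");
	return 0;
}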
2158 +/* maximum rx/tx packet size */
2159 +#define BCMENET_MAX_RX_SIZE (ETH_FRAME_LEN + 4)
2160 +#define BCMENET_MAX_TX_SIZE (ETH_FRAME_LEN + 4)
2163 + * rx/tx dma descriptor
2165 +struct bcm_enet_desc {
2166 + u32 len_stat;
2167 + u32 address;
2168 +};
2170 +#define DMADESC_LENGTH_SHIFT 16
2171 +#define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT)
2172 +#define DMADESC_OWNER_MASK (1 << 15)
2173 +#define DMADESC_EOP_MASK (1 << 14)
2174 +#define DMADESC_SOP_MASK (1 << 13)
2175 +#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
2176 +#define DMADESC_WRAP_MASK (1 << 12)
2178 +#define DMADESC_UNDER_MASK (1 << 9)
2179 +#define DMADESC_APPEND_CRC (1 << 8)
2180 +#define DMADESC_OVSIZE_MASK (1 << 4)
2181 +#define DMADESC_RXER_MASK (1 << 2)
2182 +#define DMADESC_CRC_MASK (1 << 1)
2183 +#define DMADESC_OV_MASK (1 << 0)
2184 +#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \
2185 + DMADESC_OVSIZE_MASK | \
2186 + DMADESC_RXER_MASK | \
2187 + DMADESC_CRC_MASK | \
2188 + DMADESC_OV_MASK)
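Putting the masks together, here is how a received descriptor word decodes. Note that BCMENET_MAX_RX_SIZE above works out to 1514 + 4 = 1518 bytes (frame plus FCS), and a frame is only accepted when both SOP and EOP are set, exactly the DMADESC_ESOP_MASK test in bcm_enet_receive_queue():

#include <stdint.h>
#include <stdio.h>

/* mirrored from the defines above */
#define LEN_SHIFT	16
#define LEN_MASK	(0xfff << LEN_SHIFT)
#define OWNER		(1 << 15)
#define EOP		(1 << 14)
#define SOP		(1 << 13)
#define CRC_ERR		(1 << 1)

int main(void)
{
	/* a completed 64-byte frame with a crc error */
	uint32_t ls = (64 << LEN_SHIFT) | EOP | SOP | CRC_ERR;

	printf("owner=%d len=%u whole_frame=%d crc_err=%d\n",
	       !!(ls & OWNER),
	       (ls & LEN_MASK) >> LEN_SHIFT,
	       (ls & (SOP | EOP)) == (SOP | EOP),
	       !!(ls & CRC_ERR));
	return 0;
}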
2192 + * MIB Counters register definitions
2194 +#define ETH_MIB_TX_GD_OCTETS 0
2195 +#define ETH_MIB_TX_GD_PKTS 1
2196 +#define ETH_MIB_TX_ALL_OCTETS 2
2197 +#define ETH_MIB_TX_ALL_PKTS 3
2198 +#define ETH_MIB_TX_BRDCAST 4
2199 +#define ETH_MIB_TX_MULT 5
2200 +#define ETH_MIB_TX_64 6
2201 +#define ETH_MIB_TX_65_127 7
2202 +#define ETH_MIB_TX_128_255 8
2203 +#define ETH_MIB_TX_256_511 9
2204 +#define ETH_MIB_TX_512_1023 10
2205 +#define ETH_MIB_TX_1024_MAX 11
2206 +#define ETH_MIB_TX_JAB 12
2207 +#define ETH_MIB_TX_OVR 13
2208 +#define ETH_MIB_TX_FRAG 14
2209 +#define ETH_MIB_TX_UNDERRUN 15
2210 +#define ETH_MIB_TX_COL 16
2211 +#define ETH_MIB_TX_1_COL 17
2212 +#define ETH_MIB_TX_M_COL 18
2213 +#define ETH_MIB_TX_EX_COL 19
2214 +#define ETH_MIB_TX_LATE 20
2215 +#define ETH_MIB_TX_DEF 21
2216 +#define ETH_MIB_TX_CRS 22
2217 +#define ETH_MIB_TX_PAUSE 23
2219 +#define ETH_MIB_RX_GD_OCTETS 32
2220 +#define ETH_MIB_RX_GD_PKTS 33
2221 +#define ETH_MIB_RX_ALL_OCTETS 34
2222 +#define ETH_MIB_RX_ALL_PKTS 35
2223 +#define ETH_MIB_RX_BRDCAST 36
2224 +#define ETH_MIB_RX_MULT 37
2225 +#define ETH_MIB_RX_64 38
2226 +#define ETH_MIB_RX_65_127 39
2227 +#define ETH_MIB_RX_128_255 40
2228 +#define ETH_MIB_RX_256_511 41
2229 +#define ETH_MIB_RX_512_1023 42
2230 +#define ETH_MIB_RX_1024_MAX 43
2231 +#define ETH_MIB_RX_JAB 44
2232 +#define ETH_MIB_RX_OVR 45
2233 +#define ETH_MIB_RX_FRAG 46
2234 +#define ETH_MIB_RX_DROP 47
2235 +#define ETH_MIB_RX_CRC_ALIGN 48
2236 +#define ETH_MIB_RX_UND 49
2237 +#define ETH_MIB_RX_CRC 50
2238 +#define ETH_MIB_RX_ALIGN 51
2239 +#define ETH_MIB_RX_SYM 52
2240 +#define ETH_MIB_RX_PAUSE 53
2241 +#define ETH_MIB_RX_CNTRL 54
2244 +struct bcm_enet_mib_counters {
2247 + u32 tx_all_octets;
2271 + u32 rx_all_octets;
2295 +struct bcm_enet_priv {
2297 + /* mac id (from platform device id) */
2298 + int mac_id;
2300 + /* base remapped address of device */
2301 + void __iomem *base;
2303 + /* mac irq, rx_dma irq, tx_dma irq */
2304 + int irq;
2305 + int irq_rx;
2306 + int irq_tx;
2308 + /* hw view of rx & tx dma ring */
2309 + dma_addr_t rx_desc_dma;
2310 + dma_addr_t tx_desc_dma;
2312 + /* allocated size (in bytes) for rx & tx dma ring */
2313 + unsigned int rx_desc_alloc_size;
2314 + unsigned int tx_desc_alloc_size;
2317 + struct napi_struct napi;
2319 + /* dma channel id for rx */
2320 + int rx_chan;
2322 + /* number of dma desc in rx ring */
2323 + int rx_ring_size;
2325 + /* cpu view of rx dma ring */
2326 + struct bcm_enet_desc *rx_desc_cpu;
2328 + /* current number of armed descriptor given to hardware for rx */
2329 + int rx_desc_count;
2331 + /* next rx descriptor to fetch from hardware */
2332 + int rx_curr_desc;
2334 + /* next dirty rx descriptor to refill */
2335 + int rx_dirty_desc;
2337 + /* list of skb given to hw for rx */
2338 + struct sk_buff **rx_skb;
2340 + /* used when rx skb allocation failed, so we defer rx queue
2341 + * refill */
2342 + struct timer_list rx_timeout;
2344 + /* lock rx_timeout against rx normal operation */
2345 + spinlock_t rx_lock;
2348 + /* dma channel id for tx */
2349 + int tx_chan;
2351 + /* number of dma desc in tx ring */
2352 + int tx_ring_size;
2354 + /* cpu view of tx dma ring */
2355 + struct bcm_enet_desc *tx_desc_cpu;
2357 + /* number of available descriptor for tx */
2358 + int tx_desc_count;
2360 + /* next tx descriptor available */
2361 + int tx_curr_desc;
2363 + /* next dirty tx descriptor to reclaim */
2364 + int tx_dirty_desc;
2366 + /* list of skb given to hw for tx */
2367 + struct sk_buff **tx_skb;
2369 + /* lock used by tx reclaim and xmit */
2370 + spinlock_t tx_lock;
2373 + /* set if internal phy is ignored and external mii interface
2374 + * is selected */
2375 + int use_external_mii;
2377 + /* set if a phy is connected, phy address must be known,
2378 + * probing is not possible */
2379 + int has_phy;
2380 + int phy_id;
2382 + /* set if connected phy has an associated irq */
2383 + int has_phy_interrupt;
2384 + int phy_interrupt;
2386 + /* used when a phy is connected (phylib used) */
2387 + struct mii_bus mii_bus;
2388 + struct phy_device *phydev;
2393 + /* used when no phy is connected */
2394 + int force_speed_100;
2395 + int force_duplex_full;
2397 + /* pause parameters */
2398 + int pause_auto;
2399 + int pause_rx;
2400 + int pause_tx;
2403 + struct net_device_stats stats;
2404 + struct bcm_enet_mib_counters mib;
2406 + /* after mib interrupt, mib registers update is done in this
2407 + * work queue */
2408 + struct work_struct mib_update_task;
2410 + /* lock mib update between userspace request and workqueue */
2411 + struct mutex mib_update_lock;
2414 + struct clk *mac_clk;
2416 + /* phy clock if internal phy is used */
2417 + struct clk *phy_clk;
2419 + /* network device reference */
2420 + struct net_device *net_dev;
2422 + /* platform device reference */
2423 + struct platform_device *pdev;
2426 +#endif /* ! BCM63XX_ENET_H_ */
2427 diff --git a/include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h b/include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h
2428 new file mode 100644
2429 index 0000000..d53f611
2430 --- /dev/null
2431 +++ b/include/asm-mips/mach-bcm63xx/bcm63xx_dev_enet.h
2432 @@ -0,0 +1,45 @@
2433 +#ifndef BCM63XX_DEV_ENET_H_
2434 +#define BCM63XX_DEV_ENET_H_
2436 +#include <linux/if_ether.h>
2437 +#include <linux/init.h>
2440 + * on board ethernet platform data
2442 +struct bcm63xx_enet_platform_data {
2443 + char mac_addr[ETH_ALEN];
2445 + int has_phy;
2447 + /* if has_phy, then set use_internal_phy */
2448 + int use_internal_phy;
2450 + /* or fill phy info to use an external one */
2451 + int phy_id;
2452 + int has_phy_interrupt;
2453 + int phy_interrupt;
2455 + /* if has_phy, use autonegotiated pause parameters or force
2456 + * them */
2457 + int pause_auto;
2458 + int pause_rx;
2459 + int pause_tx;
2461 + /* if !has_phy, set desired forced speed/duplex */
2462 + int force_speed_100;
2463 + int force_duplex_full;
2465 + /* if !has_phy, set callback to perform mii device
2466 + * init */
2467 + int (*mii_config)(struct net_device *dev, int probe,
2468 + int (*mii_read)(struct net_device *dev,
2469 + int phy_id, int reg),
2470 + void (*mii_write)(struct net_device *dev,
2471 + int phy_id, int reg, int val));
2474 +int __init bcm63xx_enet_register(int unit,
2475 + const struct bcm63xx_enet_platform_data *pd);
2477 +#endif /* ! BCM63XX_DEV_ENET_H_ */