1 --- a/drivers/net/Kconfig
2 +++ b/drivers/net/Kconfig
3 @@ -2071,6 +2071,14 @@ config ACENIC_OMIT_TIGON_I
5 The safe and default value for this is N.
8 + tristate "Cavium CNS3xxx Ethernet support"
9 + depends on ARCH_CNS3XXX
12 + Say Y here if you want to use built-in Ethernet ports
13 + on the CNS3XXX processor.
16 tristate "DL2000/TC902x-based Gigabit Ethernet support"
18 --- a/drivers/net/Makefile
19 +++ b/drivers/net/Makefile
20 @@ -240,6 +240,7 @@ obj-$(CONFIG_MAC89x0) += mac89x0.o
21 obj-$(CONFIG_TUN) += tun.o
22 obj-$(CONFIG_VETH) += veth.o
23 obj-$(CONFIG_NET_NETX) += netx-eth.o
24 +obj-$(CONFIG_CNS3XXX_ETH) += cns3xxx_eth.o
25 obj-$(CONFIG_DL2K) += dl2k.o
26 obj-$(CONFIG_R8169) += r8169.o
27 obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
29 +++ b/drivers/net/cns3xxx_eth.c
32 + * Cavium CNS3xxx Gigabit driver for Linux
34 + * Copyright 2011 Gateworks Corporation
35 + * Chris Lang <clang@gateworks.com>
37 + * This program is free software; you can redistribute it and/or modify it
38 + * under the terms of version 2 of the GNU General Public License
39 + * as published by the Free Software Foundation.
43 +#include <linux/delay.h>
44 +#include <linux/dma-mapping.h>
45 +#include <linux/dmapool.h>
46 +#include <linux/etherdevice.h>
47 +#include <linux/io.h>
48 +#include <linux/kernel.h>
49 +#include <linux/phy.h>
50 +#include <linux/platform_device.h>
51 +#include <linux/skbuff.h>
52 +#include <mach/hardware.h>
54 +#define DRV_NAME "cns3xxx_eth"
58 +#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
60 +#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
61 +#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
62 +#define REGS_SIZE 0x150
65 +#define NAPI_WEIGHT 64
67 +/* Port Config Defines */
68 +#define PORT_DISABLE (1 << 18)
70 +/* DMA AUTO Poll Defines */
71 +#define TS_POLL_EN (1 << 5)
72 +#define TS_SUSPEND (1 << 4)
73 +#define FS_POLL_EN (1 << 1)
74 +#define FS_SUSPEND (1 << 0)
78 + u32 sdp; // segment data pointer
82 + u32 sdl:16; // segment data length
86 + u32 rsv_1:3; // reserve
88 + u32 fp:1; // force priority
128 + u8 alignment[16]; // for alignment 32 byte
133 + u32 sdp; // segment data pointer
137 + u32 sdl:16; // segment data length
182 + u8 alignment[16]; // for alignment 32 byte
186 +struct switch_regs {
187 + u32 phy_control; /* 000 */
191 + u32 mac_pri_ctrl[5], __res;
194 + u32 prio_etype_udp;
195 + u32 prio_ipdscp[8];
200 + u32 mc_fc_glob_thrs;
213 + u32 fc_input_thrs, __res1[2];
215 + u32 mac_glob_cfg_ext, __res2[2];
217 + u32 dma_auto_poll_cfg;
218 + u32 delay_intr_cfg, __res3;
221 + u32 ts_desc_base_addr0, __res4;
224 + u32 fs_desc_base_addr0, __res5;
227 + u32 ts_desc_base_addr1, __res6;
230 + u32 fs_desc_base_addr1;
234 + struct tx_desc *desc;
235 + dma_addr_t phys_addr;
236 + struct tx_desc *cur_addr;
237 + struct sk_buff *buff_tab[TX_DESCS];
246 + struct rx_desc *desc;
247 + dma_addr_t phys_addr;
248 + struct rx_desc *cur_addr;
249 + struct sk_buff *buff_tab[RX_DESCS];
256 + struct resource *mem_res;
257 + struct switch_regs __iomem *regs;
258 + struct napi_struct napi;
259 + struct cns3xxx_plat_info *plat;
260 + struct _tx_ring *tx_ring;
261 + struct _rx_ring *rx_ring;
266 + struct net_device *netdev;
267 + struct phy_device *phydev;
269 + int id; /* logical port ID */
274 +static spinlock_t mdio_lock;
275 +static spinlock_t tx_lock;
276 +static spinlock_t stat_lock;
277 +static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
278 +struct mii_bus *mdio_bus;
279 +static int ports_open;
280 +static struct port *switch_port_tab[3];
281 +static struct dma_pool *rx_dma_pool;
282 +static struct dma_pool *tx_dma_pool;
283 +struct net_device *napi_dev;
285 +static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
286 + int write, u16 cmd)
291 + temp = __raw_readl(&mdio_regs->phy_control);
292 + temp |= (1 << 15); /* Clear Command Complete bit */
293 + __raw_writel(temp, &mdio_regs->phy_control);
297 + temp = (cmd << 16);
298 + temp |= (1 << 13); /* Write Command */
300 + temp = (1 << 14); /* Read Command */
302 + temp |= ((location & 0x1f) << 8);
303 + temp |= (phy_id & 0x1f);
305 + __raw_writel(temp, &mdio_regs->phy_control);
307 + while (((__raw_readl(&mdio_regs->phy_control) & 0x8000) == 0)
308 + && cycles < 5000) {
313 + if (cycles == 5000) {
314 + printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
319 + temp = __raw_readl(&mdio_regs->phy_control);
320 + temp |= (1 << 15); /* Clear Command Complete bit */
321 + __raw_writel(temp, &mdio_regs->phy_control);
326 + return ((temp >> 16) & 0xFFFF);
329 +static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
331 + unsigned long flags;
334 + spin_lock_irqsave(&mdio_lock, flags);
335 + ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
336 + spin_unlock_irqrestore(&mdio_lock, flags);
340 +static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
343 + unsigned long flags;
346 + spin_lock_irqsave(&mdio_lock, flags);
347 + ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
348 + spin_unlock_irqrestore(&mdio_lock, flags);
352 +static int cns3xxx_mdio_register(void)
356 + if (!(mdio_bus = mdiobus_alloc()))
359 + mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
361 + spin_lock_init(&mdio_lock);
362 + mdio_bus->name = "CNS3xxx MII Bus";
363 + mdio_bus->read = &cns3xxx_mdio_read;
364 + mdio_bus->write = &cns3xxx_mdio_write;
365 + strcpy(mdio_bus->id, "0");
367 + if ((err = mdiobus_register(mdio_bus)))
368 + mdiobus_free(mdio_bus);
372 +static void cns3xxx_mdio_remove(void)
374 + mdiobus_unregister(mdio_bus);
375 + mdiobus_free(mdio_bus);
378 +static void cns3xxx_adjust_link(struct net_device *dev)
380 + struct port *port = netdev_priv(dev);
381 + struct phy_device *phydev = port->phydev;
383 + if (!phydev->link) {
386 + printk(KERN_INFO "%s: link down\n", dev->name);
391 + if (port->speed == phydev->speed && port->duplex == phydev->duplex)
394 + port->speed = phydev->speed;
395 + port->duplex = phydev->duplex;
397 + printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
398 + dev->name, port->speed, port->duplex ? "full" : "half");
401 +irqreturn_t eth_rx_irq(int irq, void *pdev)
403 + struct net_device *dev = pdev;
404 + struct sw *sw = netdev_priv(dev);
405 + if (likely(napi_schedule_prep(&sw->napi))) {
406 + disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
407 + __napi_schedule(&sw->napi);
409 + return (IRQ_HANDLED);
413 +static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
415 + struct _rx_ring *rx_ring = sw->rx_ring;
416 + unsigned int i = rx_ring->alloc_index;
417 + struct rx_desc *desc;
418 + struct sk_buff *skb;
421 + rx_ring->alloc_count += received;
423 + for (received = rx_ring->alloc_count; received > 0; received--) {
424 + desc = &(rx_ring)->desc[i];
426 + if ((skb = dev_alloc_skb(mtu))) {
427 + if (SKB_DMA_REALIGN)
428 + skb_reserve(skb, SKB_DMA_REALIGN);
429 + skb_reserve(skb, NET_IP_ALIGN);
430 + desc->sdp = dma_map_single(NULL, skb->data,
431 + mtu, DMA_FROM_DEVICE);
432 + if (dma_mapping_error(NULL, desc->sdp)) {
433 + printk("failed to map\n");
434 + dev_kfree_skb(skb);
435 + /* Failed to map, better luck next time */
439 + printk("failed to alloc\n");
440 + /* Failed to allocate skb, try again next time */
444 + /* put the new buffer on RX-free queue */
445 + rx_ring->buff_tab[i] = skb;
447 + if (++i == RX_DESCS) {
449 + desc->config0 = 0x70000000 | mtu;
451 + desc->config0 = 0x30000000 | mtu;
455 + rx_ring->alloc_count = received;
456 + rx_ring->alloc_index = i;
459 +static void update_tx_stats(struct sw *sw)
461 + struct _tx_ring *tx_ring = sw->tx_ring;
462 + struct tx_desc *desc;
463 + struct tx_desc *next_desc;
464 + struct sk_buff *skb;
469 + spin_lock_bh(&stat_lock);
471 + num_count = tx_ring->num_count;
474 + spin_unlock_bh(&stat_lock);
478 + index = tx_ring->count_index;
479 + desc = &(tx_ring)->desc[index];
480 + for (i = 0; i < num_count; i++) {
481 + skb = tx_ring->buff_tab[index];
483 + tx_ring->buff_tab[index] = 0;
484 + if (unlikely(++index == TX_DESCS)) index = 0;
485 + next_desc = &(tx_ring)->desc[index];
486 + prefetch(next_desc + 4);
488 + skb->dev->stats.tx_packets++;
489 + skb->dev->stats.tx_bytes += skb->len;
490 + dev_kfree_skb_any(skb);
497 + tx_ring->num_count -= i;
498 + tx_ring->count_index = index;
500 + spin_unlock_bh(&stat_lock);
503 +static void clear_tx_desc(struct sw *sw)
505 + struct _tx_ring *tx_ring = sw->tx_ring;
506 + struct tx_desc *desc;
507 + struct tx_desc *next_desc;
510 + int num_used = tx_ring->num_used - tx_ring->num_count;
512 + if (num_used < (TX_DESCS >> 1))
515 + index = tx_ring->free_index;
516 + desc = &(tx_ring)->desc[index];
517 + for (i = 0; i < num_used; i++) {
519 + if (unlikely(++index == TX_DESCS)) index = 0;
520 + next_desc = &(tx_ring)->desc[index];
521 + prefetch(next_desc);
522 + prefetch(next_desc + 4);
523 + if (likely(desc->sdp))
524 + dma_unmap_single(NULL, desc->sdp,
525 + desc->sdl, DMA_TO_DEVICE);
531 + tx_ring->free_index = index;
532 + tx_ring->num_used -= i;
535 +static int eth_poll(struct napi_struct *napi, int budget)
537 + struct sw *sw = container_of(napi, struct sw, napi);
538 + struct net_device *dev;
539 + struct _rx_ring *rx_ring = sw->rx_ring;
541 + unsigned int length;
542 + unsigned int i = rx_ring->cur_index;
543 + struct rx_desc *next_desc;
544 + struct rx_desc *desc = &(rx_ring)->desc[i];
547 + while (desc->cown) {
548 + struct sk_buff *skb;
550 + if (received >= budget)
553 + skb = rx_ring->buff_tab[i];
555 + if (++i == RX_DESCS) i = 0;
556 + next_desc = &(rx_ring)->desc[i];
557 + prefetch(next_desc);
558 + prefetch(next_desc + 4);
559 + prefetch(next_desc + 8);
561 + port_id = desc->sp;
563 + dev = switch_port_tab[2]->netdev;
565 + dev = switch_port_tab[port_id]->netdev;
567 + length = desc->sdl;
568 + /* process received frame */
569 + dma_unmap_single(&dev->dev, desc->sdp,
570 + length, DMA_FROM_DEVICE);
572 + skb_put(skb, length);
575 + skb->protocol = eth_type_trans(skb, dev);
577 + dev->stats.rx_packets++;
578 + dev->stats.rx_bytes += length;
580 + switch (desc->prot) {
588 + skb->ip_summed = CHECKSUM_NONE;
590 + skb->ip_summed = CHECKSUM_UNNECESSARY;
593 + skb->ip_summed = CHECKSUM_NONE;
597 + napi_gro_receive(napi, skb);
603 + cns3xxx_alloc_rx_buf(sw, received);
604 + rx_ring->cur_index = i;
606 + if (received != budget) {
607 + napi_complete(napi);
608 + enable_irq(IRQ_CNS3XXX_SW_R0RXC);
614 +static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
616 + struct port *port = netdev_priv(dev);
617 + struct sw *sw = port->sw;
618 + struct _tx_ring *tx_ring = sw->tx_ring;
619 + struct tx_desc *tx_desc;
621 + int len = skb->len;
622 + char pmap = (1 << port->id);
627 + if (unlikely(len > sw->mtu)) {
628 + dev_kfree_skb(skb);
629 + dev->stats.tx_errors++;
630 + return NETDEV_TX_OK;
633 + update_tx_stats(sw);
635 + spin_lock_bh(&tx_lock);
639 + if (unlikely(tx_ring->num_used == TX_DESCS)) {
640 + spin_unlock_bh(&tx_lock);
641 + return NETDEV_TX_BUSY;
644 + index = tx_ring->cur_index;
646 + if (unlikely(++tx_ring->cur_index == TX_DESCS))
647 + tx_ring->cur_index = 0;
649 + tx_ring->num_used++;
650 + tx_ring->num_count++;
652 + spin_unlock_bh(&tx_lock);
654 + tx_desc = &(tx_ring)->desc[index];
656 + tx_desc->sdp = dma_map_single(NULL, skb->data, len,
659 + if (dma_mapping_error(NULL, tx_desc->sdp)) {
660 + dev_kfree_skb(skb);
661 + dev->stats.tx_errors++;
662 + return NETDEV_TX_OK;
665 + tx_desc->pmap = pmap;
666 + tx_ring->buff_tab[index] = skb;
668 + if (index == TX_DESCS - 1) {
669 + tx_desc->config0 = 0x74070000 | len;
671 + tx_desc->config0 = 0x34070000 | len;
674 + return NETDEV_TX_OK;
677 +static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
679 + struct port *port = netdev_priv(dev);
681 + if (!netif_running(dev))
683 + return phy_mii_ioctl(port->phydev, req, cmd);
686 +/* ethtool support */
688 +static void cns3xxx_get_drvinfo(struct net_device *dev,
689 + struct ethtool_drvinfo *info)
691 + strcpy(info->driver, DRV_NAME);
692 + strcpy(info->bus_info, "internal");
695 +static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
697 + struct port *port = netdev_priv(dev);
698 + return phy_ethtool_gset(port->phydev, cmd);
701 +static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
703 + struct port *port = netdev_priv(dev);
704 + return phy_ethtool_sset(port->phydev, cmd);
707 +static int cns3xxx_nway_reset(struct net_device *dev)
709 + struct port *port = netdev_priv(dev);
710 + return phy_start_aneg(port->phydev);
713 +static struct ethtool_ops cns3xxx_ethtool_ops = {
714 + .get_drvinfo = cns3xxx_get_drvinfo,
715 + .get_settings = cns3xxx_get_settings,
716 + .set_settings = cns3xxx_set_settings,
717 + .nway_reset = cns3xxx_nway_reset,
718 + .get_link = ethtool_op_get_link,
722 +static int init_rings(struct sw *sw)
725 + struct _rx_ring *rx_ring = sw->rx_ring;
726 + struct _tx_ring *tx_ring = sw->tx_ring;
728 + __raw_writel(0x0, &sw->regs->fs_dma_ctrl0);
729 + __raw_writel(0x11, &sw->regs->dma_auto_poll_cfg);
730 + __raw_writel(0x000000f0, &sw->regs->dma_ring_ctrl);
731 + __raw_writel(0x800000f0, &sw->regs->dma_ring_ctrl);
733 + __raw_writel(0x000000f0, &sw->regs->dma_ring_ctrl);
735 + if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL, RX_POOL_ALLOC_SIZE, 32, 0)))
738 + if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
739 + &rx_ring->phys_addr)))
741 + memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
743 + /* Setup RX buffers */
744 + for (i = 0; i < RX_DESCS; i++) {
745 + struct rx_desc *desc = &(rx_ring)->desc[i];
746 + struct sk_buff *skb;
747 + if (!(skb = dev_alloc_skb(sw->mtu)))
749 + if (SKB_DMA_REALIGN)
750 + skb_reserve(skb, SKB_DMA_REALIGN);
751 + skb_reserve(skb, NET_IP_ALIGN);
752 + desc->sdl = sw->mtu;
753 + if (i == (RX_DESCS - 1))
758 + desc->sdp = dma_map_single(NULL, skb->data,
759 + sw->mtu, DMA_FROM_DEVICE);
760 + if (dma_mapping_error(NULL, desc->sdp)) {
763 + rx_ring->buff_tab[i] = skb;
766 + __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
767 + __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);
769 + if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL, TX_POOL_ALLOC_SIZE, 32, 0)))
772 + if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
773 + &tx_ring->phys_addr)))
775 + memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
777 + /* Setup TX buffers */
778 + for (i = 0; i < TX_DESCS; i++) {
779 + struct tx_desc *desc = &(tx_ring)->desc[i];
780 + tx_ring->buff_tab[i] = 0;
782 + if (i == (TX_DESCS - 1))
786 + __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
787 + __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);
792 +static void destroy_rings(struct sw *sw)
795 + if (sw->rx_ring->desc) {
796 + for (i = 0; i < RX_DESCS; i++) {
797 + struct _rx_ring *rx_ring = sw->rx_ring;
798 + struct rx_desc *desc = &(rx_ring)->desc[i];
799 + struct sk_buff *skb = sw->rx_ring->buff_tab[i];
801 + dma_unmap_single(NULL,
803 + sw->mtu, DMA_FROM_DEVICE);
804 + dev_kfree_skb(skb);
807 + dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
808 + dma_pool_destroy(rx_dma_pool);
810 + sw->rx_ring->desc = 0;
812 + if (sw->tx_ring->desc) {
813 + for (i = 0; i < TX_DESCS; i++) {
814 + struct _tx_ring *tx_ring = sw->tx_ring;
815 + struct tx_desc *desc = &(tx_ring)->desc[i];
816 + struct sk_buff *skb = sw->tx_ring->buff_tab[i];
818 + dma_unmap_single(NULL, desc->sdp,
819 + skb->len, DMA_TO_DEVICE);
820 + dev_kfree_skb(skb);
823 + dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr);
824 + dma_pool_destroy(tx_dma_pool);
826 + sw->tx_ring->desc = 0;
830 +static int eth_open(struct net_device *dev)
832 + struct port *port = netdev_priv(dev);
833 + struct sw *sw = port->sw;
836 + port->speed = 0; /* force "link up" message */
837 + phy_start(port->phydev);
839 + netif_start_queue(dev);
842 + request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
843 + napi_enable(&sw->napi);
844 + netif_start_queue(napi_dev);
845 + //enable_irq(IRQ_CNS3XXX_SW_R0RXC);
847 + temp = __raw_readl(&sw->regs->mac_cfg[2]);
848 + temp &= ~(PORT_DISABLE);
849 + __raw_writel(temp, &sw->regs->mac_cfg[2]);
851 + temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
853 + __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
855 + __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
857 + temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
858 + temp &= ~(PORT_DISABLE);
859 + __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
862 + netif_carrier_on(dev);
867 +static int eth_close(struct net_device *dev)
869 + struct port *port = netdev_priv(dev);
870 + struct sw *sw = port->sw;
875 + temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
876 + temp |= (PORT_DISABLE);
877 + __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
879 + netif_stop_queue(dev);
881 + phy_stop(port->phydev);
884 + disable_irq(IRQ_CNS3XXX_SW_R0RXC);
885 + free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
886 + napi_disable(&sw->napi);
887 + netif_stop_queue(napi_dev);
888 + temp = __raw_readl(&sw->regs->mac_cfg[2]);
889 + temp |= (PORT_DISABLE);
890 + __raw_writel(temp, &sw->regs->mac_cfg[2]);
893 + __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
896 + netif_carrier_off(dev);
900 +static void eth_rx_mode(struct net_device *dev)
902 + struct port *port = netdev_priv(dev);
903 + struct sw *sw = port->sw;
906 + temp = __raw_readl(&sw->regs->mac_glob_cfg);
908 + if (dev->flags & IFF_PROMISC) {
910 + temp |= ((1 << 2) << 29);
912 + temp |= ((1 << port->id) << 29);
915 + temp &= ~((1 << 2) << 29);
917 + temp &= ~((1 << port->id) << 29);
919 + __raw_writel(temp, &sw->regs->mac_glob_cfg);
922 +static int eth_set_mac(struct net_device *netdev, void *p)
924 + struct port *port = netdev_priv(netdev);
925 + struct sw *sw = port->sw;
926 + struct sockaddr *addr = p;
929 + if (!is_valid_ether_addr(addr->sa_data))
930 + return -EADDRNOTAVAIL;
932 + /* Invalidate old ARL Entry */
934 + __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
936 + __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
937 + __raw_writel( ((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
938 + (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
939 + &sw->regs->arl_ctrl[1]);
941 + __raw_writel( ((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
943 + &sw->regs->arl_ctrl[2]);
944 + __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
946 + while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
947 + && cycles < 5000) {
953 + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
956 + __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
958 + __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
959 + __raw_writel( ((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
960 + (addr->sa_data[2] << 8) | (addr->sa_data[3])),
961 + &sw->regs->arl_ctrl[1]);
963 + __raw_writel( ((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
964 + (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
965 + __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
967 + while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
968 + && cycles < 5000) {
975 +static int cns3xxx_change_mtu(struct net_device *netdev, int new_mtu)
977 + struct port *port = netdev_priv(netdev);
978 + struct sw *sw = port->sw;
981 + struct _rx_ring *rx_ring = sw->rx_ring;
982 + struct rx_desc *desc;
983 + struct sk_buff *skb;
985 + if (new_mtu > MAX_MRU)
988 + netdev->mtu = new_mtu;
990 + new_mtu += 36 + SKB_DMA_REALIGN;
991 + port->mtu = new_mtu;
994 + for (i = 0; i < 3; i++) {
995 + if (switch_port_tab[i]) {
996 + if (switch_port_tab[i]->mtu > new_mtu)
997 + new_mtu = switch_port_tab[i]->mtu;
1002 + if (new_mtu == sw->mtu)
1005 + disable_irq(IRQ_CNS3XXX_SW_R0RXC);
1007 + sw->mtu = new_mtu;
1011 + __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
1013 + for (i = 0; i < RX_DESCS; i++) {
1014 + desc = &(rx_ring)->desc[i];
1015 + /* Check if we own it, if we do, it will get set correctly when it is re-used */
1016 + if (!desc->cown) {
1017 + skb = rx_ring->buff_tab[i];
1018 + dma_unmap_single(NULL, desc->sdp, desc->sdl, DMA_FROM_DEVICE);
1019 + dev_kfree_skb(skb);
1021 + if ((skb = dev_alloc_skb(new_mtu))) {
1022 + if (SKB_DMA_REALIGN)
1023 + skb_reserve(skb, SKB_DMA_REALIGN);
1024 + skb_reserve(skb, NET_IP_ALIGN);
1025 + desc->sdp = dma_map_single(NULL, skb->data,
1026 + new_mtu, DMA_FROM_DEVICE);
1027 + if (dma_mapping_error(NULL, desc->sdp)) {
1028 + dev_kfree_skb(skb);
1033 + /* put the new buffer on RX-free queue */
1034 + rx_ring->buff_tab[i] = skb;
1036 + if (i == RX_DESCS - 1)
1037 + desc->config0 = 0x70000000 | new_mtu;
1039 + desc->config0 = 0x30000000 | new_mtu;
1043 + /* Re-ENABLE DMA */
1044 + temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
1046 + __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
1048 + __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
1050 + enable_irq(IRQ_CNS3XXX_SW_R0RXC);
1055 +static const struct net_device_ops cns3xxx_netdev_ops = {
1056 + .ndo_open = eth_open,
1057 + .ndo_stop = eth_close,
1058 + .ndo_start_xmit = eth_xmit,
1059 + .ndo_set_rx_mode = eth_rx_mode,
1060 + .ndo_do_ioctl = eth_ioctl,
1061 + .ndo_change_mtu = cns3xxx_change_mtu,
1062 + .ndo_set_mac_address = eth_set_mac,
1063 + .ndo_validate_addr = eth_validate_addr,
1066 +static int __devinit eth_init_one(struct platform_device *pdev)
1069 + struct port *port;
1071 + struct net_device *dev;
1072 + struct cns3xxx_plat_info *plat = pdev->dev.platform_data;
1074 + char phy_id[MII_BUS_ID_SIZE + 3];
1078 + spin_lock_init(&tx_lock);
1079 + spin_lock_init(&stat_lock);
1081 + if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
1083 + strcpy(napi_dev->name, "switch%d");
1085 + SET_NETDEV_DEV(napi_dev, &pdev->dev);
1086 + sw = netdev_priv(napi_dev);
1087 + memset(sw, 0, sizeof(struct sw));
1088 + sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
1089 + regs_phys = CNS3XXX_SWITCH_BASE;
1090 + sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);
1091 + if (!sw->mem_res) {
1096 + sw->mtu = 1536 + SKB_DMA_REALIGN;
1098 + for (i = 0; i < 4; i++) {
1099 + temp = __raw_readl(&sw->regs->mac_cfg[i]);
1100 + temp |= (PORT_DISABLE) | 0x80000000;
1101 + __raw_writel(temp, &sw->regs->mac_cfg[i]);
1104 + temp = PORT_DISABLE;
1105 + __raw_writel(temp, &sw->regs->mac_cfg[2]);
1107 + temp = __raw_readl(&sw->regs->vlan_cfg);
1108 + temp |= ((1 << 15) | (1 << 0));
1109 + __raw_writel(temp, &sw->regs->vlan_cfg);
1111 + temp = 0x02300000;
1112 + __raw_writel(temp, &sw->regs->mac_glob_cfg);
1114 + if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) {
1118 + memset(sw->rx_ring, 0, sizeof(struct _rx_ring));
1120 + if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) {
1124 + memset(sw->tx_ring, 0, sizeof(struct _tx_ring));
1126 + if ((err = init_rings(sw)) != 0) {
1127 + destroy_rings(sw);
1129 + goto err_free_rings;
1131 + platform_set_drvdata(pdev, napi_dev);
1133 + netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);
1135 + for (i = 0; i < 3; i++) {
1136 + if (!(plat->ports & (1 << i))) {
1140 + if (!(dev = alloc_etherdev(sizeof(struct port)))) {
1144 + //SET_NETDEV_DEV(dev, &pdev->dev);
1145 + port = netdev_priv(dev);
1146 + port->netdev = dev;
1152 + port->mtu = sw->mtu;
1154 + temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
1155 + temp |= (PORT_DISABLE);
1156 + __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
1158 + dev->netdev_ops = &cns3xxx_netdev_ops;
1159 + dev->ethtool_ops = &cns3xxx_ethtool_ops;
1160 + dev->tx_queue_len = 1000;
1161 + dev->features = NETIF_F_HW_CSUM;
1163 + dev->vlan_features = NETIF_F_HW_CSUM;
1165 + switch_port_tab[i] = port;
1166 + memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);
1168 + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
1169 + port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
1170 + PHY_INTERFACE_MODE_RGMII);
1171 + if ((err = IS_ERR(port->phydev))) {
1172 + switch_port_tab[i] = 0;
1177 + port->phydev->irq = PHY_POLL;
1179 + if ((err = register_netdev(dev))) {
1180 + phy_disconnect(port->phydev);
1181 + switch_port_tab[i] = 0;
1186 + printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
1187 + netif_carrier_off(dev);
1195 + for (--i; i >= 0; i--) {
1196 + if (switch_port_tab[i]) {
1197 + port = switch_port_tab[i];
1198 + dev = port->netdev;
1199 + unregister_netdev(dev);
1200 + phy_disconnect(port->phydev);
1201 + switch_port_tab[i] = 0;
1206 + kfree(sw->tx_ring);
1208 + kfree(sw->rx_ring);
1210 + free_netdev(napi_dev);
1214 +static int __devexit eth_remove_one(struct platform_device *pdev)
1216 + struct net_device *dev = platform_get_drvdata(pdev);
1217 + struct sw *sw = netdev_priv(dev);
1219 + destroy_rings(sw);
1221 + for (i = 2; i >= 0; i--) {
1222 + if (switch_port_tab[i]) {
1223 + struct port *port = switch_port_tab[i];
1224 + struct net_device *dev = port->netdev;
1225 + unregister_netdev(dev);
1226 + phy_disconnect(port->phydev);
1227 + switch_port_tab[i] = 0;
1232 + release_resource(sw->mem_res);
1233 + free_netdev(napi_dev);
1237 +static struct platform_driver cns3xxx_eth_driver = {
1238 + .driver.name = DRV_NAME,
1239 + .probe = eth_init_one,
1240 + .remove = eth_remove_one,
1243 +static int __init eth_init_module(void)
1246 + if ((err = cns3xxx_mdio_register()))
1248 + return platform_driver_register(&cns3xxx_eth_driver);
1251 +static void __exit eth_cleanup_module(void)
1253 + platform_driver_unregister(&cns3xxx_eth_driver);
1254 + cns3xxx_mdio_remove();
1257 +module_init(eth_init_module);
1258 +module_exit(eth_cleanup_module);
1260 +MODULE_AUTHOR("Chris Lang");
1261 +MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
1262 +MODULE_LICENSE("GPL v2");
1263 +MODULE_ALIAS("platform:cns3xxx_eth");
1264 --- a/arch/arm/mach-cns3xxx/include/mach/hardware.h
1265 +++ b/arch/arm/mach-cns3xxx/include/mach/hardware.h
1267 #define PCIBIOS_MIN_MEM 0x00000000
1268 #define pcibios_assign_all_busses() 1
1270 +#include "platform.h"
1274 +++ b/arch/arm/mach-cns3xxx/include/mach/platform.h
1277 + * arch/arm/mach-cns3xxx/include/mach/platform.h
1279 + * Copyright 2011 Gateworks Corporation
1280 + * Chris Lang <clang@gateworks.com>
1282 + * This file is free software; you can redistribute it and/or modify
1283 + * it under the terms of the GNU General Public License, Version 2, as
1284 + * published by the Free Software Foundation.
1288 +#ifndef __ASM_ARCH_PLATFORM_H
1289 +#define __ASM_ARCH_PLATFORM_H
1291 +#ifndef __ASSEMBLY__
1293 +/* Information about built-in Ethernet MAC interfaces */
1294 +struct cns3xxx_plat_info {
1295 + u8 ports; /* Bitmap of enabled Ports */
1300 +#endif /* __ASM_ARCH_PLATFORM_H */