--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2071,6 +2071,14 @@ config ACENIC_OMIT_TIGON_I
 	  The safe and default value for this is N.
+	tristate "Cavium CNS3xxx Ethernet support"
+	depends on ARCH_CNS3XXX
+	  Say Y here if you want to use the built-in Ethernet ports
+	  on the CNS3xxx processor.
 	tristate "DL2000/TC902x-based Gigabit Ethernet support"
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -240,6 +240,7 @@ obj-$(CONFIG_MAC89x0) += mac89x0.o
 obj-$(CONFIG_TUN) += tun.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_NET_NETX) += netx-eth.o
+obj-$(CONFIG_CNS3XXX_ETH) += cns3xxx_eth.o
 obj-$(CONFIG_DL2K) += dl2k.o
 obj-$(CONFIG_R8169) += r8169.o
 obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
+++ b/drivers/net/cns3xxx_eth.c
+ * Cavium CNS3xxx Gigabit driver for Linux
+ * Copyright 2011 Gateworks Corporation
+ *	       Chris Lang <clang@gateworks.com>
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <mach/hardware.h>
+#define DRV_NAME "cns3xxx_eth"
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
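+/* Headroom reserved before each RX buffer so that its start falls on a
+ * cache-line boundary for DMA; inferred from the skb_reserve(skb,
+ * SKB_DMA_REALIGN) calls below.
+ */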
+#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
+#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
+#define REGS_SIZE 336
+#define NAPI_WEIGHT 64
+#define MDIO_CMD_COMPLETE 0x00008000
+#define MDIO_WRITE_COMMAND 0x00002000
+#define MDIO_READ_COMMAND 0x00004000
+#define MDIO_REG_OFFSET 8
+#define MDIO_VALUE_OFFSET 16
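+/* Implied phy_control layout (from the defines above and their use in
+ * cns3xxx_mdio_cmd): bits [4:0] PHY address, [12:8] register number,
+ * bit 13 write command, bit 14 read command, bit 15 command complete,
+ * bits [31:16] data.
+ */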
+/* Descriptor Defines */
+#define END_OF_RING 0x40000000
+#define FIRST_SEGMENT 0x20000000
+#define LAST_SEGMENT 0x10000000
+#define FORCE_ROUTE 0x04000000
+#define IP_CHECKSUM 0x00040000
+#define UDP_CHECKSUM 0x00020000
+#define TCP_CHECKSUM 0x00010000
+/* Port Config Defines */
+#define PORT_DISABLE 0x00040000
+#define PROMISC_OFFSET 29
+/* Global Config Defines */
+#define UNKNOWN_VLAN_TO_CPU 0x02000000
+#define ACCEPT_CRC_PACKET 0x00200000
+#define CRC_STRIPPING 0x00100000
+/* VLAN Config Defines */
+#define NIC_MODE 0x00008000
+#define VLAN_UNAWARE 0x00000001
+/* DMA AUTO Poll Defines */
+#define TS_POLL_EN 0x00000020
+#define TS_SUSPEND 0x00000010
+#define FS_POLL_EN 0x00000002
+#define FS_SUSPEND 0x00000001
+/* DMA Ring Control Defines */
+#define QUEUE_THRESHOLD 0x000000f0
+#define CLR_FS_STATE 0x80000000
+	u32 sdp; /* segment data pointer */
+	u32 sdl:16; /* segment data length */
+	u32 rsv_1:3; /* reserved */
+	u32 fp:1; /* force priority */
+	u8 alignment[16]; /* for 32 byte alignment */
+	u32 sdp; /* segment data pointer */
+	u32 sdl:16; /* segment data length */
+	u8 alignment[16]; /* for 32 byte alignment */
+struct switch_regs {
+	u32 mac_pri_ctrl[5], __res;
+	u32 prio_etype_udp;
+	u32 prio_ipdscp[8];
+	u32 mc_fc_glob_thrs;
+	u32 fc_input_thrs, __res1[2];
+	u32 mac_glob_cfg_ext, __res2[2];
+	u32 dma_auto_poll_cfg;
+	u32 delay_intr_cfg, __res3;
+	u32 ts_desc_base_addr0, __res4;
+	u32 fs_desc_base_addr0, __res5;
+	u32 ts_desc_base_addr1, __res6;
+	u32 fs_desc_base_addr1;
+	struct tx_desc *desc;
+	dma_addr_t phys_addr;
+	struct tx_desc *cur_addr;
+	struct sk_buff *buff_tab[TX_DESCS];
+	struct rx_desc *desc;
+	dma_addr_t phys_addr;
+	struct rx_desc *cur_addr;
+	struct sk_buff *buff_tab[RX_DESCS];
+	struct resource *mem_res;
+	struct switch_regs __iomem *regs;
+	struct napi_struct napi;
+	struct cns3xxx_plat_info *plat;
+	struct _tx_ring *tx_ring;
+	struct _rx_ring *rx_ring;
+	struct net_device *netdev;
+	struct phy_device *phydev;
+	int id;			/* logical port ID */
+static spinlock_t mdio_lock;
+static spinlock_t tx_lock;
+static spinlock_t stat_lock;
+static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
+static struct mii_bus *mdio_bus;
+static int ports_open;
+static struct port *switch_port_tab[3];
+static struct dma_pool *rx_dma_pool;
+static struct dma_pool *tx_dma_pool;
+static struct net_device *napi_dev;
+static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+			    int write, u16 cmd)
+	temp = __raw_readl(&mdio_regs->phy_control);
+	temp |= MDIO_CMD_COMPLETE;
+	__raw_writel(temp, &mdio_regs->phy_control);
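+	/* The writes above acknowledge any stale transaction: the complete
+	 * flag appears to be write-1-to-clear (inferred from this
+	 * set-then-poll usage).
+	 */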
+		temp = (cmd << MDIO_VALUE_OFFSET);
+		temp |= MDIO_WRITE_COMMAND;
+		temp = MDIO_READ_COMMAND;
+	temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
+	temp |= (phy_id & 0x1f);
+	__raw_writel(temp, &mdio_regs->phy_control);
+	while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0) &&
+	       cycles < 5000) {
+	if (cycles == 5000) {
+		printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
+	temp = __raw_readl(&mdio_regs->phy_control);
+	temp |= MDIO_CMD_COMPLETE;
+	__raw_writel(temp, &mdio_regs->phy_control);
+	return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF);
+static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
+	unsigned long flags;
+	spin_lock_irqsave(&mdio_lock, flags);
+	ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
+	spin_unlock_irqrestore(&mdio_lock, flags);
+static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+	unsigned long flags;
+	spin_lock_irqsave(&mdio_lock, flags);
+	ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
+	spin_unlock_irqrestore(&mdio_lock, flags);
+static int cns3xxx_mdio_register(void)
+	if (!(mdio_bus = mdiobus_alloc()))
+	mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
+	spin_lock_init(&mdio_lock);
+	mdio_bus->name = "CNS3xxx MII Bus";
+	mdio_bus->read = &cns3xxx_mdio_read;
+	mdio_bus->write = &cns3xxx_mdio_write;
+	strcpy(mdio_bus->id, "0");
+	if ((err = mdiobus_register(mdio_bus)))
+		mdiobus_free(mdio_bus);
+static void cns3xxx_mdio_remove(void)
+	mdiobus_unregister(mdio_bus);
+	mdiobus_free(mdio_bus);
+static void cns3xxx_adjust_link(struct net_device *dev)
+	struct port *port = netdev_priv(dev);
+	struct phy_device *phydev = port->phydev;
+	if (!phydev->link) {
+		printk(KERN_INFO "%s: link down\n", dev->name);
+	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+	port->speed = phydev->speed;
+	port->duplex = phydev->duplex;
+	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+	       dev->name, port->speed, port->duplex ? "full" : "half");
+static irqreturn_t eth_rx_irq(int irq, void *pdev)
+	struct net_device *dev = pdev;
+	struct sw *sw = netdev_priv(dev);
+	if (likely(napi_schedule_prep(&sw->napi))) {
+		disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
+		__napi_schedule(&sw->napi);
+	return IRQ_HANDLED;
+static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
+	struct _rx_ring *rx_ring = sw->rx_ring;
+	unsigned int i = rx_ring->alloc_index;
+	struct rx_desc *desc;
+	struct sk_buff *skb;
+	rx_ring->alloc_count += received;
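+	/* alloc_count carries over any buffers we failed to replenish on a
+	 * previous pass, so those slots are retried here.
+	 */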
+	for (received = rx_ring->alloc_count; received > 0; received--) {
+		desc = &(rx_ring)->desc[i];
+		if ((skb = dev_alloc_skb(mtu))) {
+			if (SKB_DMA_REALIGN)
+				skb_reserve(skb, SKB_DMA_REALIGN);
+			skb_reserve(skb, NET_IP_ALIGN);
+			desc->sdp = dma_map_single(NULL, skb->data,
+						   mtu, DMA_FROM_DEVICE);
+			if (dma_mapping_error(NULL, desc->sdp)) {
+				dev_kfree_skb(skb);
+				/* Failed to map, better luck next time */
+			/* Failed to allocate skb, try again next time */
+		/* put the new buffer on RX-free queue */
+		rx_ring->buff_tab[i] = skb;
+		if (++i == RX_DESCS) {
+			desc->config0 = END_OF_RING | FIRST_SEGMENT |
+					LAST_SEGMENT | mtu;
+			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | mtu;
+	rx_ring->alloc_count = received;
+	rx_ring->alloc_index = i;
+static void update_tx_stats(struct sw *sw)
+	struct _tx_ring *tx_ring = sw->tx_ring;
+	struct tx_desc *desc;
+	struct tx_desc *next_desc;
+	struct sk_buff *skb;
+	spin_lock_bh(&stat_lock);
+	num_count = tx_ring->num_count;
+		spin_unlock_bh(&stat_lock);
+	index = tx_ring->count_index;
+	desc = &(tx_ring)->desc[index];
+	for (i = 0; i < num_count; i++) {
+		skb = tx_ring->buff_tab[index];
+		tx_ring->buff_tab[index] = NULL;
+		if (unlikely(++index == TX_DESCS))
+			index = 0;
+		next_desc = &(tx_ring)->desc[index];
+		prefetch(next_desc + 4);
+		skb->dev->stats.tx_packets++;
+		skb->dev->stats.tx_bytes += skb->len;
+		dev_kfree_skb_any(skb);
+	tx_ring->num_count -= i;
+	tx_ring->count_index = index;
+	spin_unlock_bh(&stat_lock);
+static void clear_tx_desc(struct sw *sw)
+	struct _tx_ring *tx_ring = sw->tx_ring;
+	struct tx_desc *desc;
+	struct tx_desc *next_desc;
+	int num_used = tx_ring->num_used - tx_ring->num_count;
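+	/* Reclaim lazily: descriptors are only swept back once at least half
+	 * the ring is outstanding (see the check below).
+	 */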
+	if (num_used < (TX_DESCS >> 1))
+	index = tx_ring->free_index;
+	desc = &(tx_ring)->desc[index];
+	for (i = 0; i < num_used; i++) {
+		if (unlikely(++index == TX_DESCS))
+			index = 0;
+		next_desc = &(tx_ring)->desc[index];
+		prefetch(next_desc);
+		prefetch(next_desc + 4);
+		if (likely(desc->sdp))
+			dma_unmap_single(NULL, desc->sdp,
+					 desc->sdl, DMA_TO_DEVICE);
+	tx_ring->free_index = index;
+	tx_ring->num_used -= i;
+static int eth_poll(struct napi_struct *napi, int budget)
+	struct sw *sw = container_of(napi, struct sw, napi);
+	struct net_device *dev;
+	struct _rx_ring *rx_ring = sw->rx_ring;
+	unsigned int length;
+	unsigned int i = rx_ring->cur_index;
+	struct rx_desc *next_desc;
+	struct rx_desc *desc = &(rx_ring)->desc[i];
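+	/* cown: ownership bit, set once the switch has handed the descriptor
+	 * back to the CPU (inferred from usage).
+	 */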
+	while (desc->cown) {
+		struct sk_buff *skb;
+		if (received >= budget)
+		skb = rx_ring->buff_tab[i];
+		if (++i == RX_DESCS)
+			i = 0;
+		next_desc = &(rx_ring)->desc[i];
+		prefetch(next_desc);
+		port_id = desc->sp;
+			dev = switch_port_tab[2]->netdev;
+			dev = switch_port_tab[port_id]->netdev;
+		length = desc->sdl;
+		/* process received frame */
+		dma_unmap_single(&dev->dev, desc->sdp,
+				 length, DMA_FROM_DEVICE);
+		skb_put(skb, length);
+		skb->protocol = eth_type_trans(skb, dev);
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += length;
+		switch (desc->prot) {
+			skb->ip_summed = CHECKSUM_NONE;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			skb->ip_summed = CHECKSUM_NONE;
+		napi_gro_receive(napi, skb);
+	cns3xxx_alloc_rx_buf(sw, received);
+	rx_ring->cur_index = i;
+	if (received != budget) {
+		napi_complete(napi);
+		enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
+	struct port *port = netdev_priv(dev);
+	struct sw *sw = port->sw;
+	struct _tx_ring *tx_ring = sw->tx_ring;
+	struct tx_desc *tx_desc;
+	int len = skb->len;
+	char pmap = (1 << port->id);
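+	/* pmap: bitmap of destination switch ports; paired with FORCE_ROUTE
+	 * in the descriptor below, presumably to steer the frame out of this
+	 * port only.
+	 */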
+	if (unlikely(len > sw->mtu)) {
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		return NETDEV_TX_OK;
+	update_tx_stats(sw);
+	spin_lock_bh(&tx_lock);
+	if (unlikely(tx_ring->num_used == TX_DESCS)) {
+		spin_unlock_bh(&tx_lock);
+		return NETDEV_TX_BUSY;
+	index = tx_ring->cur_index;
+	if (unlikely(++tx_ring->cur_index == TX_DESCS))
+		tx_ring->cur_index = 0;
+	tx_ring->num_used++;
+	tx_ring->num_count++;
+	spin_unlock_bh(&tx_lock);
+	tx_desc = &(tx_ring)->desc[index];
+	tx_desc->sdp = dma_map_single(NULL, skb->data, len,
+	if (dma_mapping_error(NULL, tx_desc->sdp)) {
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		return NETDEV_TX_OK;
+	tx_desc->pmap = pmap;
+	tx_ring->buff_tab[index] = skb;
+	if (index == TX_DESCS - 1) {
+		tx_desc->config0 = END_OF_RING | FIRST_SEGMENT | LAST_SEGMENT |
+				   FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+				   TCP_CHECKSUM | len;
+		tx_desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
+				   FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+				   TCP_CHECKSUM | len;
+	return NETDEV_TX_OK;
+static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+	struct port *port = netdev_priv(dev);
+	if (!netif_running(dev))
+	return phy_mii_ioctl(port->phydev, req, cmd);
+/* ethtool support */
+static void cns3xxx_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *info)
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->bus_info, "internal");
+static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+	struct port *port = netdev_priv(dev);
+	return phy_ethtool_gset(port->phydev, cmd);
+static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+	struct port *port = netdev_priv(dev);
+	return phy_ethtool_sset(port->phydev, cmd);
+static int cns3xxx_nway_reset(struct net_device *dev)
+	struct port *port = netdev_priv(dev);
+	return phy_start_aneg(port->phydev);
+static const struct ethtool_ops cns3xxx_ethtool_ops = {
+	.get_drvinfo = cns3xxx_get_drvinfo,
+	.get_settings = cns3xxx_get_settings,
+	.set_settings = cns3xxx_set_settings,
+	.nway_reset = cns3xxx_nway_reset,
+	.get_link = ethtool_op_get_link,
+static int init_rings(struct sw *sw)
+	struct _rx_ring *rx_ring = sw->rx_ring;
+	struct _tx_ring *tx_ring = sw->tx_ring;
+	__raw_writel(0, &sw->regs->fs_dma_ctrl0);
+	__raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
+	__raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
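+	/* Pulse CLR_FS_STATE (set it, then write the threshold alone again)
+	 * to reset the FS (receive) DMA engine state; the bit is assumed not
+	 * to be self-clearing.
+	 */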
+	__raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+	__raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+	if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL,
+					    RX_POOL_ALLOC_SIZE, 32, 0)))
+	if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
+					     &rx_ring->phys_addr)))
+	memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
+	/* Setup RX buffers */
+	for (i = 0; i < RX_DESCS; i++) {
+		struct rx_desc *desc = &(rx_ring)->desc[i];
+		struct sk_buff *skb;
+		if (!(skb = dev_alloc_skb(sw->mtu)))
+		if (SKB_DMA_REALIGN)
+			skb_reserve(skb, SKB_DMA_REALIGN);
+		skb_reserve(skb, NET_IP_ALIGN);
+		desc->sdl = sw->mtu;
+		if (i == (RX_DESCS - 1))
+		desc->sdp = dma_map_single(NULL, skb->data,
+					   sw->mtu, DMA_FROM_DEVICE);
+		if (dma_mapping_error(NULL, desc->sdp)) {
+		rx_ring->buff_tab[i] = skb;
+	__raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
+	__raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);
+	if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL,
+					    TX_POOL_ALLOC_SIZE, 32, 0)))
+	if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
+					     &tx_ring->phys_addr)))
+	memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
+	/* Setup TX buffers */
+	for (i = 0; i < TX_DESCS; i++) {
+		struct tx_desc *desc = &(tx_ring)->desc[i];
+		tx_ring->buff_tab[i] = NULL;
+		if (i == (TX_DESCS - 1))
+	__raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
+	__raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);
+static void destroy_rings(struct sw *sw)
+	if (sw->rx_ring->desc) {
+		for (i = 0; i < RX_DESCS; i++) {
+			struct _rx_ring *rx_ring = sw->rx_ring;
+			struct rx_desc *desc = &(rx_ring)->desc[i];
+			struct sk_buff *skb = sw->rx_ring->buff_tab[i];
+				dma_unmap_single(NULL,
+						 sw->mtu, DMA_FROM_DEVICE);
+				dev_kfree_skb(skb);
+		dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
+		dma_pool_destroy(rx_dma_pool);
+		sw->rx_ring->desc = NULL;
+	if (sw->tx_ring->desc) {
+		for (i = 0; i < TX_DESCS; i++) {
+			struct _tx_ring *tx_ring = sw->tx_ring;
+			struct tx_desc *desc = &(tx_ring)->desc[i];
+			struct sk_buff *skb = sw->tx_ring->buff_tab[i];
+				dma_unmap_single(NULL, desc->sdp,
+						 skb->len, DMA_TO_DEVICE);
+				dev_kfree_skb(skb);
+		dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr);
+		dma_pool_destroy(tx_dma_pool);
+		sw->tx_ring->desc = NULL;
+static int eth_open(struct net_device *dev)
+	struct port *port = netdev_priv(dev);
+	struct sw *sw = port->sw;
+	port->speed = 0;	/* force "link up" message */
+	phy_start(port->phydev);
+	netif_start_queue(dev);
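+	/* All ports share a single RX ring and IRQ, so the NAPI context lives
+	 * on the dummy napi_dev rather than on any one port's netdev.
+	 */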
+		request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
+		napi_enable(&sw->napi);
+		netif_start_queue(napi_dev);
+		temp = __raw_readl(&sw->regs->mac_cfg[2]);
+		temp &= ~(PORT_DISABLE);
+		__raw_writel(temp, &sw->regs->mac_cfg[2]);
+		temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
+		temp &= ~(TS_SUSPEND | FS_SUSPEND);
+		__raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+		__raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
+	temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+	temp &= ~(PORT_DISABLE);
+	__raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+	netif_carrier_on(dev);
+static int eth_close(struct net_device *dev)
+	struct port *port = netdev_priv(dev);
+	struct sw *sw = port->sw;
+	temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+	temp |= (PORT_DISABLE);
+	__raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+	netif_stop_queue(dev);
+	phy_stop(port->phydev);
+		disable_irq(IRQ_CNS3XXX_SW_R0RXC);
+		free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
+		napi_disable(&sw->napi);
+		netif_stop_queue(napi_dev);
+		temp = __raw_readl(&sw->regs->mac_cfg[2]);
+		temp |= (PORT_DISABLE);
+		__raw_writel(temp, &sw->regs->mac_cfg[2]);
+		__raw_writel(TS_SUSPEND | FS_SUSPEND,
+			     &sw->regs->dma_auto_poll_cfg);
+	netif_carrier_off(dev);
+static void eth_rx_mode(struct net_device *dev)
+	struct port *port = netdev_priv(dev);
+	struct sw *sw = port->sw;
+	temp = __raw_readl(&sw->regs->mac_glob_cfg);
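+	/* One promiscuous-enable bit per port, starting at bit 29 of
+	 * mac_glob_cfg (inferred from PROMISC_OFFSET and the masks below).
+	 */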
+	if (dev->flags & IFF_PROMISC) {
+			temp |= ((1 << 2) << PROMISC_OFFSET);
+			temp |= ((1 << port->id) << PROMISC_OFFSET);
+			temp &= ~((1 << 2) << PROMISC_OFFSET);
+			temp &= ~((1 << port->id) << PROMISC_OFFSET);
+	__raw_writel(temp, &sw->regs->mac_glob_cfg);
+static int eth_set_mac(struct net_device *netdev, void *p)
+	struct port *port = netdev_priv(netdev);
+	struct sw *sw = port->sw;
+	struct sockaddr *addr = p;
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	/* Invalidate old ARL Entry */
+		__raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+		__raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+	__raw_writel(((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
+		      (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
+		     &sw->regs->arl_ctrl[1]);
+	__raw_writel(((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
+		     &sw->regs->arl_ctrl[2]);
+	__raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
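+	/* Bit 19 kicks off the ARL table command; completion is signalled via
+	 * bit 21 (inferred from the polling loop below).
+	 */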
+	while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0) &&
+	       cycles < 5000) {
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+		__raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+		__raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+	__raw_writel(((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
+		      (addr->sa_data[2] << 8) | (addr->sa_data[3])),
+		     &sw->regs->arl_ctrl[1]);
+	__raw_writel(((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
+		      (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
+	__raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
+	while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0) &&
+	       cycles < 5000) {
+static int cns3xxx_change_mtu(struct net_device *netdev, int new_mtu)
+	struct port *port = netdev_priv(netdev);
+	struct sw *sw = port->sw;
+	struct _rx_ring *rx_ring = sw->rx_ring;
+	struct rx_desc *desc;
+	struct sk_buff *skb;
+	if (new_mtu > MAX_MRU)
+	netdev->mtu = new_mtu;
+	new_mtu += 36 + SKB_DMA_REALIGN;
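+	/* The extra 36 bytes presumably cover link-level overhead (Ethernet
+	 * header, VLAN tag, FCS); the exact breakdown is not documented here.
+	 */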
+	port->mtu = new_mtu;
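+	/* All ports share one RX ring, so it must be sized for the largest
+	 * per-port MTU currently configured (hence the scan below).
+	 */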
+	for (i = 0; i < 3; i++) {
+		if (switch_port_tab[i]) {
+			if (switch_port_tab[i]->mtu > new_mtu)
+				new_mtu = switch_port_tab[i]->mtu;
+	if (new_mtu == sw->mtu)
+	disable_irq(IRQ_CNS3XXX_SW_R0RXC);
+	sw->mtu = new_mtu;
+	__raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
+	for (i = 0; i < RX_DESCS; i++) {
+		desc = &(rx_ring)->desc[i];
+		/* Check if we own it, if we do, it will get set correctly
+		 * when it is re-used */
+		if (!desc->cown) {
+			skb = rx_ring->buff_tab[i];
+			dma_unmap_single(NULL, desc->sdp, desc->sdl,
+			dev_kfree_skb(skb);
+			if ((skb = dev_alloc_skb(new_mtu))) {
+				if (SKB_DMA_REALIGN)
+					skb_reserve(skb, SKB_DMA_REALIGN);
+				skb_reserve(skb, NET_IP_ALIGN);
+				desc->sdp = dma_map_single(NULL, skb->data,
+							   new_mtu, DMA_FROM_DEVICE);
+				if (dma_mapping_error(NULL, desc->sdp)) {
+					dev_kfree_skb(skb);
+			/* put the new buffer on RX-free queue */
+			rx_ring->buff_tab[i] = skb;
+			if (i == RX_DESCS - 1)
+				desc->config0 = END_OF_RING | FIRST_SEGMENT |
+						LAST_SEGMENT | new_mtu;
+				desc->config0 = FIRST_SEGMENT |
+						LAST_SEGMENT | new_mtu;
+	/* Re-enable DMA */
+	temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
+	temp &= ~(TS_SUSPEND | FS_SUSPEND);
+	__raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+	__raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
+	enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+static const struct net_device_ops cns3xxx_netdev_ops = {
+	.ndo_open = eth_open,
+	.ndo_stop = eth_close,
+	.ndo_start_xmit = eth_xmit,
+	.ndo_set_rx_mode = eth_rx_mode,
+	.ndo_do_ioctl = eth_ioctl,
+	.ndo_change_mtu = cns3xxx_change_mtu,
+	.ndo_set_mac_address = eth_set_mac,
+	.ndo_validate_addr = eth_validate_addr,
+static int __devinit eth_init_one(struct platform_device *pdev)
+	struct port *port;
+	struct net_device *dev;
+	struct cns3xxx_plat_info *plat = pdev->dev.platform_data;
+	char phy_id[MII_BUS_ID_SIZE + 3];
+	spin_lock_init(&tx_lock);
+	spin_lock_init(&stat_lock);
+	if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
+	strcpy(napi_dev->name, "switch%d");
+	SET_NETDEV_DEV(napi_dev, &pdev->dev);
+	sw = netdev_priv(napi_dev);
+	memset(sw, 0, sizeof(struct sw));
+	sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
+	regs_phys = CNS3XXX_SWITCH_BASE;
+	sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);
+	if (!sw->mem_res) {
+	sw->mtu = 1536 + SKB_DMA_REALIGN;
+	for (i = 0; i < 4; i++) {
+		temp = __raw_readl(&sw->regs->mac_cfg[i]);
+		temp |= (PORT_DISABLE) | 0x80000000;
+		__raw_writel(temp, &sw->regs->mac_cfg[i]);
+	temp = PORT_DISABLE;
+	__raw_writel(temp, &sw->regs->mac_cfg[2]);
+	temp = __raw_readl(&sw->regs->vlan_cfg);
+	temp |= NIC_MODE | VLAN_UNAWARE;
+	__raw_writel(temp, &sw->regs->vlan_cfg);
+	__raw_writel(UNKNOWN_VLAN_TO_CPU | ACCEPT_CRC_PACKET |
+		     CRC_STRIPPING, &sw->regs->mac_glob_cfg);
+	if (!(sw->rx_ring = kzalloc(sizeof(struct _rx_ring), GFP_KERNEL))) {
+	if (!(sw->tx_ring = kzalloc(sizeof(struct _tx_ring), GFP_KERNEL))) {
+	if ((err = init_rings(sw)) != 0) {
+		destroy_rings(sw);
+		goto err_free_rings;
+	platform_set_drvdata(pdev, napi_dev);
+	netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);
+	for (i = 0; i < 3; i++) {
+		if (!(plat->ports & (1 << i))) {
+		if (!(dev = alloc_etherdev(sizeof(struct port)))) {
+		SET_NETDEV_DEV(dev, &pdev->dev);
+		port = netdev_priv(dev);
+		port->netdev = dev;
+		port->mtu = sw->mtu;
+		temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+		temp |= (PORT_DISABLE);
+		__raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+		dev->netdev_ops = &cns3xxx_netdev_ops;
+		dev->ethtool_ops = &cns3xxx_ethtool_ops;
+		dev->tx_queue_len = 1000;
+		dev->features = NETIF_F_HW_CSUM;
+		dev->vlan_features = NETIF_F_HW_CSUM;
+		switch_port_tab[i] = port;
+		memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);
+		snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
+		port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
+					   PHY_INTERFACE_MODE_RGMII);
+		if (IS_ERR(port->phydev)) {
+			err = PTR_ERR(port->phydev);
+			switch_port_tab[i] = NULL;
+		port->phydev->irq = PHY_POLL;
+		if ((err = register_netdev(dev))) {
+			phy_disconnect(port->phydev);
+			switch_port_tab[i] = NULL;
+		printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
+		netif_carrier_off(dev);
+	for (--i; i >= 0; i--) {
+		if (switch_port_tab[i]) {
+			port = switch_port_tab[i];
+			dev = port->netdev;
+			unregister_netdev(dev);
+			phy_disconnect(port->phydev);
+			switch_port_tab[i] = NULL;
+	kfree(sw->tx_ring);
+	kfree(sw->rx_ring);
+	free_netdev(napi_dev);
+static int __devexit eth_remove_one(struct platform_device *pdev)
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct sw *sw = netdev_priv(dev);
+	destroy_rings(sw);
+	for (i = 2; i >= 0; i--) {
+		if (switch_port_tab[i]) {
+			struct port *port = switch_port_tab[i];
+			struct net_device *dev = port->netdev;
+			unregister_netdev(dev);
+			phy_disconnect(port->phydev);
+			switch_port_tab[i] = NULL;
+	release_resource(sw->mem_res);
+	free_netdev(napi_dev);
+static struct platform_driver cns3xxx_eth_driver = {
+	.driver.name = DRV_NAME,
+	.probe = eth_init_one,
+	.remove = __devexit_p(eth_remove_one),
+static int __init eth_init_module(void)
+	if ((err = cns3xxx_mdio_register()))
+	return platform_driver_register(&cns3xxx_eth_driver);
+static void __exit eth_cleanup_module(void)
+	platform_driver_unregister(&cns3xxx_eth_driver);
+	cns3xxx_mdio_remove();
+module_init(eth_init_module);
+module_exit(eth_cleanup_module);
+MODULE_AUTHOR("Chris Lang");
+MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cns3xxx_eth");
--- a/arch/arm/mach-cns3xxx/include/mach/hardware.h
+++ b/arch/arm/mach-cns3xxx/include/mach/hardware.h
 #define PCIBIOS_MIN_MEM 0x00000000
 #define pcibios_assign_all_busses() 1
+#include "platform.h"
+++ b/arch/arm/mach-cns3xxx/include/mach/platform.h
+ * arch/arm/mach-cns3xxx/include/mach/platform.h
+ * Copyright 2011 Gateworks Corporation
+ *	       Chris Lang <clang@gateworks.com>
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+#ifndef __ASM_ARCH_PLATFORM_H
+#define __ASM_ARCH_PLATFORM_H
+#ifndef __ASSEMBLY__
+/* Information about built-in Ethernet MAC interfaces */
+struct cns3xxx_plat_info {
+	u8 ports; /* Bitmap of enabled ports */
+#endif /* __ASM_ARCH_PLATFORM_H */