1 --- a/drivers/net/Kconfig
2 +++ b/drivers/net/Kconfig
3 @@ -2638,6 +2638,8 @@ config S6GMAC
5 source "drivers/net/stmmac/Kconfig"
7 +source "drivers/net/cns21xx/Kconfig"
12 --- a/drivers/net/Makefile
13 +++ b/drivers/net/Makefile
14 @@ -254,6 +254,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/
15 obj-$(CONFIG_ENC28J60) += enc28j60.o
16 obj-$(CONFIG_ETHOC) += ethoc.o
17 obj-$(CONFIG_GRETH) += greth.o
18 +obj-$(CONFIG_CNS21XX_GEC) += cns21xx/
20 obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
23 +++ b/drivers/net/cns21xx/cns21xx_gec_main.c
26 + * Copyright (c) 2010 Gabor Juhos <juhosg@openwrt.org>
28 + * This driver has been derived from the ethernet driver of the
30 + * Copyright (c) 2008 Cavium Networks
32 + * This file is free software; you can redistribute it and/or modify
33 + * it under the terms of the GNU General Public License, Version 2, as
34 + * published by the Free Software Foundation.
37 +#include <linux/module.h>
38 +#include <linux/kernel.h>
39 +#include <linux/bootmem.h>
40 +#include <linux/sched.h>
41 +#include <linux/types.h>
42 +#include <linux/fcntl.h>
43 +#include <linux/interrupt.h>
44 +#include <linux/ptrace.h>
45 +#include <linux/ioport.h>
46 +#include <linux/in.h>
47 +#include <linux/slab.h>
48 +#include <linux/init.h>
49 +#include <linux/bitops.h>
50 +#include <linux/irq.h>
51 +#include <linux/io.h>
52 +#include <linux/pci.h>
53 +#include <linux/errno.h>
54 +#include <linux/delay.h>
55 +#include <linux/netdevice.h>
56 +#include <linux/etherdevice.h>
57 +#include <linux/platform_device.h>
58 +#include <linux/skbuff.h>
59 +#include <linux/ip.h>
60 +#include <linux/if_ether.h>
61 +#include <linux/icmp.h>
62 +#include <linux/udp.h>
63 +#include <linux/tcp.h>
64 +#include <linux/if_arp.h>
67 +#include <mach/hardware.h>
68 +#include <mach/cns21xx.h>
69 +#include <mach/cns21xx_misc.h>
70 +#include <mach/cns21xx_powermgmt.h>
71 +#include <mach/cns21xx_gec_platform.h>
73 +#define DRIVER_NAME "cns21xx-gec"
75 +/* the VSC8601 and the WavePlus PHY use the same PHY address */
76 +#define CNS21XX_GEC_PHY_ADDR 0
78 +#define CNS21XX_GEC_TX_HW_CHECKSUM
79 +#define CNS21XX_GEC_RX_HW_CHECKSUM
81 +#define CNS21XX_PEND_INT_COUNT 16
82 +#define CNS21XX_PEND_INT_TIME 5 /* 5 x 20 usecs */
84 +#define MAX_PACKET_LEN 1536
86 +#define CNS21XX_GEC_NUM_TXDS 48 /* FIXME: the original value of 64 causes UDP failures */
87 +#define CNS21XX_GEC_NUM_RXDS 64
89 +struct cns21xx_gec_mib_info {
93 + u32 mib_rx_over_size;
94 + u32 mib_rx_no_buffer_drop;
96 + u32 mib_rx_arl_drop;
97 + u32 mib_rx_myvid_drop;
98 + u32 mib_rx_csum_err;
99 + u32 mib_rx_pause_frame;
101 + u64 mib_tx_ok_byte;
102 + u32 mib_tx_pause_frame;
106 + * Network driver: receive/send paths and buffer initialization
108 +struct cns21xx_gec_txd {
135 +struct cns21xx_gec_rxd {
166 +struct cns21xx_gec_ring {
172 + struct sk_buff **skbs;
175 +#define CNS21XX_GEC_NUM_VLANS 4
176 +struct cns21xx_gec_vlan {
177 + u32 vid; /* 0~4095 */
178 + u32 control; /* ENABLE or DISABLE */
181 +/* per-device driver state */
182 +struct cns21xx_gec {
183 + struct napi_struct napi;
184 + struct net_device *netdev;
185 + struct device *parent;
186 + struct cns21xx_gec_ring txring;
187 + struct cns21xx_gec_ring rxring;
189 + void __iomem *base;
190 + struct resource *mem_res;
191 + struct cns21xx_gec_plat_data *pdata;
193 + spinlock_t tx_lock;
200 + unsigned long rx_queue_full;
202 + struct cns21xx_gec_vlan vlans[CNS21XX_GEC_NUM_VLANS];
204 + struct timer_list internal_phy_timer;
205 + struct timer_list nic_timer;
208 + struct cns21xx_gec_mib_info mib_info;
211 +#define GEC_REG_PHY_CTRL0 0x000
212 +#define GEC_REG_PHY_CTRL1 0x004
213 +#define GEC_REG_MAC_CFG 0x008
214 +#define GEC_REG_FC_CFG 0x00c
215 +#define GEC_REG_ARL_CFG 0x010
216 +#define GEC_REG_MY_MAC_H 0x014
217 +#define GEC_REG_MY_MAC_L 0x018
218 +#define GEC_REG_HASH_CTRL 0x01c
219 +#define GEC_REG_VLAN_CTRL 0x020
220 +#define GEC_REG_VLAN_ID_0_1 0x024
221 +#define GEC_REG_VLAN_ID_2_3 0x028
222 +#define GEC_REG_DMA_CFG 0x030
223 +#define GEC_REG_TX_DMA_CTRL 0x034
224 +#define GEC_REG_RX_DMA_CTRL 0x038
225 +#define GEC_REG_TX_DPTR 0x03c
226 +#define GEC_REG_RX_DPTR 0x040
227 +#define GEC_REG_TX_BASE_ADDR 0x044
228 +#define GEC_REG_RX_BASE_ADDR 0x048
229 +#define GEC_REG_DLY_INT_CFG 0x04c
230 +#define GEC_REG_INT 0x050
231 +#define GEC_REG_INT_MASK 0x054
232 +#define GEC_REG_TEST0 0x058
233 +#define GEC_REG_TEST1 0x05c
234 +#define GEC_REG_EXTEND_CFG 0x060
236 +#define GEC_REG_RX_OK_PKT_CNTR 0x100
237 +#define GEC_REG_RX_OK_BYTE_CNTR 0x104
238 +#define GEC_REG_RX_RUNT_BYTE_CNTR 0x108
239 +#define GEC_REG_RX_OSIZE_DROP_PKT_CNTR 0x10c
240 +#define GEC_REG_RX_NO_BUF_DROP_PKT_CNTR 0x110
241 +#define GEC_REG_RX_CRC_ERR_PKT_CNTR 0x114
242 +#define GEC_REG_RX_ARL_DROP_PKT_CNTR 0x118
243 +#define GEC_REG_MYVLANID_MISMATCH_DROP_PKT_CNTR 0x11c
244 +#define GEC_REG_RX_CHKSUM_ERR_PKT_CNTR 0x120
245 +#define GEC_REG_RX_PAUSE_FRAME_PKT_CNTR 0x124
246 +#define GEC_REG_TX_OK_PKT_CNTR 0x128
247 +#define GEC_REG_TX_OK_BYTE_CNTR 0x12c
248 +#define GEC_REG_TX_COLLISION_CNTR 0x130
249 +#define GEC_REG_TX_PAUSE_FRAME_CNTR 0x130
250 +#define GEC_REG_TX_FIFO_UNDERRUN_RETX_CNTR 0x134
252 +#define GEC_INT_MIB_COUNTER_TH BIT(3)
253 +#define GEC_INT_PORT_STATUS_CHG BIT(2)
255 +#define FE_PHY_LED_MODE (0x1 << 12)
257 +static void internal_phy_init_timer(struct cns21xx_gec *gec);
258 +static void internal_phy_start_timer(struct cns21xx_gec *gec);
259 +static void internal_phy_stop_timer(struct cns21xx_gec *gec);
261 +static void cns21xx_gec_phy_powerdown(struct cns21xx_gec *gec);
262 +static void cns21xx_gec_phy_powerup(struct cns21xx_gec *gec);
264 +static inline u32 cns21xx_gec_rr(struct cns21xx_gec *gec, unsigned int reg)
266 + return __raw_readl(gec->base + reg);
269 +static inline void cns21xx_gec_wr(struct cns21xx_gec *gec, unsigned int reg,
272 + __raw_writel(val, gec->base + reg);
275 +static void cns21xx_gec_timer_func(unsigned long data)
277 + struct cns21xx_gec *gec = (struct cns21xx_gec *) data;
278 + struct cns21xx_gec_ring *txring = &gec->txring;
282 + int skb_free_count = 0;
283 + struct cns21xx_gec_txd *txd;
284 + unsigned long flags;
286 + local_irq_save(flags);
287 + txsd_current = cns21xx_gec_rr(gec, GEC_REG_TX_DPTR);
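+ /*
+  * each descriptor appears to be 16 bytes wide, so the byte offset
+  * from the ring base is converted to a descriptor index by the
+  * shift by 4 below (the RX path does the same conversion)
+  */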
288 + txsd_index = (txsd_current - (u32)txring->desc_dma) >> 4;
289 + if (txsd_index > txring->dirty) {
290 + skb_free_count = txsd_index - txring->dirty;
291 + } else if (txsd_index <= txring->dirty) {
292 + skb_free_count = txring->count + txsd_index -
295 + for (i = 0; i < skb_free_count; i++) {
296 + txd = ((struct cns21xx_gec_txd *) txring->desc_cpu) +
299 + if (txd->cown == 0)
302 + if (txring->skbs[txring->dirty]) {
303 + dev_kfree_skb_any(txring->skbs[txring->dirty]);
304 + txring->skbs[txring->dirty] = NULL;
306 + dma_unmap_single(gec->parent,
313 + if (txring->dirty == txring->count)
316 + local_irq_restore(flags);
319 +static void __init cns21xx_gec_timer_init(struct cns21xx_gec *gec)
321 + init_timer(&gec->nic_timer);
322 + gec->nic_timer.function = &cns21xx_gec_timer_func;
323 + gec->nic_timer.data = (unsigned long) gec;
326 +static void cns21xx_gec_timer_modify(struct cns21xx_gec *gec, unsigned int t)
328 + mod_timer(&gec->nic_timer, jiffies + t);
331 +static int cns21xx_gec_write_phy(struct cns21xx_gec *gec,
332 + u8 addr, u8 reg, u16 val)
336 + if (addr > 31 || reg > 31)
339 + /* clear previous rw_ok status */
340 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0, 0x1 << 15);
342 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0,
343 + addr | (reg << 8) | (val << 16) | (0x1 << 13));
345 + for (i = 0; i < 10000; i++) {
348 + status = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL0);
349 + if (status & (0x1 << 15)) {
351 + * clear the rw_ok status
352 + * and the other bits
354 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0, (0x1 << 15));
360 + dev_err(&gec->netdev->dev,
361 + "%s timed out, phy_addr:0x%x, phy_reg:0x%x, write_data:0x%x\n",
362 + __func__, addr, reg, val);
367 +static int cns21xx_gec_read_phy(struct cns21xx_gec *gec,
368 + u8 addr, u8 reg, u16 *val)
372 + if (addr > 31 || reg > 31)
375 + /* clear previous rw_ok status */
376 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0, 0x1 << 15);
378 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0,
379 + addr | (reg << 8) | (0x1 << 14));
381 + for (i = 0; i < 10000; i++) {
384 + status = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL0);
385 + if (status & (0x1 << 15)) {
387 + * clear the rw_ok status
388 + * and the other bits
390 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0, (0x1 << 15));
391 + *val = (status >> 16) & 0xffff;
397 + dev_err(&gec->netdev->dev,
398 + "%s timed out, phy_addr:0x%x, phy_reg:0x%x\n",
399 + __func__, addr, reg);
405 +static void cns21xx_gec_dma_config(struct cns21xx_gec *gec)
407 + u32 dma_config = 0;
409 + dma_config = cns21xx_gec_rr(gec, GEC_REG_DMA_CFG);
411 + /* Config TX DMA */
412 + /* clear the TX auto-polling interval field */
413 + dma_config &= ~(0x3 << 6);
414 + /* set the TX auto-polling interval to 100 us */
415 + dma_config |= (0x2 << 6);
416 + /* TX auto polling C-bit enable */
417 + dma_config |= (0x1 << 5);
418 + /* TX can transmit packets, No suspend */
419 + dma_config &= ~(0x1 << 4);
421 + /* Config RX DMA */
422 + /* clear the RX auto-polling interval field */
423 + dma_config &= ~(0x3 << 2);
424 + /* set the RX auto-polling interval to 100 us */
425 + dma_config |= (0x2 << 2);
426 + /* RX auto polling C-bit enable */
427 + dma_config |= (0x1 << 1);
428 + /* RX can receive packets, No suspend */
429 + dma_config &= ~0x1;
431 + /* use 4N+2 RX buffer alignment (as expected by Linux) */
432 + dma_config &= ~(0x1 << 16);
434 + cns21xx_gec_wr(gec, GEC_REG_DMA_CFG, dma_config);
437 +static void cns21xx_gec_mac_config(struct cns21xx_gec *gec)
441 + mac_config = cns21xx_gec_rr(gec, GEC_REG_MAC_CFG);
443 +#ifdef CNS21XX_GEC_TX_HW_CHECKSUM
444 + /* Tx ChkSum offload On: TCP/UDP/IP */
445 + mac_config |= (0x1 << 26);
447 + /* Tx ChkSum offload Off: TCP/UDP/IP */
448 + mac_config &= ~(0x1 << 26);
451 +#ifdef CNS21XX_GEC_RX_HW_CHECKSUM
452 + /* Rx ChkSum offload On: TCP/UDP/IP */
453 + mac_config |= (0x1 << 25);
455 + /* Rx ChkSum offload Off: TCP/UDP/IP */
456 + mac_config &= ~(0x1 << 25);
459 + /* Accept CSUM error pkt */
460 + mac_config |= (0x1 << 24);
462 + mac_config &= ~(0x1 << 23);
463 + /* Strip vlan tag */
464 + mac_config |= (0x1 << 22);
465 + /* Accept CRC error pkt */
466 + mac_config |= (0x1 << 21);
468 + mac_config |= (0x1 << 20);
470 + /* Discard oversize pkt */
471 + mac_config &= ~(0x1 << 18);
473 + /* clear, set 1518 */
474 + mac_config &= ~(0x3 << 16);
477 + mac_config |= (0x2 << 16);
480 + mac_config |= (0x1f << 10);
482 + /* do not drop a packet after 16 consecutive collisions; */
483 + /* allow it to be retransmitted */
484 + mac_config |= (0x1 << 9);
486 + mac_config |= (0x1 << 8);
488 + cns21xx_gec_wr(gec, GEC_REG_MAC_CFG, mac_config);
491 +static void cns21xx_gec_fc_config(struct cns21xx_gec *gec)
495 + fc_config = cns21xx_gec_rr(gec, GEC_REG_FC_CFG);
497 + /* threshold for sending a pause-on frame */
499 + fc_config &= ~(0xfff << 16);
500 + fc_config |= (0x360 << 16);
501 + /* Disable UC_PAUSE */
502 + fc_config &= ~(0x1 << 8);
503 + /* Enable Half Duplex backpressure */
504 + fc_config |= (0x1 << 7);
505 + /* Collision-based BP */
506 + fc_config &= ~(0x1 << 6);
507 + /* Disable max BP collision */
508 + fc_config &= ~(0x1 << 5);
510 + fc_config &= ~(0x1f);
512 + fc_config |= (0xc);
514 + cns21xx_gec_wr(gec, GEC_REG_FC_CFG, fc_config);
517 +static void cns21xx_gec_internal_phy_config(struct cns21xx_gec *gec)
522 + dev_info(&gec->netdev->dev, "Internal PHY\n");
524 + phy_addr = CNS21XX_GEC_PHY_ADDR;
525 + gec->phy_addr = phy_addr;
527 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
529 + /* set phy addr for auto-polling */
530 + phy_ctrl1 |= (phy_addr & 0x1f) << 24;
532 + /* set internal phy mode */
533 + /* internal 10/100 PHY */
534 + phy_ctrl1 |= 0x1 << 18;
537 + phy_ctrl1 &= ~(0x1 << 17);
540 + phy_ctrl1 &= ~(0x1 << 16);
542 + /* config PHY LED bit[13:12] */
543 + cns21xx_gec_read_phy(gec, phy_addr, 31, (u16 *)(&phy_ctrl1));
544 + /* clear LED control */
545 + phy_ctrl1 &= ~(0x3 << 12);
546 + phy_ctrl1 |= FE_PHY_LED_MODE;
547 + cns21xx_gec_write_phy(gec, phy_addr, 31, phy_ctrl1);
549 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
552 +static void cns21xx_gec_vsc8601_phy_config(struct cns21xx_gec *gec)
558 + phy_addr = CNS21XX_GEC_PHY_ADDR;
559 + gec->phy_addr = phy_addr;
561 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
563 + /* phy addr for auto-polling */
564 + phy_ctrl1 |= phy_addr << 24;
566 + /* set external phy mode */
567 + phy_ctrl1 &= ~(0x1 << 18);
570 + phy_ctrl1 |= (0x1 << 17);
572 + /* set MII interface */
573 + phy_ctrl1 &= ~(0x1 << 16);
575 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
577 + /* set phy addr for auto-polling */
578 + phy_ctrl1 |= phy_addr << 24;
580 + /* set external phy mode */
581 + /* MII/RGMII interface */
582 + phy_ctrl1 &= ~(0x1 << 18);
585 + phy_ctrl1 |= (0x1 << 17);
588 + phy_ctrl1 &= ~(0x1 << 16);
590 + cns21xx_gec_read_phy(gec, phy_addr, 3, &phy_data);
591 + if ((phy_data & 0x000f) == 0x0000) {
595 + dev_info(&gec->netdev->dev, "VSC8601 Type A Chip\n");
596 + cns21xx_gec_write_phy(gec, phy_addr, 31, 0x52B5);
597 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0xAF8A);
600 + cns21xx_gec_read_phy(gec, phy_addr, 18, &tmp16);
601 + phy_data |= (tmp16 & ~0x0);
602 + cns21xx_gec_write_phy(gec, phy_addr, 18, phy_data);
605 + cns21xx_gec_read_phy(gec, phy_addr, 17, &tmp16);
606 + phy_data |= (tmp16 & ~0x000C);
607 + cns21xx_gec_write_phy(gec, phy_addr, 17, phy_data);
609 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0x8F8A);
610 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0xAF86);
613 + cns21xx_gec_read_phy(gec, phy_addr, 18, &tmp16);
614 + phy_data |= (tmp16 & ~0x000C);
615 + cns21xx_gec_write_phy(gec, phy_addr, 18, phy_data);
618 + cns21xx_gec_read_phy(gec, phy_addr, 17, &tmp16);
619 + phy_data |= (tmp16 & ~0x0);
620 + cns21xx_gec_write_phy(gec, phy_addr, 17, phy_data);
622 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0x8F8A);
624 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0xAF82);
627 + cns21xx_gec_read_phy(gec, phy_addr, 18, &tmp16);
628 + phy_data |= (tmp16 & ~0x0);
629 + cns21xx_gec_write_phy(gec, phy_addr, 18, phy_data);
632 + cns21xx_gec_read_phy(gec, phy_addr, 17, &tmp16);
633 + phy_data |= (tmp16 & ~0x0180);
634 + cns21xx_gec_write_phy(gec, phy_addr, 17, phy_data);
636 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0x8F82);
638 + cns21xx_gec_write_phy(gec, phy_addr, 31, 0x0);
640 + /* Set port type: single port */
641 + cns21xx_gec_read_phy(gec, phy_addr, 9, &phy_data);
642 + phy_data &= ~(0x1 << 10);
643 + cns21xx_gec_write_phy(gec, phy_addr, 9, phy_data);
644 + } else if ((phy_data & 0x000f) == 0x0001) {
646 + dev_info(&gec->netdev->dev, "VSC8601 Type B Chip\n");
648 + cns21xx_gec_read_phy(gec, phy_addr, 23, &phy_data);
649 + phy_data |= (0x1 << 8); /* set RGMII timing skew */
650 + cns21xx_gec_write_phy(gec, phy_addr, 23, phy_data);
653 + /* switch to the extended registers */
654 + cns21xx_gec_write_phy(gec, phy_addr, 31, 0x0001);
656 + cns21xx_gec_read_phy(gec, phy_addr, 28, &phy_data);
657 + phy_data &= ~(0x3 << 14); /* set RGMII TX timing skew */
658 + phy_data |= (0x3 << 14); /* 2.0ns */
659 + phy_data &= ~(0x3 << 12); /* set RGMII RX timing skew */
660 + phy_data |= (0x3 << 12); /* 2.0ns */
661 + cns21xx_gec_write_phy(gec, phy_addr, 28, phy_data);
663 + /* change to normal registers */
664 + cns21xx_gec_write_phy(gec, phy_addr, 31, 0x0000);
667 + /* set TX and RX clock skew */
668 + cns21xx_gec_wr(gec, GEC_REG_TEST0, (0x2 << 2) | (0x2 << 0));
671 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
674 +static void cns21xx_gec_ip101a_phy_config(struct cns21xx_gec *gec)
679 + dev_info(&gec->netdev->dev, "ICPlus IP101A\n");
682 + gec->phy_addr = phy_addr;
684 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
686 + /* set phy addr for auto-polling */
687 + phy_ctrl1 |= phy_addr << 24;
689 + /* set external phy mode */
690 + /* MII/RGMII interface */
691 + phy_ctrl1 &= ~(0x1 << 18);
694 + phy_ctrl1 &= ~(0x1 << 17);
697 + phy_ctrl1 &= ~(0x1 << 16);
699 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
702 +static void cns21xx_gec_ip1001_phy_config(struct cns21xx_gec *gec)
708 + dev_info(&gec->netdev->dev, "ICPlus IP1001\n");
711 + gec->phy_addr = phy_addr;
713 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
715 + /* set phy addr for auto-polling */
716 + phy_ctrl1 |= phy_addr << 24;
718 + /* set external phy mode */
719 + /* MII/RGMII interface */
720 + phy_ctrl1 &= ~(0x1 << 18);
723 + phy_ctrl1 |= (0x1 << 17);
726 + phy_ctrl1 &= ~(0x1 << 16);
728 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
729 + cns21xx_gec_read_phy(gec, phy_addr, 2, &phy_data);
731 + /* set AN capability */
732 + cns21xx_gec_read_phy(gec, phy_addr, 4, &phy_data);
733 + /* clear existing values */
734 + phy_data &= ~(0xf << 5);
736 + phy_data |= (0x1 << 5);
738 + phy_data |= (0x1 << 6);
740 + phy_data |= (0x1 << 7);
742 + phy_data |= (0x1 << 8);
744 + phy_data |= (0x1 << 10);
745 + cns21xx_gec_write_phy(gec, phy_addr, 4, phy_data);
747 + cns21xx_gec_read_phy(gec, phy_addr, 9, &phy_data);
749 + phy_data |= (0x1 << 9);
750 + phy_data &= ~(0x1 << 10);
751 + phy_data |= (0x1 << 12);
752 + cns21xx_gec_write_phy(gec, phy_addr, 9, phy_data);
754 + cns21xx_gec_read_phy(gec, phy_addr, 16, &phy_data);
755 + /* Smart function off */
756 + phy_data &= ~(0x1 << 11);
758 + phy_data |= (0x1 << 0);
760 + phy_data |= (0x1 << 1);
761 + cns21xx_gec_write_phy(gec, phy_addr, 16, phy_data);
763 + cns21xx_gec_read_phy(gec, phy_addr, 16, &phy_data);
766 + cns21xx_gec_read_phy(gec, phy_addr, 20, &phy_data);
767 + phy_data &= ~(0x1<<2);
769 + phy_data |= (0x1<<9);
770 + cns21xx_gec_write_phy(gec, phy_addr, 20, phy_data);
773 + cns21xx_gec_read_phy(gec, phy_addr, 0, &phy_data);
774 + phy_data |= (0x1 << 9); /* restart auto-negotiation */
775 + cns21xx_gec_write_phy(gec, phy_addr, 0, phy_data);
777 + cns21xx_gec_read_phy(gec, phy_addr, 9, &phy_data);
779 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
782 +static int cns21xx_gec_phy_config(struct cns21xx_gec *gec)
786 + switch (gec->pdata->phy_type) {
787 + case CNS21XX_GEC_PHY_TYPE_INTERNAL:
788 + cns21xx_gec_internal_phy_config(gec);
791 + case CNS21XX_GEC_PHY_TYPE_VSC8601:
792 + cns21xx_gec_vsc8601_phy_config(gec);
795 + case CNS21XX_GEC_PHY_TYPE_IP101A:
796 + cns21xx_gec_ip101a_phy_config(gec);
799 + case CNS21XX_GEC_PHY_TYPE_IP1001:
800 + cns21xx_gec_ip1001_phy_config(gec);
807 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
810 + phy_ctrl1 |= (0x1 << 8);
811 + if (!((phy_ctrl1 >> 8) & 0x1)) { /* AN is disabled */
812 + /* Force to FullDuplex mode */
813 + phy_ctrl1 &= ~(0x1 << 11); /* Half */
815 + /* Force to 100Mbps mode */
816 + phy_ctrl1 &= ~(0x3 << 9); /* clear to 10M */
817 + phy_ctrl1 |= (0x1 << 9); /* set to 100M */
820 + /* Force TX flow control on in 1000M mode */
821 + phy_ctrl1 |= (0x1 << 13);
823 + /* Force TX flow control on in 10/100M mode */
824 + phy_ctrl1 |= (0x1 << 12);
826 + /* Enable MII auto polling */
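+ /*
+  * with auto-polling enabled the MAC mirrors the PHY link, speed and
+  * duplex state into PHY_CTRL1; the status ISR reads those bits
+  * directly, which is presumably why this driver does not attach to
+  * phylib
+  */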
827 + phy_ctrl1 &= ~(0x1 << 7);
829 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
830 + cns21xx_gec_phy_powerdown(gec);
835 +static void cns21xx_gec_vlan_config(struct cns21xx_gec *gec)
837 + /* setup VLAN entries */
838 + gec->vlans[0].vid = 2;
839 + gec->vlans[0].control = 0;
840 + gec->vlans[1].vid = 2;
841 + gec->vlans[1].control = 1;
842 + gec->vlans[2].vid = 1;
843 + gec->vlans[2].control = 1;
844 + gec->vlans[3].vid = 1;
845 + gec->vlans[3].control = 0;
847 + cns21xx_gec_wr(gec, GEC_REG_VLAN_ID_0_1,
848 + (gec->vlans[0].vid & 0x0fff) |
849 + ((gec->vlans[1].vid & 0x0fff) << 16));
851 + cns21xx_gec_wr(gec, GEC_REG_VLAN_ID_2_3,
852 + (gec->vlans[2].vid & 0x0fff) |
853 + ((gec->vlans[3].vid & 0x0fff) << 16));
855 + cns21xx_gec_wr(gec, GEC_REG_VLAN_CTRL,
856 + (gec->vlans[0].control << 0) |
857 + (gec->vlans[1].control << 1) |
858 + (gec->vlans[2].control << 2) |
859 + (gec->vlans[3].control << 3));
862 +static int cns21xx_gec_arl_config(struct cns21xx_gec *gec)
866 + arl_config = cns21xx_gec_rr(gec, GEC_REG_ARL_CFG);
869 + arl_config |= (0x1 << 4);
871 + /* My MAC only enable */
872 + arl_config |= (0x1 << 3);
875 + arl_config &= ~(0x1 << 2);
877 + /* Forward MC to CPU */
878 + arl_config &= ~(0x1 << 1);
880 + /* Hash direct mode */
881 + arl_config &= ~(0x1);
883 + cns21xx_gec_wr(gec, GEC_REG_ARL_CFG, arl_config);
888 +static void cns21xx_gec_phy_powerdown(struct cns21xx_gec *gec)
892 + cns21xx_gec_read_phy(gec, gec->phy_addr, 0, &phy_data);
893 + phy_data |= (0x1 << 11);
894 + cns21xx_gec_write_phy(gec, gec->phy_addr, 0, phy_data);
896 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
897 + PWRMGT_SOFTWARE_RESET_CONTROL_REG &= ~(0x1 << 15);
900 +static void cns21xx_gec_phy_powerup(struct cns21xx_gec *gec)
904 + cns21xx_gec_read_phy(gec, gec->phy_addr, 0, &phy_data);
905 + phy_data &= ~(0x1 << 11);
906 + cns21xx_gec_write_phy(gec, gec->phy_addr, 0, phy_data);
908 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
911 +static void cns21xx_gec_enable(struct cns21xx_gec *gec)
914 + cns21xx_gec_wr(gec, GEC_REG_RX_DMA_CTRL, 1);
916 + cns21xx_gec_phy_powerup(gec);
917 + internal_phy_start_timer(gec);
920 +static void cns21xx_gec_shutdown(struct cns21xx_gec *gec)
922 + /* stop Rx and Tx DMA */
923 + cns21xx_gec_wr(gec, GEC_REG_RX_DMA_CTRL, 0);
924 + cns21xx_gec_wr(gec, GEC_REG_TX_DMA_CTRL, 0);
926 + internal_phy_stop_timer(gec);
929 +static irqreturn_t cns21xx_gec_receive_isr(int irq, void *dev_id)
931 + struct cns21xx_gec *gec = dev_id;
933 + if (!test_bit(NAPI_STATE_SCHED, &gec->napi.state)) {
934 + if (likely(napi_schedule_prep(&gec->napi)))
935 + __napi_schedule(&gec->napi);
938 + return IRQ_HANDLED;
941 +static int cns21xx_gec_install_receive_isr(struct cns21xx_gec *gec)
945 + err = request_irq(gec->rxrc_irq, cns21xx_gec_receive_isr,
946 + IRQF_SHARED, dev_name(&gec->netdev->dev), gec);
948 + dev_err(&gec->netdev->dev,
949 + "unable to get IRQ %d (err=%d)\n",
950 + gec->rxrc_irq, err);
955 +static void cns21xx_gec_uninstall_receive_isr(struct cns21xx_gec *gec)
957 + free_irq(gec->rxrc_irq, gec);
960 +static irqreturn_t cns21xx_gec_rxqf_isr(int irq, void *dev_id)
962 + struct cns21xx_gec *gec = dev_id;
965 + * in the normal case the RX queue-full interrupt fires only once,
966 + * and set_bit() is atomic, so there is no need to mask it
968 + set_bit(0, &gec->rx_queue_full);
969 + if (!test_bit(NAPI_STATE_SCHED, &gec->napi.state)) {
970 + if (likely(napi_schedule_prep(&gec->napi)))
971 + __napi_schedule(&gec->napi);
974 + return IRQ_HANDLED;
977 +static int cns21xx_gec_install_rxqf_isr(struct cns21xx_gec *gec)
981 + /* QUEUE full interrupt handler */
982 + err = request_irq(gec->rxqf_irq, cns21xx_gec_rxqf_isr,
983 + IRQF_SHARED, dev_name(&gec->netdev->dev), gec);
985 + dev_err(&gec->netdev->dev,
986 + "unable to get IRQ %d (err=%d)\n",
987 + gec->rxqf_irq, err);
992 +static void cns21xx_gec_uninstall_rxqf_isr(struct cns21xx_gec *gec)
994 + free_irq(gec->rxqf_irq, gec);
997 +static void cns21xx_gec_mib_reset(struct cns21xx_gec *gec)
999 + unsigned long flags;
1001 + local_irq_save(flags);
1002 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_OK_PKT_CNTR);
1003 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_OK_BYTE_CNTR);
1004 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_RUNT_BYTE_CNTR);
1005 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_OSIZE_DROP_PKT_CNTR);
1006 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_NO_BUF_DROP_PKT_CNTR);
1007 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_CRC_ERR_PKT_CNTR);
1008 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_ARL_DROP_PKT_CNTR);
1009 + (void) cns21xx_gec_rr(gec, GEC_REG_MYVLANID_MISMATCH_DROP_PKT_CNTR);
1010 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_CHKSUM_ERR_PKT_CNTR);
1011 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_PAUSE_FRAME_PKT_CNTR);
1012 + (void) cns21xx_gec_rr(gec, GEC_REG_TX_OK_PKT_CNTR);
1013 + (void) cns21xx_gec_rr(gec, GEC_REG_TX_OK_BYTE_CNTR);
1014 + (void) cns21xx_gec_rr(gec, GEC_REG_TX_PAUSE_FRAME_CNTR);
1015 + local_irq_restore(flags);
1018 +static void cns21xx_gec_mib_read(struct cns21xx_gec *gec)
1020 + unsigned long flags;
1022 + local_irq_save(flags);
1023 + gec->mib_info.mib_rx_ok_pkt +=
1024 + cns21xx_gec_rr(gec, GEC_REG_RX_OK_PKT_CNTR);
1025 + gec->mib_info.mib_rx_ok_byte +=
1026 + cns21xx_gec_rr(gec, GEC_REG_RX_OK_BYTE_CNTR);
1027 + gec->mib_info.mib_rx_runt +=
1028 + cns21xx_gec_rr(gec, GEC_REG_RX_RUNT_BYTE_CNTR);
1029 + gec->mib_info.mib_rx_over_size +=
1030 + cns21xx_gec_rr(gec, GEC_REG_RX_OSIZE_DROP_PKT_CNTR);
1031 + gec->mib_info.mib_rx_no_buffer_drop +=
1032 + cns21xx_gec_rr(gec, GEC_REG_RX_NO_BUF_DROP_PKT_CNTR);
1033 + gec->mib_info.mib_rx_crc_err +=
1034 + cns21xx_gec_rr(gec, GEC_REG_RX_CRC_ERR_PKT_CNTR);
1035 + gec->mib_info.mib_rx_arl_drop +=
1036 + cns21xx_gec_rr(gec, GEC_REG_RX_ARL_DROP_PKT_CNTR);
1037 + gec->mib_info.mib_rx_myvid_drop +=
1038 + cns21xx_gec_rr(gec, GEC_REG_MYVLANID_MISMATCH_DROP_PKT_CNTR);
1039 + gec->mib_info.mib_rx_csum_err +=
1040 + cns21xx_gec_rr(gec, GEC_REG_RX_CHKSUM_ERR_PKT_CNTR);
1041 + gec->mib_info.mib_rx_pause_frame +=
1042 + cns21xx_gec_rr(gec, GEC_REG_RX_PAUSE_FRAME_PKT_CNTR);
1043 + gec->mib_info.mib_tx_ok_pkt +=
1044 + cns21xx_gec_rr(gec, GEC_REG_TX_OK_PKT_CNTR);
1045 + gec->mib_info.mib_tx_ok_byte +=
1046 + cns21xx_gec_rr(gec, GEC_REG_TX_OK_BYTE_CNTR);
1047 + gec->mib_info.mib_tx_pause_frame +=
1048 + cns21xx_gec_rr(gec, GEC_REG_TX_PAUSE_FRAME_CNTR);
1049 + local_irq_restore(flags);
1052 +static const char *cns21xx_gec_speed_str(u32 phy_ctrl1)
1054 + switch ((phy_ctrl1 >> 2) & 3) {
1066 +static irqreturn_t cns21xx_gec_status_isr(int irq, void *dev_id)
1068 + struct cns21xx_gec *gec = dev_id;
1071 + int_status = cns21xx_gec_rr(gec, GEC_REG_INT);
1072 + cns21xx_gec_wr(gec, GEC_REG_INT, int_status);
1075 + (void) cns21xx_gec_rr(gec, GEC_REG_INT);
1077 + if (int_status & GEC_INT_MIB_COUNTER_TH)
1078 + cns21xx_gec_mib_read(gec);
1080 + if (int_status & GEC_INT_PORT_STATUS_CHG) {
1083 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
1084 + if (phy_ctrl1 & BIT(0)) {
1085 + netif_carrier_on(gec->netdev);
1086 + dev_info(&gec->netdev->dev,
1087 + "link up (%sMbps/%s duplex)\n",
1088 + cns21xx_gec_speed_str(phy_ctrl1),
1089 + (phy_ctrl1 & BIT(4)) ? "Full" : "Half");
1091 + netif_carrier_off(gec->netdev);
1092 + dev_info(&gec->netdev->dev, "link down\n");
1096 + return IRQ_HANDLED;
1099 +static inline void cns21xx_gec_enable_interrupt(struct cns21xx_gec *gec,
1102 + cns21xx_gec_wr(gec, GEC_REG_INT_MASK,
1103 + cns21xx_gec_rr(gec, GEC_REG_INT_MASK) & ~mask);
1106 +static int cns21xx_gec_install_status_isr(struct cns21xx_gec *gec)
1110 + err = request_irq(gec->status_irq, cns21xx_gec_status_isr,
1111 + IRQF_DISABLED, dev_name(&gec->netdev->dev), gec);
1114 + dev_err(&gec->netdev->dev,
1115 + "unable to get IRQ %d (err=%d)\n",
1116 + gec->status_irq, err);
1120 + cns21xx_gec_enable_interrupt(gec, GEC_INT_MIB_COUNTER_TH |
1121 + GEC_INT_PORT_STATUS_CHG);
1126 +static inline void cns21xx_gec_uninstall_status_isr(struct cns21xx_gec *gec)
1128 + free_irq(gec->status_irq, gec);
1131 +static void cns21xx_gec_uninstall_isr(struct cns21xx_gec *gec)
1133 + cns21xx_gec_uninstall_rxqf_isr(gec);
1134 + cns21xx_gec_uninstall_status_isr(gec);
1135 + cns21xx_gec_uninstall_receive_isr(gec);
1138 +static int cns21xx_gec_install_isr(struct cns21xx_gec *gec)
1142 + /* setup delayed interrupts */
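+ /*
+  * delay the RX interrupt until CNS21XX_PEND_INT_COUNT packets are
+  * pending or CNS21XX_PEND_INT_TIME x 20 us have elapsed, whichever
+  * happens first (assuming the register fields match the defines above)
+  */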
1143 + cns21xx_gec_wr(gec, GEC_REG_DLY_INT_CFG,
1145 + ((CNS21XX_PEND_INT_COUNT & 0xFF) << 8) |
1146 + (CNS21XX_PEND_INT_TIME & 0xFF));
1148 + err = cns21xx_gec_install_receive_isr(gec);
1152 + err = cns21xx_gec_install_rxqf_isr(gec);
1154 + goto err_uninstall_receive;
1156 + err = cns21xx_gec_install_status_isr(gec);
1158 + goto err_uninstall_rxqf;
1162 + err_uninstall_rxqf:
1163 + cns21xx_gec_uninstall_rxqf_isr(gec);
1164 + err_uninstall_receive:
1165 + cns21xx_gec_uninstall_receive_isr(gec);
1170 +static int cns21xx_gec_lan_open(struct net_device *dev)
1172 + struct cns21xx_gec *gec = netdev_priv(dev);
1175 + dev_dbg(&gec->netdev->dev, "open\n");
1178 + MOD_INC_USE_COUNT;
1181 + napi_enable(&gec->napi);
1182 + netif_start_queue(dev);
1183 + err = cns21xx_gec_install_isr(gec);
1187 + cns21xx_gec_enable(gec);
1192 + netif_stop_queue(dev);
1193 + napi_disable(&gec->napi);
1197 +static void cns21xx_gec_timeout(struct net_device *dev)
1199 + dev_dbg(&dev->dev, "timeout\n");
1200 + netif_wake_queue(dev);
1201 + dev->trans_start = jiffies;
1204 +static int cns21xx_gec_close(struct net_device *dev)
1206 + struct cns21xx_gec *gec = netdev_priv(dev);
1208 + cns21xx_gec_phy_powerdown(gec);
1209 + cns21xx_gec_uninstall_isr(gec);
1210 + napi_disable(&gec->napi);
1211 + netif_stop_queue(dev);
1212 + cns21xx_gec_shutdown(gec);
1215 + MOD_DEC_USE_COUNT;
1221 +static inline struct sk_buff *cns21xx_gec_alloc_skb(void)
1223 + struct sk_buff *skb;
1225 + skb = dev_alloc_skb(MAX_PACKET_LEN + 2);
1227 + if (unlikely(!skb))
1230 + /* Make the buffer start 2 bytes beyond a 16 byte boundary;
1231 + * this results in a 16 byte aligned IP header after the
1232 + * 14 byte MAC header is removed */
1234 + skb_reserve(skb, 2); /* align the IP header */
1239 +static inline int cns21xx_gec_tx_dma_size(struct cns21xx_gec *gec)
1241 + return gec->txring.count * sizeof(struct cns21xx_gec_txd);
1244 +static inline int cns21xx_gec_rx_dma_size(struct cns21xx_gec *gec)
1246 + return gec->rxring.count * sizeof(struct cns21xx_gec_rxd);
1249 +static void __init cns21xx_gec_buffer_free(struct cns21xx_gec *gec)
1251 + struct cns21xx_gec_ring *txring = &gec->txring;
1252 + struct cns21xx_gec_ring *rxring = &gec->rxring;
1255 + if (rxring->desc_cpu) {
1256 + for (i = 0; i < rxring->count; i++) {
1257 + if (rxring->skbs[i])
1258 + dev_kfree_skb(rxring->skbs[i]);
1261 + dma_free_coherent(gec->parent, cns21xx_gec_rx_dma_size(gec),
1262 + rxring->desc_cpu, rxring->desc_dma);
1263 + rxring->desc_cpu = NULL;
1266 + if (txring->desc_cpu) {
1267 + dma_free_coherent(gec->parent, cns21xx_gec_tx_dma_size(gec),
1268 + txring->desc_cpu, txring->desc_dma);
1269 + txring->desc_cpu = NULL;
1272 + kfree(txring->skbs);
1273 + kfree(rxring->skbs);
1276 +static int __init cns21xx_gec_buffer_alloc(struct cns21xx_gec *gec)
1278 + struct cns21xx_gec_ring *txring = &gec->txring;
1279 + struct cns21xx_gec_ring *rxring = &gec->rxring;
1280 + struct cns21xx_gec_rxd *rxd;
1281 + struct cns21xx_gec_txd *txd;
1282 + struct sk_buff *skb;
1283 + int err = -ENOMEM;
1286 + rxring->skbs = kzalloc(rxring->count * sizeof(struct sk_buff *),
1289 + if (rxring->skbs == NULL) {
1290 + dev_err(&gec->netdev->dev,
1291 + "%s allocation failed\n", "RX buffer");
1295 + txring->skbs = kzalloc(txring->count * sizeof(struct sk_buff *),
1298 + if (txring->skbs == NULL) {
1299 + dev_err(&gec->netdev->dev,
1300 + "%s allocation failed\n", "TX buffer");
1304 + rxring->desc_cpu = dma_alloc_coherent(gec->parent,
1305 + cns21xx_gec_rx_dma_size(gec),
1306 + &rxring->desc_dma,
1308 + if (!rxring->desc_cpu) {
1309 + dev_err(&gec->netdev->dev,
1310 + "%s allocation failed\n", "RX ring");
1314 + txring->desc_cpu = dma_alloc_coherent(gec->parent,
1315 + cns21xx_gec_tx_dma_size(gec),
1316 + &txring->desc_dma,
1318 + if (!txring->desc_cpu) {
1319 + dev_err(&gec->netdev->dev,
1320 + "%s allocation failed\n", "TX ring");
1324 + /* Clean RX Memory */
1325 + memset((void *)rxring->desc_cpu, 0, cns21xx_gec_rx_dma_size(gec));
1326 + dev_dbg(&gec->netdev->dev,
1327 + "rxring->desc_cpu=0x%08X rxring->desc_dma=0x%08X\n",
1328 + (u32) rxring->desc_cpu,
1329 + (u32) rxring->desc_dma);
1331 + /* Set cur_index Point to Zero */
1333 + rxd = rxring->desc_cpu;
1334 + for (i = 0; i < rxring->count; i++, rxd++) {
1335 + if (i == (rxring->count - 1)) {
1336 + /* End bit == 0; */
1339 + skb = cns21xx_gec_alloc_skb();
1341 + dev_err(&gec->netdev->dev,
1342 + "%s allocation failed\n", "skb");
1346 + /* map the receive buffer so the hardware can DMA into it */
1347 + rxring->skbs[i] = skb;
1348 + rxd->sdp = dma_map_single(gec->parent,
1352 + rxd->length = MAX_PACKET_LEN;
1355 + /* Clear TX Memory */
1356 + memset((void *)txring->desc_cpu, 0, cns21xx_gec_tx_dma_size(gec));
1357 + dev_dbg(&gec->netdev->dev,
1358 + "txring->desc_cpu=0x%08X txring->desc_dma=0x%08X\n",
1359 + (u32) txring->desc_cpu,
1360 + (u32) txring->desc_dma);
1362 + /* Set cur_index Point to Zero */
1364 + txd = txring->desc_cpu;
1365 + for (i = 0; i < txring->count; i++, txd++) {
1366 + if (i == (txring->count - 1)) {
1367 + /* end-of-ring bit == 1 */
1370 + /* TX ring: descriptors start out owned by the CPU (cown == 1) */
1373 +#ifdef CNS21XX_GEC_TX_HW_CHECKSUM
1374 + /* Enable Checksum */
1383 + /* clear txring->skbs */
1384 + txring->skbs[i] = NULL;
1390 + cns21xx_gec_buffer_free(gec);
1394 +static int cns21xx_gec_get_rfd_buff(struct cns21xx_gec *gec, int index)
1396 + struct cns21xx_gec_ring *rxring = &gec->rxring;
1397 + struct cns21xx_gec_rxd *rxd;
1398 + struct sk_buff *skb;
1399 + unsigned char *data;
1402 + /* get the RX descriptor pointer */
1403 + rxd = ((struct cns21xx_gec_rxd *) rxring->desc_cpu) + index;
1404 + skb = rxring->skbs[index];
1406 + len = rxd->length;
1408 + dma_unmap_single(gec->parent, rxd->sdp, len,
1411 + data = skb_put(skb, len);
1413 + skb->dev = gec->netdev;
1415 +#ifdef CNS21XX_GEC_RX_HW_CHECKSUM
1416 + if (rxd->ipf == 1 || rxd->l4f == 1) {
1417 + if (rxd->prot != 0x11) {
1418 + skb->ip_summed = CHECKSUM_NONE;
1420 + /* CheckSum Fail */
1421 + skb->dev->stats.rx_errors++;
1425 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1428 + skb->ip_summed = CHECKSUM_NONE;
1431 + /* required: without this the packet is never passed up to the network layer */
1432 + skb->protocol = eth_type_trans(skb, skb->dev);
1434 + skb->dev->stats.rx_packets++;
1435 + skb->dev->stats.rx_bytes += len;
1436 + skb->dev->last_rx = jiffies;
1438 + /* using netif_rx() here makes the driver crash, so use netif_receive_skb() */
1439 + netif_receive_skb(skb);
1444 + dev_kfree_skb_any(skb);
1448 +static void cns21xx_gec_receive_packet(struct cns21xx_gec *gec,
1449 + int mode, int *work_done, int work_to_do)
1451 + struct cns21xx_gec_ring *rxring = &gec->rxring;
1454 + struct cns21xx_gec_rxd *rxd;
1455 + struct sk_buff *skb;
1456 + int i, rxcount = 0;
1458 + rxd = ((struct cns21xx_gec_rxd *) rxring->desc_cpu) + rxring->curr;
1459 + rxsd_current = cns21xx_gec_rr(gec, GEC_REG_RX_DPTR);
1460 + rxsd_index = (rxsd_current - (u32)rxring->desc_dma) >> 4;
1462 + if (rxsd_index > rxring->curr) {
1463 + rxcount = rxsd_index - rxring->curr;
1464 + } else if (rxsd_index < rxring->curr) {
1465 + rxcount = (rxring->count - rxring->curr) +
1468 + if (rxd->cown == 0) {
1469 + goto receive_packet_exit;
1472 + rxcount = rxring->count;
1476 + for (i = 0; i < rxcount; i++) {
1477 + if (*work_done >= work_to_do)
1482 + if (rxd->cown != 0) {
1483 + /* Alloc New skb_buff */
1484 + skb = cns21xx_gec_alloc_skb();
1486 + /* Check skb_buff */
1487 + if (skb != NULL) {
1488 + cns21xx_gec_get_rfd_buff(gec, rxring->curr);
1489 + rxring->skbs[rxring->curr] = skb;
1491 + dma_map_single(gec->parent,
1495 + rxd->length = MAX_PACKET_LEN;
1497 + /* set cbit to 0 for CPU Transfer */
1501 + * TODO: account this drop in rx_dropped;
1502 + * doing so may affect performance
1504 + dev_warn(&gec->netdev->dev,
1505 + "skb allocation failed, reuse the buffer\n");
1507 + /* set cbit to 0 for CPU Transfer */
1513 + dev_err(&gec->netdev->dev, "encounter COWN == 0 BUG\n");
1517 + if (rxring->curr == (rxring->count - 1)) {
1519 + rxd = rxring->desc_cpu;
1526 + receive_packet_exit:
1530 +static int cns21xx_gec_poll(struct napi_struct *napi, int budget)
1532 + struct cns21xx_gec *gec;
1533 + int work_done = 0;
1534 + int work_to_do = budget;
1536 + gec = container_of(napi, struct cns21xx_gec, napi);
1537 + cns21xx_gec_receive_packet(gec, 0, &work_done, work_to_do);
1539 + budget -= work_done;
1541 + /* if no Tx and not enough Rx work done, exit the polling mode */
1543 + if (test_bit(0, &gec->rx_queue_full) == 1) {
1545 + clear_bit(0, &gec->rx_queue_full);
1546 + /* start Rx DMA */
1547 + cns21xx_gec_wr(gec, GEC_REG_RX_DMA_CTRL, 1);
1551 + napi_complete(&gec->napi);
1552 +#ifdef CONFIG_STAR_NIC_NAPI_MASK_IRQ
1553 + enable_irq(gec->rxrc_irq);
1561 +static int cns21xx_gec_send_packet(struct sk_buff *skb, struct net_device *dev)
1563 + struct cns21xx_gec *gec = netdev_priv(dev);
1564 + struct cns21xx_gec_txd *txd;
1565 + struct cns21xx_gec_ring *txring = &gec->txring;
1566 + struct sk_buff *skb_free = NULL;
1567 + unsigned long flags;
1570 + if (skb_padto(skb, ETH_ZLEN))
1571 + return NETDEV_TX_OK;
1573 + len = max_t(unsigned int, skb->len, ETH_ZLEN);
1575 + spin_lock_irqsave(&gec->tx_lock, flags);
1577 + txd = ((struct cns21xx_gec_txd *) txring->desc_cpu) + txring->curr;
1578 + if (txd->cown == 0) {
1579 + /* This TFD is busy */
1580 + spin_unlock_irqrestore(&gec->tx_lock, flags);
1581 + /* re-queue the skb */
1582 + return NETDEV_TX_BUSY;
1585 + if (txd->sdp != 0) {
1586 + /* must free the skb left over from the previous use of this descriptor */
1587 + skb_free = txring->skbs[txring->curr];
1589 + dma_unmap_single(gec->parent,
1593 + txring->dirty = txring->curr + 1;
1594 + if (txring->dirty == txring->count)
1595 + txring->dirty = 0;
1598 +#ifdef CNS21XX_GEC_TX_HW_CHECKSUM
1599 + if (skb->protocol == __constant_htons(ETH_P_IP)) {
1600 + if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1603 + } else if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
1615 +#endif /* CNS21XX_GEC_TX_HW_CHECKSUM */
1617 + txring->skbs[txring->curr] = skb;
1619 + txd->length = len;
1620 + txd->sdp = dma_map_single(gec->parent, skb->data, len,
1626 + /* Wake interrupt */
1632 + /* Start Tx DMA */
1633 + cns21xx_gec_wr(gec, GEC_REG_TX_DMA_CTRL, 1);
1635 + dev->stats.tx_packets++;
1636 + dev->stats.tx_bytes += skb->len;
1637 + dev->trans_start = jiffies;
1640 + if (txring->curr == txring->count)
1643 + spin_unlock_irqrestore(&gec->tx_lock, flags);
1646 + dev_kfree_skb(skb_free);
1648 + cns21xx_gec_timer_modify(gec, 10);
1650 + return NETDEV_TX_OK;
1653 +static void cns21xx_gec_set_mac_addr(struct cns21xx_gec *gec,
1654 + const char *mac_addr)
1656 + cns21xx_gec_wr(gec, GEC_REG_MY_MAC_H,
1657 + (mac_addr[0] << 8) | mac_addr[1]);
1659 + cns21xx_gec_wr(gec, GEC_REG_MY_MAC_L,
1660 + (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1661 + (mac_addr[4] << 8) | mac_addr[5]);
1663 + dev_dbg(&gec->netdev->dev, "MAC address: %pM", mac_addr);
1666 +static int cns21xx_gec_set_lan_mac_addr(struct net_device *dev, void *addr)
1668 + struct sockaddr *sock_addr = addr;
1669 + struct cns21xx_gec *gec = netdev_priv(dev);
1671 + spin_lock_irq(&gec->lock);
1672 + memcpy(dev->dev_addr, sock_addr->sa_data, 6);
1673 + cns21xx_gec_set_mac_addr(gec, sock_addr->sa_data);
1674 + spin_unlock_irq(&gec->lock);
1679 +static int __init cns21xx_gec_setup(struct cns21xx_gec *gec)
1684 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
1686 + PWRMGT_SOFTWARE_RESET_CONTROL_REG &= ~(0x1 << 15);
1688 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
1689 + /* set NIC clock to 67.5MHz */
1690 + PWRMGT_SYSTEM_CLOCK_CONTROL_REG |= (0x1 << 7);
1692 + /* enable NIC clock */
1693 + HAL_PWRMGT_ENABLE_NIC_CLOCK();
1695 + cns21xx_gec_wr(gec, GEC_REG_MAC_CFG, 0x00527C00);
1699 + /* Configure GPIO for NIC MDC/MDIO pins */
1700 + HAL_MISC_ENABLE_MDC_MDIO_PINS();
1701 + HAL_MISC_ENABLE_NIC_COL_PINS();
1704 + MISC_GPIOA_PIN_ENABLE_REG |= (0x7 << 22);
1705 + MISC_FAST_ETHERNET_PHY_CONFIG_REG |= (FE_PHY_LED_MODE >> 12) & 0x3;
1708 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
1710 + PWRMGT_SOFTWARE_RESET_CONTROL_REG &= ~(0x1 << 15);
1712 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
1715 + /* disable all interrupt status sources */
1716 + cns21xx_gec_wr(gec, GEC_REG_INT_MASK, ~(0));
1717 + /* clear pending interrupts */
1718 + cns21xx_gec_wr(gec, GEC_REG_INT, ~(0));
1720 + /* stop Rx and Tx DMA */
1721 + cns21xx_gec_wr(gec, GEC_REG_TX_DMA_CTRL, 0);
1722 + cns21xx_gec_wr(gec, GEC_REG_RX_DMA_CTRL, 0);
1724 + gec->txring.count = CNS21XX_GEC_NUM_TXDS;
1725 + gec->rxring.count = CNS21XX_GEC_NUM_RXDS;
1726 + err = cns21xx_gec_buffer_alloc(gec);
1730 + cns21xx_gec_mac_config(gec);
1731 + cns21xx_gec_fc_config(gec);
1733 + err = cns21xx_gec_phy_config(gec);
1735 + cns21xx_gec_buffer_free(gec);
1739 + cns21xx_gec_vlan_config(gec);
1740 + cns21xx_gec_arl_config(gec);
1741 + cns21xx_gec_set_mac_addr(gec, gec->netdev->dev_addr);
1742 + cns21xx_gec_mib_reset(gec);
1744 + MISC_DEBUG_PROBE_SELECTION_REG = 0x00000125; /* 0x00000105 pb0_nic */
1746 + cns21xx_gec_wr(gec, GEC_REG_TX_DPTR, gec->txring.desc_dma);
1747 + cns21xx_gec_wr(gec, GEC_REG_TX_BASE_ADDR, gec->txring.desc_dma);
1748 + cns21xx_gec_wr(gec, GEC_REG_RX_DPTR, gec->rxring.desc_dma);
1749 + cns21xx_gec_wr(gec, GEC_REG_RX_BASE_ADDR, gec->rxring.desc_dma);
1751 + cns21xx_gec_dma_config(gec);
1752 + internal_phy_init_timer(gec);
1753 + cns21xx_gec_timer_init(gec);
1758 +static const struct net_device_ops cns21xx_gec_netdev_ops = {
1759 + .ndo_open = cns21xx_gec_lan_open,
1760 + .ndo_stop = cns21xx_gec_close,
1761 + .ndo_start_xmit = cns21xx_gec_send_packet,
1762 + .ndo_set_mac_address = cns21xx_gec_set_lan_mac_addr,
1763 + .ndo_tx_timeout = cns21xx_gec_timeout,
1764 + .ndo_validate_addr = eth_validate_addr,
1765 + .ndo_change_mtu = eth_change_mtu,
1768 +static int __init cns21xx_gec_probe(struct platform_device *pdev)
1770 + struct net_device *netdev;
1771 + struct cns21xx_gec *gec;
1772 + struct cns21xx_gec_plat_data *pdata;
1775 + pdata = pdev->dev.platform_data;
1777 + dev_dbg(&pdev->dev, "no platform data\n");
1782 + if (!pdata->mac_addr) {
1783 + dev_dbg(&pdev->dev, "no mac address\n");
1788 + netdev = alloc_etherdev(sizeof(struct cns21xx_gec));
1794 + SET_NETDEV_DEV(netdev, &pdev->dev);
1796 + gec = netdev_priv(netdev);
1797 + gec->pdata = pdata;
1798 + gec->parent = &pdev->dev;
1800 + gec->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1801 + if (!gec->mem_res) {
1802 + dev_dbg(&pdev->dev, "no iomem resource\n");
1804 + goto err_free_netdev;
1807 + gec->status_irq = platform_get_irq_byname(pdev,
1808 + CNS21XX_GEC_STATUS_IRQ_NAME);
1809 + if (gec->status_irq < 0) {
1810 + dev_dbg(&pdev->dev, "%s irq not specified\n",
1811 + CNS21XX_GEC_STATUS_IRQ_NAME);
1813 + goto err_free_netdev;
1816 + gec->rxrc_irq = platform_get_irq_byname(pdev,
1817 + CNS21XX_GEC_RXRC_IRQ_NAME);
1818 + if (gec->rxrc_irq < 0) {
1819 + dev_dbg(&pdev->dev, "%s irq not specified\n",
1820 + CNS21XX_GEC_RXRC_IRQ_NAME);
1822 + goto err_free_netdev;
1825 + gec->rxqf_irq = platform_get_irq_byname(pdev,
1826 + CNS21XX_GEC_RXQF_IRQ_NAME);
1827 + if (gec->rxqf_irq < 0) {
1828 + dev_dbg(&pdev->dev, "%s irq not specified\n",
1829 + CNS21XX_GEC_RXQF_IRQ_NAME);
1831 + goto err_free_netdev;
1834 + gec->txtc_irq = platform_get_irq_byname(pdev,
1835 + CNS21XX_GEC_TXTC_IRQ_NAME);
1836 + if (gec->txtc_irq < 0) {
1837 + dev_dbg(&pdev->dev, "%s irq not specified\n",
1838 + CNS21XX_GEC_TXTC_IRQ_NAME);
1840 + goto err_free_netdev;
1843 + gec->txqe_irq = platform_get_irq_byname(pdev,
1844 + CNS21XX_GEC_TXQE_IRQ_NAME);
1845 + if (gec->txqe_irq < 0) {
1846 + dev_dbg(&pdev->dev, "%s irq not specified\n",
1847 + CNS21XX_GEC_TXQE_IRQ_NAME);
1849 + goto err_free_netdev;
1852 + if (!request_mem_region(gec->mem_res->start,
1853 + resource_size(gec->mem_res), pdev->name)) {
1854 + dev_err(&pdev->dev, "unable to request mem region\n");
1856 + goto err_free_netdev;
1859 + gec->base = ioremap(gec->mem_res->start, resource_size(gec->mem_res));
1861 + dev_err(&pdev->dev, "ioremap failed \n");
1863 + goto err_release_mem;
1866 + platform_set_drvdata(pdev, netdev);
1868 + spin_lock_init(&gec->lock);
1869 + spin_lock_init(&gec->tx_lock);
1871 + netdev->base_addr = gec->mem_res->start;
1872 + netdev->netdev_ops = &cns21xx_gec_netdev_ops;
1873 +#if defined(CNS21XX_GEC_TX_HW_CHECKSUM)
1874 + netdev->features = NETIF_F_IP_CSUM;
1876 + memcpy(netdev->dev_addr, pdata->mac_addr, 6);
1877 + netif_napi_add(netdev, &gec->napi, cns21xx_gec_poll, 64);
1879 + gec->netdev = netdev;
1880 + err = register_netdev(netdev);
1884 + err = cns21xx_gec_setup(gec);
1886 + goto err_unregister_netdev;
1890 + err_unregister_netdev:
1891 + unregister_netdev(netdev);
1893 + platform_set_drvdata(pdev, NULL);
1894 + iounmap(gec->base);
1896 + release_mem_region(gec->mem_res->start, resource_size(gec->mem_res));
1898 + free_netdev(netdev);
1903 +static int __devexit cns21xx_gec_remove(struct platform_device *pdev)
1905 + struct net_device *netdev = platform_get_drvdata(pdev);
1906 + struct cns21xx_gec *gec = netdev_priv(netdev);
1908 + unregister_netdev(netdev);
1909 + platform_set_drvdata(pdev, NULL);
1910 + iounmap(gec->base);
1911 + release_mem_region(gec->mem_res->start, resource_size(gec->mem_res));
1912 + free_netdev(netdev);
1917 +static struct platform_driver cns21xx_gec_driver = {
1918 + .remove = __devexit_p(cns21xx_gec_remove),
1920 + .name = "cns21xx-gec",
1921 + .owner = THIS_MODULE,
1925 +static int __init cns21xx_gec_init(void)
1927 + return platform_driver_probe(&cns21xx_gec_driver, cns21xx_gec_probe);
1930 +static void __exit cns21xx_gec_exit(void)
1932 + platform_driver_unregister(&cns21xx_gec_driver);
1935 +module_init(cns21xx_gec_init);
1936 +module_exit(cns21xx_gec_exit);
1938 +#define INTERNAL_PHY_PATCH_CHECKCNT 16
1939 +#define INTERNAL_PHY_PATCH_CHECK_PERIOD 1000 /* ms */
1941 +static void (*phy_statemachine)(struct cns21xx_gec*, int, int, int);
1943 +#define ETH3220_PHY_MON_PERIOD INTERNAL_PHY_PATCH_CHECK_PERIOD
1945 +/* phy monitor state */
1947 +#define PHY_STATE_INIT 0
1948 +#define LINK_DOWN_POSITIVE 1
1949 +#define WAIT_LINK_UP_POSITIVE 2
1950 +#define LINK_UP_POSITIVE 3
1951 +#define WAIT_BYPASS_LINK_UP_POSITIVE 4
1952 +#define BYPASS_AND_LINK_UP_POSITIVE 5
1953 +#define LINK_UP_8101_POSITIVE 6
1954 +#define WAIT_8101_LINK_UP_POSITIVE 7
1956 +#define PHY_STATE_LAST (WAIT_8101_LINK_UP_POSITIVE+1)
1959 +#define WAIT_BYPASS_LINK_UP_POSITIVE_TIMEOUT 5000 /* 5000 ms */
1960 +#define WAIT_BYPASS_LINK_UP_NEGATIVE_TIMEOUT 5000 /* 5000 ms */
1961 +#define LINK_DOWN_ABILITY_DETECT_TIMEOUT 5000 /* 5000 ms */
1962 +#define DETECT_8101_PERIOD 7000 /* 7000 ms */
1963 +#define WAIT_8101_LINK_UP_TIMEOUT 3000 /* 3000 ms */
1965 +#define MAX_PHY_PORT 1
1966 +#define DEFAULT_AGC_TRAIN 16
1967 +#define MAX_AGC_TRAIN 16 /* train 16 times */
1968 +static int agc_train_num = DEFAULT_AGC_TRAIN;
1969 +u32 port_displaybuf[NUM_PHY][MAX_AGC_TRAIN + 1] = {
1973 +static int cuv[3][3] = {
1979 +static u32 link_status_old;
1981 +struct eth3220_phy {
1988 +#define DEBUG_PHY_STATE_TRANSITION 1
1989 +#if DEBUG_PHY_STATE_TRANSITION
1991 + * show state transitions of the selected debug PHY port:
1992 + * -1 for all ports
1993 + * -2 to disable all ports
1994 + * 0 - 4 for a single port
1996 +static int debug_phy_port = -2;
1997 +static char *phystate_name[] = {
1998 + "init", /* PHY_STATE_INIT */
1999 + "ldp", /* LINK_DOWN_POSITIVE */
2000 + "wait_lup", /* WAIT_LINK_UP_POSITIVE */
2001 + "lup", /* LINK_UP_POSITIVE */
2002 + "wait_bp_lup", /* WAIT_BYPASS_LINK_UP_POSITIVE */
2003 + "bp_lup", /* BYPASS_AND_LINK_UP_POSITIVE */
2004 + "8101_lup", /* LINK_UP_8101_POSITIVE */
2005 + "wait_8101_lup", /* WAIT_8101_LINK_UP_POSITIVE */
2008 +#endif /* DEBUG_PHY_STATE_TRANSITION */
2010 +static struct eth3220_phy phy[5] = {
2011 + {PHY_STATE_INIT, 0, 0, 0},
2012 + {PHY_STATE_INIT, 0, 0, 0},
2013 + {PHY_STATE_INIT, 0, 0, 0},
2014 + {PHY_STATE_INIT, 0, 0, 0},
2015 + {PHY_STATE_INIT, 0, 0, 0}
2018 +static u16 long_cable_global_reg[32] = {
2019 + 0x0000, 0x19a0, 0x1d00, 0x0e80,
2020 + 0x0f60, 0x07c0, 0x07e0, 0x03e0,
2021 + 0x0000, 0x0000, 0x0000, 0x2000,
2022 + 0x8250, 0x1700, 0x0000, 0x0000,
2023 + 0x0000, 0x0000, 0x0000, 0x0000,
2024 + 0x0000, 0x204b, 0x01c2, 0x0000,
2025 + 0x0000, 0x0000, 0x0fff, 0x4100,
2026 + 0x9319, 0x0021, 0x0034, 0x270a | FE_PHY_LED_MODE
2029 +static u16 long_cable_local_reg[32] = {
2030 + 0x3100, 0x786d, 0x01c1, 0xca51,
2031 + 0x05e1, 0x45e1, 0x0003, 0x001c,
2032 + 0x2000, 0x9828, 0xf3c4, 0x400c,
2033 + 0xf8ff, 0x6940, 0xb906, 0x503c,
2034 + 0x8000, 0x297a, 0x1010, 0x5010,
2035 + 0x6ae1, 0x7c73, 0x783c, 0xfbdf,
2036 + 0x2080, 0x3244, 0x1301, 0x1a80,
2037 + 0x8e8f, 0x8000, 0x9c29, 0xa70a | FE_PHY_LED_MODE
2040 +/*=============================================================*
2041 + * eth3220ac_rt8101_phy_setting
2042 + *=============================================================*/
2043 +static void eth3220ac_rt8101_phy_setting(struct cns21xx_gec *gec, int port)
2045 + cns21xx_gec_write_phy(gec, port, 12, 0x18ff);
2046 + cns21xx_gec_write_phy(gec, port, 18, 0x6400);
2049 +static void eth3220ac_release_bpf(struct cns21xx_gec *gec, int port)
2051 + cns21xx_gec_write_phy(gec, port, 18, 0x6210);
2054 +static void eth3220ac_def_bpf(struct cns21xx_gec *gec, int port)
2056 + cns21xx_gec_write_phy(gec, port, 18, 0x6bff);
2059 +static void eth3220ac_def_linkdown_setting(struct cns21xx_gec *gec, int port)
2061 + cns21xx_gec_write_phy(gec, port, 13, 0xe901);
2062 + cns21xx_gec_write_phy(gec, port, 14, 0xa3c6);
2065 +static void eth3220ac_def_linkup_setting(struct cns21xx_gec *gec, int port)
2067 + cns21xx_gec_write_phy(gec, port, 13, 0x6901);
2068 + cns21xx_gec_write_phy(gec, port, 14, 0xa286);
2071 +/*=============================================================*
2072 + * eth3220ac_link_agc:
2073 + *=============================================================*/
2074 +static int eth3220ac_link_agc(struct cns21xx_gec *gec, int port, int speed)
2081 + /* if the link speed is 100 Mbps, then continue */
2087 + for (i = 0; i < agc_train_num; i++) {
2088 + cns21xx_gec_read_phy(gec, port, 15, &reg);
2090 + if (reg <= 0x12) {
2093 + agc_data += (u32)reg;
2098 + agc_data = (agc_data / jj) + 4;
2100 + agc_data = (cuv[2][0] * agc_data) / cuv[2][1] /
2101 + agc_train_num - 4;
2104 + agc_data = 0xd0 | (agc_data << 9);
2105 + cns21xx_gec_write_phy(gec, port, 15, agc_data);
2107 + cns21xx_gec_read_phy(gec, port, 15, &reg);
2108 + reg &= ~(0x1 << 7);
2109 + cns21xx_gec_write_phy(gec, port, 15, reg);
2114 +/*=============================================================*
2115 + * eth3220ac_unlink_agc:
2116 + *=============================================================*/
2117 +static void eth3220ac_unlink_agc(struct cns21xx_gec *gec, int port)
2119 + /* start AGC adaptive */
2120 + cns21xx_gec_write_phy(gec, port, 15, 0xa050);
2123 +/*=============================================================*
2124 + * eth3220ac_rt8100_check
2125 + *=============================================================*/
2126 +static int eth3220ac_rt8100_check(struct cns21xx_gec *gec, int port)
2130 + /* Read reg27 (error register) */
2131 + cns21xx_gec_read_phy(gec, port, 27, &reg);
2132 + /* if errors are present, enable the bypass filter */
2133 + if ((reg & 0xfffc)) {
2134 + cns21xx_gec_read_phy(gec, port, 15, &reg);
2135 + cns21xx_gec_read_phy(gec, port, 27, &reg2);
2136 + if ((reg2 & 0xfffc) && (((reg >> 9) & 0xff) < 0x1c)) {
2137 + dev_err(&gec->netdev->dev, "8100 pos err\n");
2139 + /* Bypass agcgain disable */
2140 + cns21xx_gec_write_phy(gec, port, 15, (reg & (~(0x1 << 7))));
2142 + /* repeat counts when reaching threshold error */
2143 + cns21xx_gec_write_phy(gec, port, 13, 0x4940);
2146 + * speed up auto-negotiation and compensate the threshold
2149 + cns21xx_gec_write_phy(gec, port, 14, 0xa306);
2151 + /* Bypass Filter enable */
2152 + cns21xx_gec_read_phy(gec, port, 18, &reg2);
2154 + cns21xx_gec_write_phy(gec, port, 18, (reg | 0x400));
2157 + cns21xx_gec_write_phy(gec, port, 0, 0x3300);
2165 +/*=============================================================*
2166 + * eth3220ac_rt8100_linkdown
2167 + *=============================================================*/
2168 +static void eth3220ac_rt8100_linkdown(struct cns21xx_gec *gec, int port)
2172 + /* Bypass Filter disable */
2173 + cns21xx_gec_read_phy(gec, port, 18, &reg);
2174 + cns21xx_gec_write_phy(gec, port, 18, (reg & (~(0x1 << 10))));
2175 + eth3220ac_def_linkdown_setting(gec, port);
2178 +static void eth3220ac_normal_phy_setting(struct cns21xx_gec *gec, int port)
2180 + cns21xx_gec_write_phy(gec, port, 12, 0xd8ff);
2181 + eth3220ac_def_bpf(gec, port);
2184 +/*=============================================================*
2185 + * wp3220ac_phystate
2186 + *=============================================================*/
2187 +static void wp3220ac_phystate(struct cns21xx_gec *gec, int port, int link, int speed)
2192 + phy[port].timer += ETH3220_PHY_MON_PERIOD;
2195 + /* Link up state */
2196 + switch (phy[port].state) {
2197 + case LINK_UP_POSITIVE:
2198 + next_state = eth3220ac_rt8100_check(gec, port) ?
2199 + WAIT_BYPASS_LINK_UP_POSITIVE :
2203 + case PHY_STATE_INIT:
2204 + case WAIT_LINK_UP_POSITIVE:
2205 + case LINK_DOWN_POSITIVE:
2206 + next_state = LINK_UP_POSITIVE;
2207 + eth3220ac_def_linkup_setting(gec, port);
2208 + eth3220ac_link_agc(gec, port, speed);
2209 + eth3220ac_release_bpf(gec, port);
2212 + case WAIT_BYPASS_LINK_UP_POSITIVE:
2213 + case BYPASS_AND_LINK_UP_POSITIVE:
2214 + next_state = BYPASS_AND_LINK_UP_POSITIVE;
2217 + case WAIT_8101_LINK_UP_POSITIVE:
2218 + next_state = LINK_UP_8101_POSITIVE;
2219 + eth3220ac_link_agc(gec, port, speed);
2220 + cns21xx_gec_write_phy(gec, port, 12, 0x98ff);
2223 + case LINK_UP_8101_POSITIVE:
2224 + next_state = LINK_UP_8101_POSITIVE;
2228 + next_state = LINK_UP_POSITIVE;
2229 + eth3220ac_def_linkup_setting(gec, port);
2230 + eth3220ac_link_agc(gec, port, speed);
2233 + /* Link down state */
2234 + switch (phy[port].state) {
2235 + case LINK_DOWN_POSITIVE:
2236 + cns21xx_gec_read_phy(gec, port, 5, &reg);
2237 + cns21xx_gec_read_phy(gec, port, 28, &reg2);
2239 + /* AN Link Partner Ability Register or NLP */
2240 + if (reg || (reg2 & 0x100))
2241 + next_state = WAIT_LINK_UP_POSITIVE;
2243 + next_state = LINK_DOWN_POSITIVE;
2246 + case WAIT_LINK_UP_POSITIVE:
2247 + if (phy[port].state_time >
2248 + LINK_DOWN_ABILITY_DETECT_TIMEOUT)
2249 + next_state = LINK_DOWN_POSITIVE;
2251 + next_state = WAIT_LINK_UP_POSITIVE;
2254 + case WAIT_BYPASS_LINK_UP_POSITIVE:
2255 + /* set timeout = 5 sec */
2256 + if (phy[port].state_time >
2257 + WAIT_BYPASS_LINK_UP_POSITIVE_TIMEOUT) {
2258 + next_state = LINK_DOWN_POSITIVE;
2260 + /* Bypass Filter disable */
2261 + eth3220ac_rt8100_linkdown(gec, port);
2262 + eth3220ac_def_bpf(gec, port);
2264 + next_state = WAIT_BYPASS_LINK_UP_POSITIVE;
2268 + case BYPASS_AND_LINK_UP_POSITIVE:
2269 + next_state = LINK_DOWN_POSITIVE;
2270 + eth3220ac_rt8100_linkdown(gec, port);
2271 + eth3220ac_def_bpf(gec, port);
2274 + case WAIT_8101_LINK_UP_POSITIVE:
2275 + if (phy[port].state_time > WAIT_8101_LINK_UP_TIMEOUT) {
2276 + next_state = LINK_DOWN_POSITIVE;
2277 + eth3220ac_normal_phy_setting(gec, port);
2278 + eth3220ac_def_linkdown_setting(gec, port);
2280 + next_state = WAIT_8101_LINK_UP_POSITIVE;
2284 + case LINK_UP_POSITIVE:
2285 + eth3220ac_unlink_agc(gec, port);
2286 + eth3220ac_def_linkdown_setting(gec, port);
2287 + eth3220ac_def_bpf(gec, port);
2288 + if (phy[port].timer > DETECT_8101_PERIOD) {
2289 + next_state = LINK_DOWN_POSITIVE;
2290 + phy[port].timer = 0;
2291 + phy[port].linkdown_cnt = 1;
2293 + if (++phy[port].linkdown_cnt > 2) {
2294 + next_state = WAIT_8101_LINK_UP_POSITIVE;
2295 + eth3220ac_rt8101_phy_setting(gec, port);
2297 + next_state = LINK_DOWN_POSITIVE;
2302 + case LINK_UP_8101_POSITIVE:
2303 + eth3220ac_normal_phy_setting(gec, port);
2304 + /* fall through to the normal PHY init handling */
2305 + case PHY_STATE_INIT:
2306 + eth3220ac_def_linkdown_setting(gec, port);
2307 + eth3220ac_unlink_agc(gec, port);
2309 + next_state = LINK_DOWN_POSITIVE;
2313 + if (phy[port].state != next_state) {
2314 + phy[port].state_time = 0;
2315 +#if DEBUG_PHY_STATE_TRANSITION
2316 + if (debug_phy_port == -1 || port == debug_phy_port) {
2317 + if ((phy[port].state < PHY_STATE_LAST) &&
2318 + (next_state < PHY_STATE_LAST))
2319 + dev_dbg(&gec->netdev->dev,
2320 + "p%d: %s->%s, %d, %d\n",
2321 + port, phystate_name[phy[port].state],
2322 + phystate_name[next_state],
2324 + phy[port].linkdown_cnt);
2326 + dev_dbg(&gec->netdev->dev,
2328 + port, phy[port].state, next_state);
2330 +#endif /* DEBUG_PHY_STATE_TRANSITION */
2332 + phy[port].state_time += ETH3220_PHY_MON_PERIOD;
2334 + phy[port].state = next_state;
2337 +/*=============================================================*
2338 + * eth3220_phyinit:
2339 + *=============================================================*/
2340 +static void eth3220ac_10m_agc(struct cns21xx_gec *gec)
2342 + /* Force 10M AGC = 2c globally */
2343 + cns21xx_gec_write_phy(gec, 0, 31, 0x2f1a);
2344 + cns21xx_gec_write_phy(gec, 0, 12, 0x112c);
2345 + cns21xx_gec_write_phy(gec, 0, 13, 0x2e21);
2346 + cns21xx_gec_write_phy(gec, 0, 31, 0xaf1a);
2349 +static void eth3220ac_dfe_init(struct cns21xx_gec *gec)
2353 + cns21xx_gec_write_phy(gec, 0, 31, 0x2f1a);
2354 + for (i = 0; i <= 7; i++)
2355 + cns21xx_gec_write_phy(gec, 0, i, 0);
2356 + cns21xx_gec_write_phy(gec, 0, 11, 0x0b50);
2357 + cns21xx_gec_write_phy(gec, 0, 31, 0xaf1a);
2360 +static void eth3220ac_phy_cdr_training_init(struct cns21xx_gec *gec)
2364 + /* Force all port in 10M FD mode */
2365 + for (i = 0; i < NUM_PHY; i++)
2366 + cns21xx_gec_write_phy(gec, i, 0, 0x100);
2368 + /* Global setting */
2369 + cns21xx_gec_write_phy(gec, 0, 31, 0x2f1a);
2370 + cns21xx_gec_write_phy(gec, 0, 29, 0x5021);
2371 + udelay(2000); /* 2ms, wait > 1 ms */
2372 + cns21xx_gec_write_phy(gec, 0, 29, 0x4021);
2373 + udelay(2000); /* 2ms, wait > 1 ms */
2374 + cns21xx_gec_write_phy(gec, 0, 31, 0xaf1a);
2376 + /* Enable phy AN */
2377 + for (i = 0; i < NUM_PHY; i++)
2378 + cns21xx_gec_write_phy(gec, i, 0, 0x3100);
2381 +static void eth3220_phyinit(struct cns21xx_gec *gec)
2383 + eth3220ac_10m_agc(gec);
2384 + eth3220ac_dfe_init(gec);
2385 + eth3220ac_phy_cdr_training_init(gec);
2388 +static void eth3220_phycfg(struct cns21xx_gec *gec, int phyaddr)
2390 + eth3220ac_def_linkdown_setting(gec, phyaddr);
2391 + eth3220ac_normal_phy_setting(gec, phyaddr);
2392 + cns21xx_gec_write_phy(gec, phyaddr, 9, 0x7f);
2395 +static void internal_phy_patch_check(struct cns21xx_gec *gec, int init)
2397 + u32 short_cable_agc_detect_count;
2398 + u32 link_status = 0, link_speed;
2399 + u32 phy_addr = gec->phy_addr;
2404 + cns21xx_gec_read_phy(gec, phy_addr, 1, &phy_data);
2406 + cns21xx_gec_read_phy(gec, phy_addr, 1, &phy_data2);
2407 + if (((phy_data & 0x0004) != 0x0004) &&
2408 + ((phy_data2 & 0x0004) != 0x0004)) {
2410 + short_cable_agc_detect_count = 0;
2411 + for (i = 0; i < INTERNAL_PHY_PATCH_CHECKCNT; i++) {
2412 + cns21xx_gec_read_phy(gec, phy_addr, 15, &phy_data);
2414 + if ((phy_data & 0x7F) <= 0x12) {
2416 + short_cable_agc_detect_count++;
2420 + if (short_cable_agc_detect_count) {
2424 + phy_statemachine = wp3220ac_phystate;
2425 + eth3220_phyinit(gec);
2426 + cns21xx_gec_read_phy(gec, phy_addr, 1, &phy_data);
2427 + if (phy_data & 0x0040)
2430 + mac_cfg = cns21xx_gec_rr(gec, GEC_REG_MAC_CFG);
2431 + if ((mac_cfg & 0xC) == 0x4) /* 100Mbps */
2436 + link_status_old = link_status;
2437 + for (i = 0; i < MAX_PHY_PORT; link_status >>= 1, i++)
2438 + eth3220_phycfg(gec, i);
2441 + /* set to global domain */
2442 + cns21xx_gec_write_phy(gec, phy_addr, 31,
2444 + for (i = 0; i < 32; i++)
2445 + cns21xx_gec_write_phy(gec, phy_addr, i,
2446 + long_cable_global_reg[i]);
2448 + /* set to local domain */
2449 + cns21xx_gec_write_phy(gec, phy_addr, 31,
2451 + for (i = 0; i < 32; i++)
2452 + cns21xx_gec_write_phy(gec, phy_addr, i,
2453 + long_cable_local_reg[i]);
2458 +static void internal_phy_timer_func(unsigned long data)
2460 + struct cns21xx_gec *gec = (struct cns21xx_gec *) data;
2462 + internal_phy_patch_check(gec, 0);
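+ /*
+  * INTERNAL_PHY_PATCH_CHECK_PERIOD is in milliseconds; dividing by 10
+  * only matches msecs_to_jiffies() when HZ is 100 (assumed here)
+  */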
2463 + mod_timer(&gec->internal_phy_timer,
2464 + jiffies + INTERNAL_PHY_PATCH_CHECK_PERIOD / 10);
2467 +static void internal_phy_init_timer(struct cns21xx_gec *gec)
2469 + init_timer(&gec->internal_phy_timer);
2470 + gec->internal_phy_timer.function = internal_phy_timer_func;
2471 + gec->internal_phy_timer.data = (unsigned long) gec;
2474 +static void internal_phy_start_timer(struct cns21xx_gec *gec)
2476 + dev_dbg(&gec->netdev->dev, "starting patch check.\n");
2478 + internal_phy_patch_check(gec, 1);
2479 + mod_timer(&gec->internal_phy_timer,
2480 + jiffies + INTERNAL_PHY_PATCH_CHECK_PERIOD / 10);
2483 +static void internal_phy_stop_timer(struct cns21xx_gec *gec)
2485 + dev_dbg(&gec->netdev->dev, "stopping patch check.\n");
2487 + del_timer_sync(&gec->internal_phy_timer);
2490 +++ b/drivers/net/cns21xx/Kconfig
2493 + tristate "CNS21XX Gigabit Ethernet Controller support"
2494 + depends on ARCH_CNS21XX
2496 + If you wish to compile a kernel for a Cavium Networks CNS21XX
2497 + based board with Ethernet support, say Y here.
2500 +++ b/drivers/net/cns21xx/Makefile
2503 +# Makefile for the Cavium Networks CNS21XX ethernet driver
2506 +cns21xx_gec-y += cns21xx_gec_main.o
2508 +obj-$(CONFIG_CNS21XX_GEC) += cns21xx_gec.o