2 +++ b/drivers/net/ethernet/cns21xx/cns21xx_gec_main.c
5 + * Copyright (c) 2010 Gabor Juhos <juhosg@openwrt.org>
7 + * This driver has been derived from the ethernet driver of the
9 + * Copyright (c) 2008 Cavium Networks
11 + * This file is free software; you can redistribute it and/or modify
12 + * it under the terms of the GNU General Public License, Version 2, as
13 + * published by the Free Software Foundation.
16 +#include <linux/module.h>
17 +#include <linux/kernel.h>
18 +#include <linux/bootmem.h>
19 +#include <linux/sched.h>
20 +#include <linux/types.h>
21 +#include <linux/fcntl.h>
22 +#include <linux/interrupt.h>
23 +#include <linux/ptrace.h>
24 +#include <linux/ioport.h>
25 +#include <linux/in.h>
26 +#include <linux/slab.h>
27 +#include <linux/init.h>
28 +#include <linux/bitops.h>
29 +#include <linux/irq.h>
30 +#include <linux/io.h>
31 +#include <linux/pci.h>
32 +#include <linux/errno.h>
33 +#include <linux/delay.h>
34 +#include <linux/netdevice.h>
35 +#include <linux/etherdevice.h>
36 +#include <linux/platform_device.h>
37 +#include <linux/skbuff.h>
38 +#include <linux/ip.h>
39 +#include <linux/if_ether.h>
40 +#include <linux/icmp.h>
41 +#include <linux/udp.h>
42 +#include <linux/tcp.h>
43 +#include <linux/if_arp.h>
46 +#include <mach/hardware.h>
47 +#include <mach/cns21xx.h>
48 +#include <mach/cns21xx_misc.h>
49 +#include <mach/cns21xx_powermgmt.h>
50 +#include <mach/cns21xx_gec_platform.h>
52 +#define DRIVER_NAME "cns21xx-gec"
54 +/* The VSC8601 and the WavePlus internal PHY use the same PHY address */
55 +#define CNS21XX_GEC_PHY_ADDR 0
57 +#define CNS21XX_GEC_TX_HW_CHECKSUM
58 +#define CNS21XX_GEC_RX_HW_CHECKSUM
60 +#define CNS21XX_PEND_INT_COUNT 16
61 +#define CNS21XX_PEND_INT_TIME 5 /* 5 x 20 usecs */
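(Editor's sketch, not part of the patch: how these two values end up packed into the delayed-interrupt register later, in cns21xx_gec_install_isr(). The pending-packet count goes into bits 15:8 and the pending time into bits 7:0; an enable bit, elided from that hunk, is OR'ed in as well.)

	u32 dly_cfg = ((CNS21XX_PEND_INT_COUNT & 0xff) << 8) |	/* 16 pending packets */
		      (CNS21XX_PEND_INT_TIME & 0xff);		/* 5 x 20 us = 100 us */
	/* dly_cfg == 0x1005: the RX interrupt is delayed until 16 packets are
	 * pending or ~100 us have elapsed, presumably whichever comes first. */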
63 +#define MAX_PACKET_LEN 1536
65 +#define CNS21XX_GEC_NUM_TXDS 48 /* FIXME: the original value of 64 causes UDP failures */
66 +#define CNS21XX_GEC_NUM_RXDS 64
68 +struct cns21xx_gec_mib_info {
72 + u32 mib_rx_over_size;
73 + u32 mib_rx_no_buffer_drop;
75 + u32 mib_rx_arl_drop;
76 + u32 mib_rx_myvid_drop;
77 + u32 mib_rx_csum_err;
78 + u32 mib_rx_pause_frame;
81 + u32 mib_tx_pause_frame;
85 + * Network driver: receive/send paths and buffer initialization
87 +struct cns21xx_gec_txd {
114 +struct cns21xx_gec_rxd {
145 +struct cns21xx_gec_ring {
151 + struct sk_buff **skbs;
154 +#define CNS21XX_GEC_NUM_VLANS 4
155 +struct cns21xx_gec_vlan {
156 + u32 vid; /* 0~4095 */
157 + u32 control; /* ENABLE or DISABLE */
160 +/* per-device driver state */
161 +struct cns21xx_gec {
162 + struct napi_struct napi;
163 + struct net_device *netdev;
164 + struct device *parent;
165 + struct cns21xx_gec_ring txring;
166 + struct cns21xx_gec_ring rxring;
168 + void __iomem *base;
169 + struct resource *mem_res;
170 + struct cns21xx_gec_plat_data *pdata;
172 + spinlock_t tx_lock;
179 + unsigned long rx_queue_full;
181 + struct cns21xx_gec_vlan vlans[CNS21XX_GEC_NUM_VLANS];
183 + struct timer_list internal_phy_timer;
184 + struct timer_list nic_timer;
187 + struct cns21xx_gec_mib_info mib_info;
190 +#define GEC_REG_PHY_CTRL0 0x000
191 +#define GEC_REG_PHY_CTRL1 0x004
192 +#define GEC_REG_MAC_CFG 0x008
193 +#define GEC_REG_FC_CFG 0x00c
194 +#define GEC_REG_ARL_CFG 0x010
195 +#define GEC_REG_MY_MAC_H 0x014
196 +#define GEC_REG_MY_MAC_L 0x018
197 +#define GEC_REG_HASH_CTRL 0x01c
198 +#define GEC_REG_VLAN_CTRL 0x020
199 +#define GEC_REG_VLAN_ID_0_1 0x024
200 +#define GEC_REG_VLAN_ID_2_3 0x028
201 +#define GEC_REG_DMA_CFG 0x030
202 +#define GEC_REG_TX_DMA_CTRL 0x034
203 +#define GEC_REG_RX_DMA_CTRL 0x038
204 +#define GEC_REG_TX_DPTR 0x03c
205 +#define GEC_REG_RX_DPTR 0x040
206 +#define GEC_REG_TX_BASE_ADDR 0x044
207 +#define GEC_REG_RX_BASE_ADDR 0x048
208 +#define GEC_REG_DLY_INT_CFG 0x04c
209 +#define GEC_REG_INT 0x050
210 +#define GEC_REG_INT_MASK 0x054
211 +#define GEC_REG_TEST0 0x058
212 +#define GEC_REG_TEST1 0x05c
213 +#define GEC_REG_EXTEND_CFG 0x060
215 +#define GEC_REG_RX_OK_PKT_CNTR 0x100
216 +#define GEC_REG_RX_OK_BYTE_CNTR 0x104
217 +#define GEC_REG_RX_RUNT_BYTE_CNTR 0x108
218 +#define GEC_REG_RX_OSIZE_DROP_PKT_CNTR 0x10c
219 +#define GEC_REG_RX_NO_BUF_DROP_PKT_CNTR 0x110
220 +#define GEC_REG_RX_CRC_ERR_PKT_CNTR 0x114
221 +#define GEC_REG_RX_ARL_DROP_PKT_CNTR 0x118
222 +#define GEC_REG_MYVLANID_MISMATCH_DROP_PKT_CNTR 0x11c
223 +#define GEC_REG_RX_CHKSUM_ERR_PKT_CNTR 0x120
224 +#define GEC_REG_RX_PAUSE_FRAME_PKT_CNTR 0x124
225 +#define GEC_REG_TX_OK_PKT_CNTR 0x128
226 +#define GEC_REG_TX_OK_BYTE_CNTR 0x12c
227 +#define GEC_REG_TX_COLLISION_CNTR 0x130
228 +#define GEC_REG_TX_PAUSE_FRAME_CNTR 0x130
229 +#define GEC_REG_TX_FIFO_UNDERRUN_RETX_CNTR 0x134
231 +#define GEC_INT_MIB_COUNTER_TH BIT(3)
232 +#define GEC_INT_PORT_STATUS_CHG BIT(2)
234 +#define FE_PHY_LED_MODE (0x1 << 12)
236 +static void internal_phy_init_timer(struct cns21xx_gec *gec);
237 +static void internal_phy_start_timer(struct cns21xx_gec *gec);
238 +static void internal_phy_stop_timer(struct cns21xx_gec *gec);
240 +static void cns21xx_gec_phy_powerdown(struct cns21xx_gec *gec);
241 +static void cns21xx_gec_phy_powerup(struct cns21xx_gec *gec);
243 +static inline u32 cns21xx_gec_rr(struct cns21xx_gec *gec, unsigned int reg)
245 + return __raw_readl(gec->base + reg);
248 +static inline void cns21xx_gec_wr(struct cns21xx_gec *gec, unsigned int reg,
251 + __raw_writel(val, gec->base + reg);
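(Editor's illustration, not part of the patch: these helpers are used throughout the driver for read-modify-write accesses to the GEC registers, e.g. setting the TX checksum-offload bit in MAC_CFG as cns21xx_gec_mac_config() does further below.)

	u32 cfg = cns21xx_gec_rr(gec, GEC_REG_MAC_CFG);
	cfg |= (0x1 << 26);	/* TX TCP/UDP/IP checksum offload on */
	cns21xx_gec_wr(gec, GEC_REG_MAC_CFG, cfg);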
254 +static void cns21xx_gec_timer_func(unsigned long data)
256 + struct cns21xx_gec *gec = (struct cns21xx_gec *) data;
257 + struct cns21xx_gec_ring *txring = &gec->txring;
261 + int skb_free_count = 0;
262 + struct cns21xx_gec_txd *txd;
263 + unsigned long flags;
265 + local_irq_save(flags);
266 + txsd_current = cns21xx_gec_rr(gec, GEC_REG_TX_DPTR);
267 + txsd_index = (txsd_current - (u32)txring->desc_dma) >> 4;
268 + if (txsd_index > txring->dirty) {
269 + skb_free_count = txsd_index - txring->dirty;
270 + } else if (txsd_index <= txring->dirty) {
271 + skb_free_count = txring->count + txsd_index -
274 + for (i = 0; i < skb_free_count; i++) {
275 + txd = ((struct cns21xx_gec_txd *) txring->desc_cpu) +
278 + if (txd->cown == 0)
281 + if (txring->skbs[txring->dirty]) {
282 + dev_kfree_skb_any(txring->skbs[txring->dirty]);
283 + txring->skbs[txring->dirty] = NULL;
285 + dma_unmap_single(gec->parent,
292 + if (txring->dirty == txring->count)
295 + local_irq_restore(flags);
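(Editor's note, not part of the patch: the index computation above assumes each TX descriptor is 16 bytes, which is why the byte offset of the hardware descriptor pointer from the ring base can be turned into a ring index with a right shift by 4. An equivalent, more explicit form of the same arithmetic:)

	u32 dptr = cns21xx_gec_rr(gec, GEC_REG_TX_DPTR);
	unsigned int index = (dptr - (u32)txring->desc_dma) / sizeof(struct cns21xx_gec_txd);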
298 +static void __init cns21xx_gec_timer_init(struct cns21xx_gec *gec)
300 + init_timer(&gec->nic_timer);
301 + gec->nic_timer.function = &cns21xx_gec_timer_func;
302 + gec->nic_timer.data = (unsigned long) gec;
305 +static void cns21xx_gec_timer_modify(struct cns21xx_gec *gec, unsigned int t)
307 + mod_timer(&gec->nic_timer, jiffies + t);
310 +static int cns21xx_gec_write_phy(struct cns21xx_gec *gec,
311 + u8 addr, u8 reg, u16 val)
315 + if (addr > 31 || reg > 31)
318 + /* clear previous rw_ok status */
319 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0, 0x1 << 15);
321 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0,
322 + addr | (reg << 8) | (val << 16) | (0x1 << 13));
324 + for (i = 0; i < 10000; i++) {
327 + status = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL0);
328 + if (status & (0x1 << 15)) {
330 +			 * clear the rw_ok status,
331 +			 * and clear the other bits
333 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0, (0x1 << 15));
339 + dev_err(&gec->netdev->dev,
340 + "%s timed out, phy_addr:0x%x, phy_reg:0x%x, write_data:0x%x\n",
341 + __func__, addr, reg, val);
346 +static int cns21xx_gec_read_phy(struct cns21xx_gec *gec,
347 + u8 addr, u8 reg, u16 *val)
351 + if (addr > 31 || reg > 31)
354 + /* clear previous rw_ok status */
355 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0, 0x1 << 15);
357 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0,
358 + addr | (reg << 8) | (0x1 << 14));
360 + for (i = 0; i < 10000; i++) {
363 + status = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL0);
364 + if (status & (0x1 << 15)) {
366 +			 * clear the rw_ok status,
367 +			 * and clear the other bits
369 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL0, (0x1 << 15));
370 + *val = (status >> 16) & 0xffff;
376 + dev_err(&gec->netdev->dev,
377 + "%s timed out, phy_addr:0x%x, phy_reg:0x%x\n",
378 + __func__, addr, reg);
384 +static void cns21xx_gec_dma_config(struct cns21xx_gec *gec)
386 + u32 dma_config = 0;
388 + dma_config = cns21xx_gec_rr(gec, GEC_REG_DMA_CFG);
390 + /* Config TX DMA */
391 +	/* clear the TX auto-polling interval field */
392 +	dma_config &= ~(0x3 << 6);
393 +	/* TX auto-polling interval: 100 us */
394 +	dma_config |= (0x2 << 6);
395 + /* TX auto polling C-bit enable */
396 + dma_config |= (0x1 << 5);
397 + /* TX can transmit packets, No suspend */
398 + dma_config &= ~(0x1 << 4);
400 + /* Config RX DMA */
401 +	/* clear the RX auto-polling interval field */
402 +	dma_config &= ~(0x3 << 2);
403 +	/* RX auto-polling interval: 100 us */
404 +	dma_config |= (0x2 << 2);
405 + /* RX auto polling C-bit enable */
406 + dma_config |= (0x1 << 1);
407 + /* RX can receive packets, No suspend */
408 + dma_config &= ~0x1;
410 + /* 4N+2(for Linux) */
411 + dma_config &= ~(0x1 << 16);
413 + cns21xx_gec_wr(gec, GEC_REG_DMA_CFG, dma_config);
416 +static void cns21xx_gec_mac_config(struct cns21xx_gec *gec)
420 + mac_config = cns21xx_gec_rr(gec, GEC_REG_MAC_CFG);
422 +#ifdef CNS21XX_GEC_TX_HW_CHECKSUM
423 + /* Tx ChkSum offload On: TCP/UDP/IP */
424 + mac_config |= (0x1 << 26);
426 + /* Tx ChkSum offload Off: TCP/UDP/IP */
427 + mac_config &= ~(0x1 << 26);
430 +#ifdef CNS21XX_GEC_RX_HW_CHECKSUM
431 + /* Rx ChkSum offload On: TCP/UDP/IP */
432 + mac_config |= (0x1 << 25);
434 + /* Rx ChkSum offload Off: TCP/UDP/IP */
435 + mac_config &= ~(0x1 << 25);
438 + /* Accept CSUM error pkt */
439 + mac_config |= (0x1 << 24);
441 + mac_config &= ~(0x1 << 23);
442 + /* Strip vlan tag */
443 + mac_config |= (0x1 << 22);
444 + /* Accept CRC error pkt */
445 + mac_config |= (0x1 << 21);
447 + mac_config |= (0x1 << 20);
449 + /* Discard oversize pkt */
450 + mac_config &= ~(0x1 << 18);
452 +	/* max frame length: clear the field, then set it to 1518 bytes */
453 + mac_config &= ~(0x3 << 16);
456 + mac_config |= (0x2 << 16);
459 + mac_config |= (0x1f << 10);
461 +	/* Do not drop packets after 16 consecutive collisions; */
462 +	/* allow them to be retransmitted */
463 + mac_config |= (0x1 << 9);
465 + mac_config |= (0x1 << 8);
467 + cns21xx_gec_wr(gec, GEC_REG_MAC_CFG, mac_config);
470 +static void cns21xx_gec_fc_config(struct cns21xx_gec *gec)
474 + fc_config = cns21xx_gec_rr(gec, GEC_REG_FC_CFG);
476 + /* Send pause on frame threshold */
478 + fc_config &= ~(0xfff << 16);
479 + fc_config |= (0x360 << 16);
480 + /* Disable UC_PAUSE */
481 + fc_config &= ~(0x1 << 8);
482 + /* Enable Half Duplex backpressure */
483 + fc_config |= (0x1 << 7);
484 + /* Collision-based BP */
485 + fc_config &= ~(0x1 << 6);
486 + /* Disable max BP collision */
487 + fc_config &= ~(0x1 << 5);
489 + fc_config &= ~(0x1f);
491 + fc_config |= (0xc);
493 + cns21xx_gec_wr(gec, GEC_REG_FC_CFG, fc_config);
496 +static void cns21xx_gec_internal_phy_config(struct cns21xx_gec *gec)
501 + dev_info(&gec->netdev->dev, "Internal PHY\n");
503 + phy_addr = CNS21XX_GEC_PHY_ADDR;
504 + gec->phy_addr = phy_addr;
506 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
508 + /* set phy addr for auto-polling */
509 + phy_ctrl1 |= (phy_addr & 0x1f) << 24;
511 + /* set internal phy mode */
512 +	/* internal 10/100 PHY */
513 + phy_ctrl1 |= 0x1 << 18;
516 + phy_ctrl1 &= ~(0x1 << 17);
519 + phy_ctrl1 &= ~(0x1 << 16);
521 + /* config PHY LED bit[13:12] */
522 + cns21xx_gec_read_phy(gec, phy_addr, 31, (u16 *)(&phy_ctrl1));
523 + /* clear LED control */
524 + phy_ctrl1 &= ~(0x3 << 12);
525 + phy_ctrl1 |= FE_PHY_LED_MODE;
526 + cns21xx_gec_write_phy(gec, phy_addr, 31, phy_ctrl1);
528 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
531 +static void cns21xx_gec_vsc8601_phy_config(struct cns21xx_gec *gec)
537 + phy_addr = CNS21XX_GEC_PHY_ADDR;
538 + gec->phy_addr = phy_addr;
540 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
542 + /* phy addr for auto-polling */
543 + phy_ctrl1 |= phy_addr << 24;
545 + /* set external phy mode */
546 + phy_ctrl1 &= ~(0x1 << 18);
549 + phy_ctrl1 |= (0x1 << 17);
551 + /* set MII interface */
552 + phy_ctrl1 &= ~(0x1 << 16);
554 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
556 + /* set phy addr for auto-polling */
557 + phy_ctrl1 |= phy_addr << 24;
559 + /* set external phy mode */
560 + /* MII/RGMII interface */
561 + phy_ctrl1 &= ~(0x1 << 18);
564 + phy_ctrl1 |= (0x1 << 17);
567 + phy_ctrl1 &= ~(0x1 << 16);
569 + cns21xx_gec_read_phy(gec, phy_addr, 3, &phy_data);
570 + if ((phy_data & 0x000f) == 0x0000) {
574 + dev_info(&gec->netdev->dev, "VSC8601 Type A Chip\n");
575 + cns21xx_gec_write_phy(gec, phy_addr, 31, 0x52B5);
576 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0xAF8A);
579 + cns21xx_gec_read_phy(gec, phy_addr, 18, &tmp16);
580 + phy_data |= (tmp16 & ~0x0);
581 + cns21xx_gec_write_phy(gec, phy_addr, 18, phy_data);
584 + cns21xx_gec_read_phy(gec, phy_addr, 17, &tmp16);
585 + phy_data |= (tmp16 & ~0x000C);
586 + cns21xx_gec_write_phy(gec, phy_addr, 17, phy_data);
588 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0x8F8A);
589 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0xAF86);
592 + cns21xx_gec_read_phy(gec, phy_addr, 18, &tmp16);
593 + phy_data |= (tmp16 & ~0x000C);
594 + cns21xx_gec_write_phy(gec, phy_addr, 18, phy_data);
597 + cns21xx_gec_read_phy(gec, phy_addr, 17, &tmp16);
598 + phy_data |= (tmp16 & ~0x0);
599 + cns21xx_gec_write_phy(gec, phy_addr, 17, phy_data);
601 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0x8F8A);
603 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0xAF82);
606 + cns21xx_gec_read_phy(gec, phy_addr, 18, &tmp16);
607 + phy_data |= (tmp16 & ~0x0);
608 + cns21xx_gec_write_phy(gec, phy_addr, 18, phy_data);
611 + cns21xx_gec_read_phy(gec, phy_addr, 17, &tmp16);
612 + phy_data |= (tmp16 & ~0x0180);
613 + cns21xx_gec_write_phy(gec, phy_addr, 17, phy_data);
615 + cns21xx_gec_write_phy(gec, phy_addr, 16, 0x8F82);
617 + cns21xx_gec_write_phy(gec, phy_addr, 31, 0x0);
619 + /* Set port type: single port */
620 + cns21xx_gec_read_phy(gec, phy_addr, 9, &phy_data);
621 + phy_data &= ~(0x1 << 10);
622 + cns21xx_gec_write_phy(gec, phy_addr, 9, phy_data);
623 + } else if ((phy_data & 0x000f) == 0x0001) {
625 + dev_info(&gec->netdev->dev, "VSC8601 Type B Chip\n");
627 + cns21xx_gec_read_phy(gec, phy_addr, 23, &phy_data);
628 + phy_data |= (0x1 << 8); /* set RGMII timing skew */
629 + cns21xx_gec_write_phy(gec, phy_addr, 23, phy_data);
632 +		/* change to extended registers */
633 + cns21xx_gec_write_phy(gec, phy_addr, 31, 0x0001);
635 + cns21xx_gec_read_phy(gec, phy_addr, 28, &phy_data);
636 + phy_data &= ~(0x3 << 14); /* set RGMII TX timing skew */
637 + phy_data |= (0x3 << 14); /* 2.0ns */
638 + phy_data &= ~(0x3 << 12); /* set RGMII RX timing skew */
639 + phy_data |= (0x3 << 12); /* 2.0ns */
640 + cns21xx_gec_write_phy(gec, phy_addr, 28, phy_data);
642 + /* change to normal registers */
643 + cns21xx_gec_write_phy(gec, phy_addr, 31, 0x0000);
646 + /* set TX and RX clock skew */
647 + cns21xx_gec_wr(gec, GEC_REG_TEST0, (0x2 << 2) | (0x2 << 0));
650 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
653 +static void cns21xx_gec_ip101a_phy_config(struct cns21xx_gec *gec)
658 + dev_info(&gec->netdev->dev, "ICPlus IP101A\n");
661 + gec->phy_addr = phy_addr;
663 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
665 + /* set phy addr for auto-polling */
666 + phy_ctrl1 |= phy_addr << 24;
668 + /* set external phy mode */
669 + /* MII/RGMII interface */
670 + phy_ctrl1 &= ~(0x1 << 18);
673 + phy_ctrl1 &= ~(0x1 << 17);
676 + phy_ctrl1 &= ~(0x1 << 16);
678 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
681 +static void cns21xx_gec_ip1001_phy_config(struct cns21xx_gec *gec)
687 + dev_info(&gec->netdev->dev, "ICPlus IP1001\n");
690 + gec->phy_addr = phy_addr;
692 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
694 + /* set phy addr for auto-polling */
695 + phy_ctrl1 |= phy_addr << 24;
697 + /* set external phy mode */
698 + /* MII/RGMII interface */
699 + phy_ctrl1 &= ~(0x1 << 18);
702 + phy_ctrl1 |= (0x1 << 17);
705 + phy_ctrl1 &= ~(0x1 << 16);
707 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
708 + cns21xx_gec_read_phy(gec, phy_addr, 2, &phy_data);
710 + /* set AN capability */
711 + cns21xx_gec_read_phy(gec, phy_addr, 4, &phy_data);
712 + /* clear existing values */
713 + phy_data &= ~(0xf << 5);
715 + phy_data |= (0x1 << 5);
717 + phy_data |= (0x1 << 6);
719 + phy_data |= (0x1 << 7);
721 + phy_data |= (0x1 << 8);
723 + phy_data |= (0x1 << 10);
724 + cns21xx_gec_write_phy(gec, phy_addr, 4, phy_data);
726 + cns21xx_gec_read_phy(gec, phy_addr, 9, &phy_data);
728 + phy_data |= (0x1 << 9);
729 + phy_data &= ~(0x1 << 10);
730 + phy_data |= (0x1 << 12);
731 + cns21xx_gec_write_phy(gec, phy_addr, 9, phy_data);
733 + cns21xx_gec_read_phy(gec, phy_addr, 16, &phy_data);
734 + /* Smart function off */
735 + phy_data &= ~(0x1 << 11);
737 + phy_data |= (0x1 << 0);
739 + phy_data |= (0x1 << 1);
740 + cns21xx_gec_write_phy(gec, phy_addr, 16, phy_data);
742 + cns21xx_gec_read_phy(gec, phy_addr, 16, &phy_data);
745 + cns21xx_gec_read_phy(gec, phy_addr, 20, &phy_data);
746 +	phy_data &= ~(0x1 << 2);
748 +	phy_data |= (0x1 << 9);
749 + cns21xx_gec_write_phy(gec, phy_addr, 20, phy_data);
752 + cns21xx_gec_read_phy(gec, phy_addr, 0, &phy_data);
753 + phy_data |= (0x1 << 9); /* re-AN */
754 + cns21xx_gec_write_phy(gec, phy_addr, 0, phy_data);
756 + cns21xx_gec_read_phy(gec, phy_addr, 9, &phy_data);
758 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
761 +static int cns21xx_gec_phy_config(struct cns21xx_gec *gec)
765 + switch (gec->pdata->phy_type) {
766 + case CNS21XX_GEC_PHY_TYPE_INTERNAL:
767 + cns21xx_gec_internal_phy_config(gec);
770 + case CNS21XX_GEC_PHY_TYPE_VSC8601:
771 + cns21xx_gec_vsc8601_phy_config(gec);
774 + case CNS21XX_GEC_PHY_TYPE_IP101A:
775 + cns21xx_gec_ip101a_phy_config(gec);
778 + case CNS21XX_GEC_PHY_TYPE_IP1001:
779 + cns21xx_gec_ip1001_phy_config(gec);
786 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
789 + phy_ctrl1 |= (0x1 << 8);
790 + if (!((phy_ctrl1 >> 8) & 0x1)) { /* AN disable */
791 + /* Force to FullDuplex mode */
792 + phy_ctrl1 &= ~(0x1 << 11); /* Half */
794 + /* Force to 100Mbps mode */
795 + phy_ctrl1 &= ~(0x3 << 9); /* clear to 10M */
796 + phy_ctrl1 |= (0x1 << 9); /* set to 100M */
799 + /* Force TX FlowCtrl On,in 1000M */
800 + phy_ctrl1 |= (0x1 << 13);
802 + /* Force TX FlowCtrl On, in 10/100M */
803 + phy_ctrl1 |= (0x1 << 12);
805 + /* Enable MII auto polling */
806 + phy_ctrl1 &= ~(0x1 << 7);
808 + cns21xx_gec_wr(gec, GEC_REG_PHY_CTRL1, phy_ctrl1);
809 + cns21xx_gec_phy_powerdown(gec);
814 +static void cns21xx_gec_vlan_config(struct cns21xx_gec *gec)
816 + /* setup VLAN entries */
817 + gec->vlans[0].vid = 2;
818 + gec->vlans[0].control = 0;
819 + gec->vlans[1].vid = 2;
820 + gec->vlans[1].control = 1;
821 + gec->vlans[2].vid = 1;
822 + gec->vlans[2].control = 1;
823 + gec->vlans[3].vid = 1;
824 + gec->vlans[3].control = 0;
826 + cns21xx_gec_wr(gec, GEC_REG_VLAN_ID_0_1,
827 + (gec->vlans[0].vid & 0x0fff) |
828 + ((gec->vlans[1].vid & 0x0fff) << 16));
830 + cns21xx_gec_wr(gec, GEC_REG_VLAN_ID_2_3,
831 + (gec->vlans[2].vid & 0x0fff) |
832 + ((gec->vlans[3].vid & 0x0fff) << 16));
834 + cns21xx_gec_wr(gec, GEC_REG_VLAN_CTRL,
835 + (gec->vlans[0].control << 0) |
836 + (gec->vlans[1].control << 1) |
837 + (gec->vlans[2].control << 2) |
838 + (gec->vlans[3].control << 3));
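(Editor's arithmetic, not part of the patch: with the table set up above - VIDs 2, 2, 1, 1 and controls 0, 1, 1, 0 - the three register writes work out to:)

	GEC_REG_VLAN_ID_0_1 = 0x0002 | (0x0002 << 16)                    = 0x00020002
	GEC_REG_VLAN_ID_2_3 = 0x0001 | (0x0001 << 16)                    = 0x00010001
	GEC_REG_VLAN_CTRL   = (0 << 0) | (1 << 1) | (1 << 2) | (0 << 3)  = 0x00000006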
841 +static int cns21xx_gec_arl_config(struct cns21xx_gec *gec)
845 + arl_config = cns21xx_gec_rr(gec, GEC_REG_ARL_CFG);
848 + arl_config |= (0x1 << 4);
850 + /* My MAC only enable */
851 + arl_config |= (0x1 << 3);
854 + arl_config &= ~(0x1 << 2);
856 + /* Forward MC to CPU */
857 + arl_config &= ~(0x1 << 1);
859 + /* Hash direct mode */
860 + arl_config &= ~(0x1);
862 + cns21xx_gec_wr(gec, GEC_REG_ARL_CFG, arl_config);
867 +static void cns21xx_gec_phy_powerdown(struct cns21xx_gec *gec)
871 + cns21xx_gec_read_phy(gec, gec->phy_addr, 0, &phy_data);
872 + phy_data |= (0x1 << 11);
873 + cns21xx_gec_write_phy(gec, gec->phy_addr, 0, phy_data);
875 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
876 + PWRMGT_SOFTWARE_RESET_CONTROL_REG &= ~(0x1 << 15);
879 +static void cns21xx_gec_phy_powerup(struct cns21xx_gec *gec)
883 + cns21xx_gec_read_phy(gec, gec->phy_addr, 0, &phy_data);
884 + phy_data &= ~(0x1 << 11);
885 + cns21xx_gec_write_phy(gec, gec->phy_addr, 0, phy_data);
887 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
890 +static void cns21xx_gec_enable(struct cns21xx_gec *gec)
893 + cns21xx_gec_wr(gec, GEC_REG_RX_DMA_CTRL, 1);
895 + cns21xx_gec_phy_powerup(gec);
896 + internal_phy_start_timer(gec);
899 +static void cns21xx_gec_shutdown(struct cns21xx_gec *gec)
901 + /* stop Rx and Tx DMA */
902 + cns21xx_gec_wr(gec, GEC_REG_RX_DMA_CTRL, 0);
903 + cns21xx_gec_wr(gec, GEC_REG_TX_DMA_CTRL, 0);
905 + internal_phy_stop_timer(gec);
908 +static irqreturn_t cns21xx_gec_receive_isr(int irq, void *dev_id)
910 + struct cns21xx_gec *gec = dev_id;
912 + if (!test_bit(NAPI_STATE_SCHED, &gec->napi.state)) {
913 + if (likely(napi_schedule_prep(&gec->napi)))
914 + __napi_schedule(&gec->napi);
917 + return IRQ_HANDLED;
920 +static int cns21xx_gec_install_receive_isr(struct cns21xx_gec *gec)
924 + err = request_irq(gec->rxrc_irq, cns21xx_gec_receive_isr,
925 + IRQF_SHARED, dev_name(&gec->netdev->dev), gec);
927 + dev_err(&gec->netdev->dev,
928 + "unable to get IRQ %d (err=%d)\n",
929 + gec->rxrc_irq, err);
934 +static void cns21xx_gec_uninstall_receive_isr(struct cns21xx_gec *gec)
936 + free_irq(gec->rxrc_irq, gec);
939 +static irqreturn_t cns21xx_gec_rxqf_isr(int irq, void *dev_id)
941 + struct cns21xx_gec *gec = dev_id;
944 +	 * in the normal state the RX-queue-full interrupt is raised only once,
945 +	 * and set_bit() is atomic, so there is no need to mask the interrupt here
947 + set_bit(0, &gec->rx_queue_full);
948 + if (!test_bit(NAPI_STATE_SCHED, &gec->napi.state)) {
949 + if (likely(napi_schedule_prep(&gec->napi)))
950 + __napi_schedule(&gec->napi);
953 + return IRQ_HANDLED;
956 +static int cns21xx_gec_install_rxqf_isr(struct cns21xx_gec *gec)
960 + /* QUEUE full interrupt handler */
961 + err = request_irq(gec->rxqf_irq, cns21xx_gec_rxqf_isr,
962 + IRQF_SHARED, dev_name(&gec->netdev->dev), gec);
964 + dev_err(&gec->netdev->dev,
965 + "unable to get IRQ %d (err=%d)\n",
966 + gec->rxqf_irq, err);
971 +static void cns21xx_gec_uninstall_rxqf_isr(struct cns21xx_gec *gec)
973 + free_irq(gec->rxqf_irq, gec);
976 +static void cns21xx_gec_mib_reset(struct cns21xx_gec *gec)
978 + unsigned long flags;
980 + local_irq_save(flags);
981 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_OK_PKT_CNTR);
982 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_OK_BYTE_CNTR);
983 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_RUNT_BYTE_CNTR);
984 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_OSIZE_DROP_PKT_CNTR);
985 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_NO_BUF_DROP_PKT_CNTR);
986 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_CRC_ERR_PKT_CNTR);
987 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_ARL_DROP_PKT_CNTR);
988 + (void) cns21xx_gec_rr(gec, GEC_REG_MYVLANID_MISMATCH_DROP_PKT_CNTR);
989 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_CHKSUM_ERR_PKT_CNTR);
990 + (void) cns21xx_gec_rr(gec, GEC_REG_RX_PAUSE_FRAME_PKT_CNTR);
991 + (void) cns21xx_gec_rr(gec, GEC_REG_TX_OK_PKT_CNTR);
992 + (void) cns21xx_gec_rr(gec, GEC_REG_TX_OK_BYTE_CNTR);
993 + (void) cns21xx_gec_rr(gec, GEC_REG_TX_PAUSE_FRAME_CNTR);
994 + local_irq_restore(flags);
997 +static void cns21xx_gec_mib_read(struct cns21xx_gec *gec)
999 + unsigned long flags;
1001 + local_irq_save(flags);
1002 + gec->mib_info.mib_rx_ok_pkt +=
1003 + cns21xx_gec_rr(gec, GEC_REG_RX_OK_PKT_CNTR);
1004 + gec->mib_info.mib_rx_ok_byte +=
1005 + cns21xx_gec_rr(gec, GEC_REG_RX_OK_BYTE_CNTR);
1006 + gec->mib_info.mib_rx_runt +=
1007 + cns21xx_gec_rr(gec, GEC_REG_RX_RUNT_BYTE_CNTR);
1008 + gec->mib_info.mib_rx_over_size +=
1009 + cns21xx_gec_rr(gec, GEC_REG_RX_OSIZE_DROP_PKT_CNTR);
1010 + gec->mib_info.mib_rx_no_buffer_drop +=
1011 + cns21xx_gec_rr(gec, GEC_REG_RX_NO_BUF_DROP_PKT_CNTR);
1012 + gec->mib_info.mib_rx_crc_err +=
1013 + cns21xx_gec_rr(gec, GEC_REG_RX_CRC_ERR_PKT_CNTR);
1014 + gec->mib_info.mib_rx_arl_drop +=
1015 + cns21xx_gec_rr(gec, GEC_REG_RX_ARL_DROP_PKT_CNTR);
1016 + gec->mib_info.mib_rx_myvid_drop +=
1017 + cns21xx_gec_rr(gec, GEC_REG_MYVLANID_MISMATCH_DROP_PKT_CNTR);
1018 + gec->mib_info.mib_rx_csum_err +=
1019 + cns21xx_gec_rr(gec, GEC_REG_RX_CHKSUM_ERR_PKT_CNTR);
1020 + gec->mib_info.mib_rx_pause_frame +=
1021 + cns21xx_gec_rr(gec, GEC_REG_RX_PAUSE_FRAME_PKT_CNTR);
1022 + gec->mib_info.mib_tx_ok_pkt +=
1023 + cns21xx_gec_rr(gec, GEC_REG_TX_OK_PKT_CNTR);
1024 + gec->mib_info.mib_tx_ok_byte +=
1025 + cns21xx_gec_rr(gec, GEC_REG_TX_OK_BYTE_CNTR);
1026 + gec->mib_info.mib_tx_pause_frame +=
1027 + cns21xx_gec_rr(gec, GEC_REG_TX_PAUSE_FRAME_CNTR);
1028 + local_irq_restore(flags);
1031 +static const char *cns21xx_gec_speed_str(u32 phy_ctrl1)
1033 + switch ((phy_ctrl1 >> 2) & 3) {
1045 +static irqreturn_t cns21xx_gec_status_isr(int irq, void *dev_id)
1047 + struct cns21xx_gec *gec = dev_id;
1050 + int_status = cns21xx_gec_rr(gec, GEC_REG_INT);
1051 + cns21xx_gec_wr(gec, GEC_REG_INT, int_status);
1054 + (void) cns21xx_gec_rr(gec, GEC_REG_INT);
1056 + if (int_status & GEC_INT_MIB_COUNTER_TH)
1057 + cns21xx_gec_mib_read(gec);
1059 + if (int_status & GEC_INT_PORT_STATUS_CHG) {
1062 + phy_ctrl1 = cns21xx_gec_rr(gec, GEC_REG_PHY_CTRL1);
1063 + if (phy_ctrl1 & BIT(0)) {
1064 + netif_carrier_on(gec->netdev);
1065 + dev_info(&gec->netdev->dev,
1066 + "link up (%sMbps/%s duplex)\n",
1067 + cns21xx_gec_speed_str(phy_ctrl1),
1068 + (phy_ctrl1 & BIT(4)) ? "Full" : "Half");
1070 + netif_carrier_off(gec->netdev);
1071 + dev_info(&gec->netdev->dev, "link down\n");
1075 + return IRQ_HANDLED;
1078 +static inline void cns21xx_gec_enable_interrupt(struct cns21xx_gec *gec,
1081 + cns21xx_gec_wr(gec, GEC_REG_INT_MASK,
1082 + cns21xx_gec_rr(gec, GEC_REG_INT_MASK) & ~mask);
1085 +static int cns21xx_gec_install_status_isr(struct cns21xx_gec *gec)
1089 + err = request_irq(gec->status_irq, cns21xx_gec_status_isr,
1090 + IRQF_DISABLED, dev_name(&gec->netdev->dev), gec);
1093 + dev_err(&gec->netdev->dev,
1094 + "unable to get IRQ %d (err=%d)\n",
1095 + gec->status_irq, err);
1099 + cns21xx_gec_enable_interrupt(gec, GEC_INT_MIB_COUNTER_TH |
1100 + GEC_INT_PORT_STATUS_CHG);
1105 +static inline void cns21xx_gec_uninstall_status_isr(struct cns21xx_gec *gec)
1107 + free_irq(gec->status_irq, gec);
1110 +static void cns21xx_gec_uninstall_isr(struct cns21xx_gec *gec)
1112 + cns21xx_gec_uninstall_rxqf_isr(gec);
1113 + cns21xx_gec_uninstall_status_isr(gec);
1114 + cns21xx_gec_uninstall_receive_isr(gec);
1117 +static int cns21xx_gec_install_isr(struct cns21xx_gec *gec)
1121 + /* setup delayed interrupts */
1122 + cns21xx_gec_wr(gec, GEC_REG_DLY_INT_CFG,
1124 + ((CNS21XX_PEND_INT_COUNT & 0xFF) << 8) |
1125 + (CNS21XX_PEND_INT_TIME & 0xFF));
1127 + err = cns21xx_gec_install_receive_isr(gec);
1131 + err = cns21xx_gec_install_rxqf_isr(gec);
1133 + goto err_uninstall_receive;
1135 + err = cns21xx_gec_install_status_isr(gec);
1137 + goto err_uninstall_rxqf;
1141 + err_uninstall_rxqf:
1142 + cns21xx_gec_uninstall_rxqf_isr(gec);
1143 + err_uninstall_receive:
1144 + cns21xx_gec_uninstall_receive_isr(gec);
1149 +static int cns21xx_gec_lan_open(struct net_device *dev)
1151 + struct cns21xx_gec *gec = netdev_priv(dev);
1154 + dev_dbg(&gec->netdev->dev, "open\n");
1157 + MOD_INC_USE_COUNT;
1160 + napi_enable(&gec->napi);
1161 + netif_start_queue(dev);
1162 + err = cns21xx_gec_install_isr(gec);
1166 + cns21xx_gec_enable(gec);
1171 + netif_stop_queue(dev);
1172 + napi_disable(&gec->napi);
1176 +static void cns21xx_gec_timeout(struct net_device *dev)
1178 + dev_dbg(&dev->dev, "timeout\n");
1179 + netif_wake_queue(dev);
1180 + dev->trans_start = jiffies;
1183 +static int cns21xx_gec_close(struct net_device *dev)
1185 + struct cns21xx_gec *gec = netdev_priv(dev);
1187 + cns21xx_gec_phy_powerdown(gec);
1188 + cns21xx_gec_uninstall_isr(gec);
1189 + napi_disable(&gec->napi);
1190 + netif_stop_queue(dev);
1191 + cns21xx_gec_shutdown(gec);
1194 + MOD_DEC_USE_COUNT;
1200 +static inline struct sk_buff *cns21xx_gec_alloc_skb(void)
1202 + struct sk_buff *skb;
1204 + skb = dev_alloc_skb(MAX_PACKET_LEN + 2);
1206 + if (unlikely(!skb))
1209 + /* Make buffer alignment 2 beyond a 16 byte boundary
1210 + * this will result in a 16 byte aligned IP header after
1211 + * the 14 byte MAC header is removed
1213 +	skb_reserve(skb, 2);	/* 2-byte offset so the IP header ends up aligned */
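(Editor's note, not part of the patch: this is the usual Linux receive-buffer trick the comment above describes - with skb->data starting 2 bytes past an aligned boundary, the 14-byte Ethernet header ends at offset 16, so the IP header that follows is aligned:)

	/*  offset 0..1  : reserved by skb_reserve(skb, 2)                 */
	/*  offset 2..15 : 14-byte Ethernet header                         */
	/*  offset 16..  : IP header, aligned as the comment above intends */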
1218 +static inline int cns21xx_gec_tx_dma_size(struct cns21xx_gec *gec)
1220 + return gec->txring.count * sizeof(struct cns21xx_gec_txd);
1223 +static inline int cns21xx_gec_rx_dma_size(struct cns21xx_gec *gec)
1225 + return gec->rxring.count * sizeof(struct cns21xx_gec_rxd);
1228 +static void __init cns21xx_gec_buffer_free(struct cns21xx_gec *gec)
1230 + struct cns21xx_gec_ring *txring = &gec->txring;
1231 + struct cns21xx_gec_ring *rxring = &gec->rxring;
1234 + if (rxring->desc_cpu) {
1235 + for (i = 0; i < rxring->count; i++) {
1236 + if (rxring->skbs[i])
1237 + dev_kfree_skb(rxring->skbs[i]);
1240 + dma_free_coherent(gec->parent, cns21xx_gec_rx_dma_size(gec),
1241 + rxring->desc_cpu, rxring->desc_dma);
1242 +		rxring->desc_cpu = NULL;
1245 + if (txring->desc_cpu) {
1246 + dma_free_coherent(gec->parent, cns21xx_gec_tx_dma_size(gec),
1247 + txring->desc_cpu, txring->desc_dma);
1248 +		txring->desc_cpu = NULL;
1251 + kfree(txring->skbs);
1252 + kfree(rxring->skbs);
1255 +static int __init cns21xx_gec_buffer_alloc(struct cns21xx_gec *gec)
1257 + struct cns21xx_gec_ring *txring = &gec->txring;
1258 + struct cns21xx_gec_ring *rxring = &gec->rxring;
1259 + struct cns21xx_gec_rxd *rxd;
1260 + struct cns21xx_gec_txd *txd;
1261 + struct sk_buff *skb;
1262 + int err = -ENOMEM;
1265 +	rxring->skbs = kzalloc(rxring->count * sizeof(struct sk_buff *),
1268 + if (rxring->skbs == NULL) {
1269 + dev_err(&gec->netdev->dev,
1270 + "%s allocation failed\n", "RX buffer");
1274 +	txring->skbs = kzalloc(txring->count * sizeof(struct sk_buff *),
1277 + if (txring->skbs == NULL) {
1278 + dev_err(&gec->netdev->dev,
1279 + "%s allocation failed\n", "TX buffer");
1283 + rxring->desc_cpu = dma_alloc_coherent(gec->parent,
1284 + cns21xx_gec_rx_dma_size(gec),
1285 + &rxring->desc_dma,
1287 + if (!rxring->desc_cpu) {
1288 + dev_err(&gec->netdev->dev,
1289 + "%s allocation failed\n", "RX ring");
1293 + txring->desc_cpu = dma_alloc_coherent(gec->parent,
1294 + cns21xx_gec_tx_dma_size(gec),
1295 + &txring->desc_dma,
1297 + if (!txring->desc_cpu) {
1298 + dev_err(&gec->netdev->dev,
1299 + "%s allocation failed\n", "TX ring");
1303 + /* Clean RX Memory */
1304 + memset((void *)rxring->desc_cpu, 0, cns21xx_gec_rx_dma_size(gec));
1305 + dev_dbg(&gec->netdev->dev,
1306 + "rxring->desc_cpu=0x%08X rxring->desc_dma=0x%08X\n",
1307 + (u32) rxring->desc_cpu,
1308 + (u32) rxring->desc_dma);
1310 +	/* start filling the RX ring from index zero */
1312 + rxd = rxring->desc_cpu;
1313 + for (i = 0; i < rxring->count; i++, rxd++) {
1314 + if (i == (rxring->count - 1)) {
1315 + /* End bit == 0; */
1318 + skb = cns21xx_gec_alloc_skb();
1320 + dev_err(&gec->netdev->dev,
1321 + "%s allocation failed\n", "skb");
1325 +		/* map the receive buffer for DMA */
1326 + rxring->skbs[i] = skb;
1327 + rxd->sdp = dma_map_single(gec->parent,
1331 + rxd->length = MAX_PACKET_LEN;
1334 + /* Clear TX Memory */
1335 + memset((void *)txring->desc_cpu, 0, cns21xx_gec_tx_dma_size(gec));
1336 + dev_dbg(&gec->netdev->dev,
1337 + "txring->desc_cpu=0x%08X txring->desc_dma=0x%08X\n",
1338 + (u32) txring->desc_cpu,
1339 + (u32) txring->desc_dma);
1341 +	/* start filling the TX ring from index zero */
1343 + txd = txring->desc_cpu;
1344 + for (i = 0; i < txring->count; i++, txd++) {
1345 + if (i == (txring->count - 1)) {
1346 + /* End of Ring ==1 */
1349 +		/* TX ring: COWN == 1, descriptor owned by the CPU */
1352 +#ifdef CNS21XX_GEC_TX_HW_CHECKSUM
1353 + /* Enable Checksum */
1362 + /* clear txring->skbs */
1363 + txring->skbs[i] = NULL;
1369 + cns21xx_gec_buffer_free(gec);
1373 +static int cns21xx_gec_get_rfd_buff(struct cns21xx_gec *gec, int index)
1375 + struct cns21xx_gec_ring *rxring = &gec->rxring;
1376 + struct cns21xx_gec_rxd *rxd;
1377 + struct sk_buff *skb;
1378 + unsigned char *data;
1381 +	/* get the RX descriptor for this index */
1382 + rxd = ((struct cns21xx_gec_rxd *) rxring->desc_cpu) + index;
1383 + skb = rxring->skbs[index];
1385 + len = rxd->length;
1387 + dma_unmap_single(gec->parent, rxd->sdp, len,
1390 + data = skb_put(skb, len);
1392 + skb->dev = gec->netdev;
1394 +#ifdef CNS21XX_GEC_RX_HW_CHECKSUM
1395 + if (rxd->ipf == 1 || rxd->l4f == 1) {
1396 + if (rxd->prot != 0x11) {
1397 + skb->ip_summed = CHECKSUM_NONE;
1399 + /* CheckSum Fail */
1400 + skb->dev->stats.rx_errors++;
1404 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1407 + skb->ip_summed = CHECKSUM_NONE;
1410 +	/* required: without this the packet is never handed to the network layer */
1411 + skb->protocol = eth_type_trans(skb, skb->dev);
1413 + skb->dev->stats.rx_packets++;
1414 + skb->dev->stats.rx_bytes += len;
1415 + skb->dev->last_rx = jiffies;
1417 +	/* passing packets up with netif_rx() makes this driver oops; use netif_receive_skb() */
1418 + netif_receive_skb(skb);
1423 + dev_kfree_skb_any(skb);
1427 +static void cns21xx_gec_receive_packet(struct cns21xx_gec *gec,
1428 + int mode, int *work_done, int work_to_do)
1430 + struct cns21xx_gec_ring *rxring = &gec->rxring;
1433 + struct cns21xx_gec_rxd *rxd;
1434 + struct sk_buff *skb;
1435 + int i, rxcount = 0;
1437 + rxd = ((struct cns21xx_gec_rxd *) rxring->desc_cpu) + rxring->curr;
1438 + rxsd_current = cns21xx_gec_rr(gec, GEC_REG_RX_DPTR);
1439 + rxsd_index = (rxsd_current - (u32)rxring->desc_dma) >> 4;
1441 + if (rxsd_index > rxring->curr) {
1442 + rxcount = rxsd_index - rxring->curr;
1443 + } else if (rxsd_index < rxring->curr) {
1444 + rxcount = (rxring->count - rxring->curr) +
1447 + if (rxd->cown == 0) {
1448 + goto receive_packet_exit;
1451 + rxcount = rxring->count;
1455 + for (i = 0; i < rxcount; i++) {
1456 + if (*work_done >= work_to_do)
1461 + if (rxd->cown != 0) {
1462 + /* Alloc New skb_buff */
1463 + skb = cns21xx_gec_alloc_skb();
1465 + /* Check skb_buff */
1466 + if (skb != NULL) {
1467 + cns21xx_gec_get_rfd_buff(gec, rxring->curr);
1468 + rxring->skbs[rxring->curr] = skb;
1470 + dma_map_single(gec->parent,
1474 + rxd->length = MAX_PACKET_LEN;
1476 +				/* clear the COWN bit to hand the descriptor back to the DMA engine */
1480 +				 * TODO: bump the rx_dropped statistic here;
1481 +				 * doing so may affect performance
1483 + dev_warn(&gec->netdev->dev,
1484 + "skb allocation failed, reuse the buffer\n");
1486 +				/* clear the COWN bit to hand the descriptor back to the DMA engine */
1492 + dev_err(&gec->netdev->dev, "encounter COWN == 0 BUG\n");
1496 + if (rxring->curr == (rxring->count - 1)) {
1498 + rxd = rxring->desc_cpu;
1505 + receive_packet_exit:
1509 +static int cns21xx_gec_poll(struct napi_struct *napi, int budget)
1511 + struct cns21xx_gec *gec;
1512 + int work_done = 0;
1513 + int work_to_do = budget;
1515 + gec = container_of(napi, struct cns21xx_gec, napi);
1516 + cns21xx_gec_receive_packet(gec, 0, &work_done, work_to_do);
1518 + budget -= work_done;
1520 + /* if no Tx and not enough Rx work done, exit the polling mode */
1522 + if (test_bit(0, &gec->rx_queue_full) == 1) {
1524 + clear_bit(0, &gec->rx_queue_full);
1525 + /* start Rx DMA */
1526 + cns21xx_gec_wr(gec, GEC_REG_RX_DMA_CTRL, 1);
1530 + napi_complete(&gec->napi);
1531 +#ifdef CONFIG_STAR_NIC_NAPI_MASK_IRQ
1532 + enable_irq(gec->rxrc_irq);
1540 +static int cns21xx_gec_send_packet(struct sk_buff *skb, struct net_device *dev)
1542 + struct cns21xx_gec *gec = netdev_priv(dev);
1543 + struct cns21xx_gec_txd *txd;
1544 + struct cns21xx_gec_ring *txring = &gec->txring;
1545 + struct sk_buff *skb_free = NULL;
1546 + unsigned long flags;
1549 + if (skb_padto(skb, ETH_ZLEN))
1550 + return NETDEV_TX_OK;
1552 + len = max_t(unsigned int, skb->len, ETH_ZLEN);
1554 + spin_lock_irqsave(&gec->tx_lock, flags);
1556 + txd = ((struct cns21xx_gec_txd *) txring->desc_cpu) + txring->curr;
1557 + if (txd->cown == 0) {
1558 + /* This TFD is busy */
1559 + spin_unlock_irqrestore(&gec->tx_lock, flags);
1560 + /* re-queue the skb */
1561 + return NETDEV_TX_BUSY;
1564 + if (txd->sdp != 0) {
1565 +		/* the previously transmitted skb must be freed here */
1566 + skb_free = txring->skbs[txring->curr];
1568 + dma_unmap_single(gec->parent,
1572 + txring->dirty = txring->curr + 1;
1573 + if (txring->dirty == txring->count)
1574 + txring->dirty = 0;
1577 +#ifdef CNS21XX_GEC_TX_HW_CHECKSUM
1578 + if (skb->protocol == __constant_htons(ETH_P_IP)) {
1579 + if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1582 + } else if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
1594 +#endif /* CNS21XX_GEC_TX_HW_CHECKSUM */
1596 + txring->skbs[txring->curr] = skb;
1598 + txd->length = len;
1599 + txd->sdp = dma_map_single(gec->parent, skb->data, len,
1605 + /* Wake interrupt */
1611 + /* Start Tx DMA */
1612 + cns21xx_gec_wr(gec, GEC_REG_TX_DMA_CTRL, 1);
1614 + dev->stats.tx_packets++;
1615 + dev->stats.tx_bytes += skb->len;
1616 + dev->trans_start = jiffies;
1619 + if (txring->curr == txring->count)
1622 + spin_unlock_irqrestore(&gec->tx_lock, flags);
1625 + dev_kfree_skb(skb_free);
1627 + cns21xx_gec_timer_modify(gec, 10);
1629 + return NETDEV_TX_OK;
1632 +static void cns21xx_gec_set_mac_addr(struct cns21xx_gec *gec,
1633 + const char *mac_addr)
1635 + cns21xx_gec_wr(gec, GEC_REG_MY_MAC_H,
1636 + (mac_addr[0] << 8) | mac_addr[1]);
1638 + cns21xx_gec_wr(gec, GEC_REG_MY_MAC_L,
1639 + (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1640 + (mac_addr[4] << 8) | mac_addr[5]);
1642 +	dev_dbg(&gec->netdev->dev, "MAC address: %pM\n", mac_addr);
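(Editor's arithmetic, not part of the patch: for a MAC address of 00:11:22:33:44:55 the two writes above program the registers as:)

	GEC_REG_MY_MAC_H = (0x00 << 8) | 0x11                                = 0x00000011
	GEC_REG_MY_MAC_L = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55  = 0x22334455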
1645 +static int cns21xx_gec_set_lan_mac_addr(struct net_device *dev, void *addr)
1647 + struct sockaddr *sock_addr = addr;
1648 + struct cns21xx_gec *gec = netdev_priv(dev);
1650 + spin_lock_irq(&gec->lock);
1651 + memcpy(dev->dev_addr, sock_addr->sa_data, 6);
1652 + cns21xx_gec_set_mac_addr(gec, sock_addr->sa_data);
1653 + spin_unlock_irq(&gec->lock);
1658 +static int __init cns21xx_gec_setup(struct cns21xx_gec *gec)
1663 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
1665 + PWRMGT_SOFTWARE_RESET_CONTROL_REG &= ~(0x1 << 15);
1667 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
1668 + /* set NIC clock to 67.5MHz */
1669 + PWRMGT_SYSTEM_CLOCK_CONTROL_REG |= (0x1 << 7);
1671 + /* enable NIC clock */
1672 + HAL_PWRMGT_ENABLE_NIC_CLOCK();
1674 + cns21xx_gec_wr(gec, GEC_REG_MAC_CFG, 0x00527C00);
1678 + /* Configure GPIO for NIC MDC/MDIO pins */
1679 + HAL_MISC_ENABLE_MDC_MDIO_PINS();
1680 + HAL_MISC_ENABLE_NIC_COL_PINS();
1683 + MISC_GPIOA_PIN_ENABLE_REG |= (0x7 << 22);
1684 + MISC_FAST_ETHERNET_PHY_CONFIG_REG |= (FE_PHY_LED_MODE >> 12) & 0x3;
1687 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
1689 + PWRMGT_SOFTWARE_RESET_CONTROL_REG &= ~(0x1 << 15);
1691 + PWRMGT_SOFTWARE_RESET_CONTROL_REG |= (0x1 << 15);
1694 + /* disable all interrupt status sources */
1695 + cns21xx_gec_wr(gec, GEC_REG_INT_MASK, ~(0));
1696 + /* clear pending interrupts */
1697 + cns21xx_gec_wr(gec, GEC_REG_INT, ~(0));
1699 + /* stop Rx and Tx DMA */
1700 + cns21xx_gec_wr(gec, GEC_REG_TX_DMA_CTRL, 0);
1701 + cns21xx_gec_wr(gec, GEC_REG_RX_DMA_CTRL, 0);
1703 + gec->txring.count = CNS21XX_GEC_NUM_TXDS;
1704 + gec->rxring.count = CNS21XX_GEC_NUM_RXDS;
1705 + err = cns21xx_gec_buffer_alloc(gec);
1709 + cns21xx_gec_mac_config(gec);
1710 + cns21xx_gec_fc_config(gec);
1712 + err = cns21xx_gec_phy_config(gec);
1714 + cns21xx_gec_buffer_free(gec);
1718 + cns21xx_gec_vlan_config(gec);
1719 + cns21xx_gec_arl_config(gec);
1720 + cns21xx_gec_set_mac_addr(gec, gec->netdev->dev_addr);
1721 + cns21xx_gec_mib_reset(gec);
1723 + MISC_DEBUG_PROBE_SELECTION_REG = 0x00000125; /* 0x00000105 pb0_nic */
1725 + cns21xx_gec_wr(gec, GEC_REG_TX_DPTR, gec->txring.desc_dma);
1726 + cns21xx_gec_wr(gec, GEC_REG_TX_BASE_ADDR, gec->txring.desc_dma);
1727 + cns21xx_gec_wr(gec, GEC_REG_RX_DPTR, gec->rxring.desc_dma);
1728 + cns21xx_gec_wr(gec, GEC_REG_RX_BASE_ADDR, gec->rxring.desc_dma);
1730 + cns21xx_gec_dma_config(gec);
1731 + internal_phy_init_timer(gec);
1732 + cns21xx_gec_timer_init(gec);
1737 +static const struct net_device_ops cns21xx_gec_netdev_ops = {
1738 + .ndo_open = cns21xx_gec_lan_open,
1739 + .ndo_stop = cns21xx_gec_close,
1740 + .ndo_start_xmit = cns21xx_gec_send_packet,
1741 + .ndo_set_mac_address = cns21xx_gec_set_lan_mac_addr,
1742 + .ndo_tx_timeout = cns21xx_gec_timeout,
1743 + .ndo_validate_addr = eth_validate_addr,
1744 + .ndo_change_mtu = eth_change_mtu,
1747 +static int __init cns21xx_gec_probe(struct platform_device *pdev)
1749 + struct net_device *netdev;
1750 + struct cns21xx_gec *gec;
1751 + struct cns21xx_gec_plat_data *pdata;
1754 + pdata = pdev->dev.platform_data;
1756 + dev_dbg(&pdev->dev, "no platform data\n");
1761 + if (!pdata->mac_addr) {
1762 + dev_dbg(&pdev->dev, "no mac address\n");
1767 + netdev = alloc_etherdev(sizeof(struct cns21xx_gec));
1773 + SET_NETDEV_DEV(netdev, &pdev->dev);
1775 + gec = netdev_priv(netdev);
1776 + gec->pdata = pdata;
1777 + gec->parent = &pdev->dev;
1779 + gec->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1780 + if (!gec->mem_res) {
1781 + dev_dbg(&pdev->dev, "no iomem resource\n");
1783 + goto err_free_netdev;
1786 + gec->status_irq = platform_get_irq_byname(pdev,
1787 + CNS21XX_GEC_STATUS_IRQ_NAME);
1788 + if (gec->status_irq < 0) {
1789 + dev_dbg(&pdev->dev, "%s irq not specified\n",
1790 + CNS21XX_GEC_STATUS_IRQ_NAME);
1792 + goto err_free_netdev;
1795 + gec->rxrc_irq = platform_get_irq_byname(pdev,
1796 + CNS21XX_GEC_RXRC_IRQ_NAME);
1797 + if (gec->rxrc_irq < 0) {
1798 + dev_dbg(&pdev->dev, "%s irq not specified\n",
1799 + CNS21XX_GEC_RXRC_IRQ_NAME);
1801 + goto err_free_netdev;
1804 + gec->rxqf_irq = platform_get_irq_byname(pdev,
1805 + CNS21XX_GEC_RXQF_IRQ_NAME);
1806 + if (gec->rxqf_irq < 0) {
1807 + dev_dbg(&pdev->dev, "%s irq not specified\n",
1808 + CNS21XX_GEC_RXQF_IRQ_NAME);
1810 + goto err_free_netdev;
1813 + gec->txtc_irq = platform_get_irq_byname(pdev,
1814 + CNS21XX_GEC_TXTC_IRQ_NAME);
1815 + if (gec->txtc_irq < 0) {
1816 + dev_dbg(&pdev->dev, "%s irq not specified\n",
1817 + CNS21XX_GEC_TXTC_IRQ_NAME);
1819 + goto err_free_netdev;
1822 + gec->txqe_irq = platform_get_irq_byname(pdev,
1823 + CNS21XX_GEC_TXQE_IRQ_NAME);
1824 + if (gec->txqe_irq < 0) {
1825 + dev_dbg(&pdev->dev, "%s irq not specified\n",
1826 + CNS21XX_GEC_TXQE_IRQ_NAME);
1828 + goto err_free_netdev;
1831 + if (!request_mem_region(gec->mem_res->start,
1832 + resource_size(gec->mem_res), pdev->name)) {
1833 + dev_err(&pdev->dev, "unable to request mem region\n");
1835 + goto err_free_netdev;
1838 + gec->base = ioremap(gec->mem_res->start, resource_size(gec->mem_res));
1840 +		dev_err(&pdev->dev, "ioremap failed\n");
1842 + goto err_release_mem;
1845 + platform_set_drvdata(pdev, netdev);
1847 + spin_lock_init(&gec->lock);
1848 + spin_lock_init(&gec->tx_lock);
1850 + netdev->base_addr = gec->mem_res->start;
1851 + netdev->netdev_ops = &cns21xx_gec_netdev_ops;
1852 +#if defined(CNS21XX_GEC_TX_HW_CHECKSUM)
1853 + netdev->features = NETIF_F_IP_CSUM;
1855 + memcpy(netdev->dev_addr, pdata->mac_addr, 6);
1856 + netif_napi_add(netdev, &gec->napi, cns21xx_gec_poll, 64);
1858 + gec->netdev = netdev;
1859 + err = register_netdev(netdev);
1863 + err = cns21xx_gec_setup(gec);
1865 + goto err_unregister_netdev;
1869 + err_unregister_netdev:
1870 + unregister_netdev(netdev);
1872 + platform_set_drvdata(pdev, NULL);
1873 + iounmap(gec->base);
1875 + release_mem_region(gec->mem_res->start, resource_size(gec->mem_res));
1877 + free_netdev(netdev);
1882 +static int __devexit cns21xx_gec_remove(struct platform_device *pdev)
1884 + struct net_device *netdev = platform_get_drvdata(pdev);
1885 + struct cns21xx_gec *gec = netdev_priv(netdev);
1887 + unregister_netdev(netdev);
1888 + platform_set_drvdata(pdev, NULL);
1889 + iounmap(gec->base);
1890 + release_mem_region(gec->mem_res->start, resource_size(gec->mem_res));
1891 + free_netdev(netdev);
1896 +static struct platform_driver cns21xx_gec_driver = {
1897 + .remove = __devexit_p(cns21xx_gec_remove),
1899 + .name = "cns21xx-gec",
1900 + .owner = THIS_MODULE,
1904 +static int __init cns21xx_gec_init(void)
1906 + return platform_driver_probe(&cns21xx_gec_driver, cns21xx_gec_probe);
1909 +static void __exit cns21xx_gec_exit(void)
1911 + platform_driver_unregister(&cns21xx_gec_driver);
1914 +module_init(cns21xx_gec_init);
1915 +module_exit(cns21xx_gec_exit);
1917 +#define INTERNAL_PHY_PATCH_CHECKCNT 16
1918 +#define INTERNAL_PHY_PATCH_CHECK_PERIOD 1000 /* ms */
1920 +static void (*phy_statemachine)(struct cns21xx_gec*, int, int, int);
1922 +#define ETH3220_PHY_MON_PERIOD INTERNAL_PHY_PATCH_CHECK_PERIOD
1924 +/* phy monitor state */
1926 +#define PHY_STATE_INIT 0
1927 +#define LINK_DOWN_POSITIVE 1
1928 +#define WAIT_LINK_UP_POSITIVE 2
1929 +#define LINK_UP_POSITIVE 3
1930 +#define WAIT_BYPASS_LINK_UP_POSITIVE 4
1931 +#define BYPASS_AND_LINK_UP_POSITIVE 5
1932 +#define LINK_UP_8101_POSITIVE 6
1933 +#define WAIT_8101_LINK_UP_POSITIVE 7
1935 +#define PHY_STATE_LAST (WAIT_8101_LINK_UP_POSITIVE+1)
1938 +#define WAIT_BYPASS_LINK_UP_POSITIVE_TIMEOUT 5000 /* 5000 ms */
1939 +#define WAIT_BYPASS_LINK_UP_NEGATIVE_TIMEOUT 5000 /* 5000 ms */
1940 +#define LINK_DOWN_ABILITY_DETECT_TIMEOUT 5000 /* 5000 ms */
1941 +#define DETECT_8101_PERIOD 7000 /* 7000 ms */
1942 +#define WAIT_8101_LINK_UP_TIMEOUT 3000 /* 3000 ms */
1944 +#define MAX_PHY_PORT 1
1945 +#define DEFAULT_AGC_TRAIN 16
1946 +#define MAX_AGC_TRAIN 16 /* train 16 times */
1947 +static int agc_train_num = DEFAULT_AGC_TRAIN;
1948 +u32 port_displaybuf[NUM_PHY][MAX_AGC_TRAIN + 1] = {
1952 +static int cuv[3][3] = {
1958 +static u32 link_status_old;
1960 +struct eth3220_phy {
1967 +#define DEBUG_PHY_STATE_TRANSITION 1
1968 +#if DEBUG_PHY_STATE_TRANSITION
1970 + * show state transition of debug phy port.
1971 + * -1 for all ports
1972 + * -2 for disable all ports
1973 + * 0 - 4 for each port
1975 +static int debug_phy_port = -2;
1976 +static char *phystate_name[] = {
1977 + "init", /* PHY_STATE_INIT */
1978 + "ldp", /* LINK_DOWN_POSITIVE */
1979 + "wait_lup", /* WAIT_LINK_UP_POSITIVE */
1980 + "lup", /* LINK_UP_POSITIVE */
1981 + "wait_bp_lup", /* WAIT_BYPASS_LINK_UP_POSITIVE */
1982 + "bp_lup", /* BYPASS_AND_LINK_UP_POSITIVE */
1983 + "8101_lup", /* LINK_UP_8101_POSITIVE */
1984 + "wait_8101_lup", /* WAIT_8101_LINK_UP_POSITIVE */
1987 +#endif /* DEBUG_PHY_STATE_TRANSITION */
1989 +static struct eth3220_phy phy[5] = {
1990 + {PHY_STATE_INIT, 0, 0, 0},
1991 + {PHY_STATE_INIT, 0, 0, 0},
1992 + {PHY_STATE_INIT, 0, 0, 0},
1993 + {PHY_STATE_INIT, 0, 0, 0},
1994 + {PHY_STATE_INIT, 0, 0, 0}
1997 +static u16 long_cable_global_reg[32] = {
1998 + 0x0000, 0x19a0, 0x1d00, 0x0e80,
1999 + 0x0f60, 0x07c0, 0x07e0, 0x03e0,
2000 + 0x0000, 0x0000, 0x0000, 0x2000,
2001 + 0x8250, 0x1700, 0x0000, 0x0000,
2002 + 0x0000, 0x0000, 0x0000, 0x0000,
2003 + 0x0000, 0x204b, 0x01c2, 0x0000,
2004 + 0x0000, 0x0000, 0x0fff, 0x4100,
2005 + 0x9319, 0x0021, 0x0034, 0x270a | FE_PHY_LED_MODE
2008 +static u16 long_cable_local_reg[32] = {
2009 + 0x3100, 0x786d, 0x01c1, 0xca51,
2010 + 0x05e1, 0x45e1, 0x0003, 0x001c,
2011 + 0x2000, 0x9828, 0xf3c4, 0x400c,
2012 + 0xf8ff, 0x6940, 0xb906, 0x503c,
2013 + 0x8000, 0x297a, 0x1010, 0x5010,
2014 + 0x6ae1, 0x7c73, 0x783c, 0xfbdf,
2015 + 0x2080, 0x3244, 0x1301, 0x1a80,
2016 + 0x8e8f, 0x8000, 0x9c29, 0xa70a | FE_PHY_LED_MODE
2019 +/*=============================================================*
2020 + * eth3220ac_rt8101_phy_setting
2021 + *=============================================================*/
2022 +static void eth3220ac_rt8101_phy_setting(struct cns21xx_gec *gec, int port)
2024 + cns21xx_gec_write_phy(gec, port, 12, 0x18ff);
2025 + cns21xx_gec_write_phy(gec, port, 18, 0x6400);
2028 +static void eth3220ac_release_bpf(struct cns21xx_gec *gec, int port)
2030 + cns21xx_gec_write_phy(gec, port, 18, 0x6210);
2033 +static void eth3220ac_def_bpf(struct cns21xx_gec *gec, int port)
2035 + cns21xx_gec_write_phy(gec, port, 18, 0x6bff);
2038 +static void eth3220ac_def_linkdown_setting(struct cns21xx_gec *gec, int port)
2040 + cns21xx_gec_write_phy(gec, port, 13, 0xe901);
2041 + cns21xx_gec_write_phy(gec, port, 14, 0xa3c6);
2044 +static void eth3220ac_def_linkup_setting(struct cns21xx_gec *gec, int port)
2046 + cns21xx_gec_write_phy(gec, port, 13, 0x6901);
2047 + cns21xx_gec_write_phy(gec, port, 14, 0xa286);
2050 +/*=============================================================*
2051 + * eth3220ac_link_agc:
2052 + *=============================================================*/
2053 +static int eth3220ac_link_agc(struct cns21xx_gec *gec, int port, int speed)
2060 +	/* only continue if the link speed is 100 Mbps */
2066 + for (i = 0; i < agc_train_num; i++) {
2067 + cns21xx_gec_read_phy(gec, port, 15, ®);
2069 + if (reg <= 0x12) {
2072 + agc_data += (u32)reg;
2077 + agc_data = (agc_data / jj) + 4;
2079 + agc_data = (cuv[2][0] * agc_data) / cuv[2][1] /
2080 + agc_train_num - 4;
2083 + agc_data = 0xd0 | (agc_data << 9);
2084 + cns21xx_gec_write_phy(gec, port, 15, agc_data);
2086 + cns21xx_gec_read_phy(gec, port, 15, ®);
2087 + reg &= ~(0x1 << 7);
2088 + cns21xx_gec_write_phy(gec, port, 15, reg);
2093 +/*=============================================================*
2094 + * eth3220ac_unlink_agc:
2095 + *=============================================================*/
2096 +static void eth3220ac_unlink_agc(struct cns21xx_gec *gec, int port)
2098 + /* start AGC adaptive */
2099 + cns21xx_gec_write_phy(gec, port, 15, 0xa050);
2102 +/*=============================================================*
2103 + * eth3220ac_rt8100_check
2104 + *=============================================================*/
2105 +static int eth3220ac_rt8100_check(struct cns21xx_gec *gec, int port)
2109 + /* Read reg27 (error register) */
2110 + cns21xx_gec_read_phy(gec, port, 27, ®);
2111 + /* if error exists, set Bypass Filter enable */
2112 + if ((reg & 0xfffc)) {
2113 + cns21xx_gec_read_phy(gec, port, 15, ®);
2114 + cns21xx_gec_read_phy(gec, port, 27, ®2);
2115 + if ((reg2 & 0xfffc) && (((reg >> 9) & 0xff) < 0x1c)) {
2116 + dev_err(&gec->netdev->dev, "8100 pos err\n");
2118 + /* Bypass agcgain disable */
2119 + cns21xx_gec_write_phy(gec, port, 15, (reg & (~(0x1 << 7))));
2121 +			/* repeat count used when the error threshold is reached */
2122 + cns21xx_gec_write_phy(gec, port, 13, 0x4940);
2125 +			 * Speed up auto-negotiation and compensate the threshold
2128 + cns21xx_gec_write_phy(gec, port, 14, 0xa306);
2130 + /* Bypass Filter enable */
2131 + cns21xx_gec_read_phy(gec, port, 18, ®2);
2133 + cns21xx_gec_write_phy(gec, port, 18, (reg | 0x400));
2136 + cns21xx_gec_write_phy(gec, port, 0, 0x3300);
2144 +/*=============================================================*
2145 + * eth3220ac_rt8100_linkdown
2146 + *=============================================================*/
2147 +static void eth3220ac_rt8100_linkdown(struct cns21xx_gec *gec, int port)
2151 + /* Bypass Filter disable */
2152 + cns21xx_gec_read_phy(gec, port, 18, ®);
2153 + cns21xx_gec_write_phy(gec, port, 18, (reg & (~(0x1 << 10))));
2154 + eth3220ac_def_linkdown_setting(gec, port);
2157 +static void eth3220ac_normal_phy_setting(struct cns21xx_gec *gec, int port)
2159 + cns21xx_gec_write_phy(gec, port, 12, 0xd8ff);
2160 + eth3220ac_def_bpf(gec, port);
2163 +/*=============================================================*
2164 + * wp3220ac_phystate
2165 + *=============================================================*/
2166 +static void wp3220ac_phystate(struct cns21xx_gec *gec, int port, int link, int speed)
2171 + phy[port].timer += ETH3220_PHY_MON_PERIOD;
2174 + /* Link up state */
2175 + switch (phy[port].state) {
2176 + case LINK_UP_POSITIVE:
2177 + next_state = eth3220ac_rt8100_check(gec, port) ?
2178 + WAIT_BYPASS_LINK_UP_POSITIVE :
2182 + case PHY_STATE_INIT:
2183 + case WAIT_LINK_UP_POSITIVE:
2184 + case LINK_DOWN_POSITIVE:
2185 + next_state = LINK_UP_POSITIVE;
2186 + eth3220ac_def_linkup_setting(gec, port);
2187 + eth3220ac_link_agc(gec, port, speed);
2188 + eth3220ac_release_bpf(gec, port);
2191 + case WAIT_BYPASS_LINK_UP_POSITIVE:
2192 + case BYPASS_AND_LINK_UP_POSITIVE:
2193 + next_state = BYPASS_AND_LINK_UP_POSITIVE;
2196 + case WAIT_8101_LINK_UP_POSITIVE:
2197 + next_state = LINK_UP_8101_POSITIVE;
2198 + eth3220ac_link_agc(gec, port, speed);
2199 + cns21xx_gec_write_phy(gec, port, 12, 0x98ff);
2202 + case LINK_UP_8101_POSITIVE:
2203 + next_state = LINK_UP_8101_POSITIVE;
2207 + next_state = LINK_UP_POSITIVE;
2208 + eth3220ac_def_linkup_setting(gec, port);
2209 + eth3220ac_link_agc(gec, port, speed);
2212 + /* Link down state */
2213 + switch (phy[port].state) {
2214 + case LINK_DOWN_POSITIVE:
2215 + cns21xx_gec_read_phy(gec, port, 5, ®);
2216 + cns21xx_gec_read_phy(gec, port, 28, ®2);
2218 + /* AN Link Partner Ability Register or NLP */
2219 + if (reg || (reg2 & 0x100))
2220 + next_state = WAIT_LINK_UP_POSITIVE;
2222 + next_state = LINK_DOWN_POSITIVE;
2225 + case WAIT_LINK_UP_POSITIVE:
2226 + if (phy[port].state_time >
2227 + LINK_DOWN_ABILITY_DETECT_TIMEOUT)
2228 + next_state = LINK_DOWN_POSITIVE;
2230 + next_state = WAIT_LINK_UP_POSITIVE;
2233 + case WAIT_BYPASS_LINK_UP_POSITIVE:
2234 + /* set timeout = 5 sec */
2235 + if (phy[port].state_time >
2236 + WAIT_BYPASS_LINK_UP_POSITIVE_TIMEOUT) {
2237 + next_state = LINK_DOWN_POSITIVE;
2239 + /* Bypass Filter disable */
2240 + eth3220ac_rt8100_linkdown(gec, port);
2241 + eth3220ac_def_bpf(gec, port);
2243 + next_state = WAIT_BYPASS_LINK_UP_POSITIVE;
2247 + case BYPASS_AND_LINK_UP_POSITIVE:
2248 + next_state = LINK_DOWN_POSITIVE;
2249 + eth3220ac_rt8100_linkdown(gec, port);
2250 + eth3220ac_def_bpf(gec, port);
2253 + case WAIT_8101_LINK_UP_POSITIVE:
2254 + if (phy[port].state_time > WAIT_8101_LINK_UP_TIMEOUT) {
2255 + next_state = LINK_DOWN_POSITIVE;
2256 + eth3220ac_normal_phy_setting(gec, port);
2257 + eth3220ac_def_linkdown_setting(gec, port);
2259 + next_state = WAIT_8101_LINK_UP_POSITIVE;
2263 + case LINK_UP_POSITIVE:
2264 + eth3220ac_unlink_agc(gec, port);
2265 + eth3220ac_def_linkdown_setting(gec, port);
2266 + eth3220ac_def_bpf(gec, port);
2267 + if (phy[port].timer > DETECT_8101_PERIOD) {
2268 + next_state = LINK_DOWN_POSITIVE;
2269 + phy[port].timer = 0;
2270 + phy[port].linkdown_cnt = 1;
2272 + if (++phy[port].linkdown_cnt > 2) {
2273 + next_state = WAIT_8101_LINK_UP_POSITIVE;
2274 + eth3220ac_rt8101_phy_setting(gec, port);
2276 + next_state = LINK_DOWN_POSITIVE;
2281 + case LINK_UP_8101_POSITIVE:
2282 + eth3220ac_normal_phy_setting(gec, port);
2283 +		/* fall through to the normal PHY (init) state handling */
2284 + case PHY_STATE_INIT:
2285 + eth3220ac_def_linkdown_setting(gec, port);
2286 + eth3220ac_unlink_agc(gec, port);
2288 + next_state = LINK_DOWN_POSITIVE;
2292 + if (phy[port].state != next_state) {
2293 + phy[port].state_time = 0;
2294 +#if DEBUG_PHY_STATE_TRANSITION
2295 + if (debug_phy_port == -1 || port == debug_phy_port) {
2296 + if ((phy[port].state < PHY_STATE_LAST) &&
2297 + (next_state < PHY_STATE_LAST))
2298 + dev_dbg(&gec->netdev->dev,
2299 + "p%d: %s->%s, %d, %d\n",
2300 + port, phystate_name[phy[port].state],
2301 + phystate_name[next_state],
2303 + phy[port].linkdown_cnt);
2305 + dev_dbg(&gec->netdev->dev,
2307 + port, phy[port].state, next_state);
2309 +#endif /* DEBUG_PHY_STATE_TRANSITION */
2311 + phy[port].state_time += ETH3220_PHY_MON_PERIOD;
2313 + phy[port].state = next_state;
2316 +/*=============================================================*
2317 + * eth3220_phyinit:
2318 + *=============================================================*/
2319 +static void eth3220ac_10m_agc(struct cns21xx_gec *gec)
2321 + /* Force 10M AGC = 2c globally */
2322 + cns21xx_gec_write_phy(gec, 0, 31, 0x2f1a);
2323 + cns21xx_gec_write_phy(gec, 0, 12, 0x112c);
2324 + cns21xx_gec_write_phy(gec, 0, 13, 0x2e21);
2325 + cns21xx_gec_write_phy(gec, 0, 31, 0xaf1a);
2328 +static void eth3220ac_dfe_init(struct cns21xx_gec *gec)
2332 + cns21xx_gec_write_phy(gec, 0, 31, 0x2f1a);
2333 + for (i = 0; i <= 7; i++)
2334 + cns21xx_gec_write_phy(gec, 0, i, 0);
2335 + cns21xx_gec_write_phy(gec, 0, 11, 0x0b50);
2336 + cns21xx_gec_write_phy(gec, 0, 31, 0xaf1a);
2339 +static void eth3220ac_phy_cdr_training_init(struct cns21xx_gec *gec)
2343 +	/* Force all ports into 10M full-duplex mode */
2344 + for (i = 0; i < NUM_PHY; i++)
2345 + cns21xx_gec_write_phy(gec, i, 0, 0x100);
2347 + /* Global setting */
2348 + cns21xx_gec_write_phy(gec, 0, 31, 0x2f1a);
2349 + cns21xx_gec_write_phy(gec, 0, 29, 0x5021);
2350 + udelay(2000); /* 2ms, wait > 1 ms */
2351 + cns21xx_gec_write_phy(gec, 0, 29, 0x4021);
2352 + udelay(2000); /* 2ms, wait > 1 ms */
2353 + cns21xx_gec_write_phy(gec, 0, 31, 0xaf1a);
2355 + /* Enable phy AN */
2356 + for (i = 0; i < NUM_PHY; i++)
2357 + cns21xx_gec_write_phy(gec, i, 0, 0x3100);
2360 +static void eth3220_phyinit(struct cns21xx_gec *gec)
2362 + eth3220ac_10m_agc(gec);
2363 + eth3220ac_dfe_init(gec);
2364 + eth3220ac_phy_cdr_training_init(gec);
2367 +static void eth3220_phycfg(struct cns21xx_gec *gec, int phyaddr)
2369 + eth3220ac_def_linkdown_setting(gec, phyaddr);
2370 + eth3220ac_normal_phy_setting(gec, phyaddr);
2371 + cns21xx_gec_write_phy(gec, phyaddr, 9, 0x7f);
2374 +static void internal_phy_patch_check(struct cns21xx_gec *gec, int init)
2376 + u32 short_cable_agc_detect_count;
2377 + u32 link_status = 0, link_speed;
2378 + u32 phy_addr = gec->phy_addr;
2383 + cns21xx_gec_read_phy(gec, phy_addr, 1, &phy_data);
2385 + cns21xx_gec_read_phy(gec, phy_addr, 1, &phy_data2);
2386 + if (((phy_data & 0x0004) != 0x0004) &&
2387 + ((phy_data2 & 0x0004) != 0x0004)) {
2389 + short_cable_agc_detect_count = 0;
2390 + for (i = 0; i < INTERNAL_PHY_PATCH_CHECKCNT; i++) {
2391 + cns21xx_gec_read_phy(gec, phy_addr, 15, &phy_data);
2393 + if ((phy_data & 0x7F) <= 0x12) {
2395 + short_cable_agc_detect_count++;
2399 + if (short_cable_agc_detect_count) {
2403 + phy_statemachine = wp3220ac_phystate;
2404 + eth3220_phyinit(gec);
2405 + cns21xx_gec_read_phy(gec, phy_addr, 1, &phy_data);
2406 + if (phy_data & 0x0040)
2409 + mac_cfg = cns21xx_gec_rr(gec, GEC_REG_MAC_CFG);
2410 + if ((mac_cfg & 0xC) == 0x4) /* 100Mbps */
2415 + link_status_old = link_status;
2416 + for (i = 0; i < MAX_PHY_PORT; link_status >>= 1, i++)
2417 + eth3220_phycfg(gec, i);
2420 + /* set to global domain */
2421 + cns21xx_gec_write_phy(gec, phy_addr, 31,
2423 + for (i = 0; i < 32; i++)
2424 + cns21xx_gec_write_phy(gec, phy_addr, i,
2425 + long_cable_global_reg[i]);
2427 + /* set to local domain */
2428 + cns21xx_gec_write_phy(gec, phy_addr, 31,
2430 + for (i = 0; i < 32; i++)
2431 + cns21xx_gec_write_phy(gec, phy_addr, i,
2432 + long_cable_local_reg[i]);
2437 +static void internal_phy_timer_func(unsigned long data)
2439 + struct cns21xx_gec *gec = (struct cns21xx_gec *) data;
2441 + internal_phy_patch_check(gec, 0);
2442 + mod_timer(&gec->internal_phy_timer,
2443 + jiffies + INTERNAL_PHY_PATCH_CHECK_PERIOD / 10);
2446 +static void internal_phy_init_timer(struct cns21xx_gec *gec)
2448 + init_timer(&gec->internal_phy_timer);
2449 + gec->internal_phy_timer.function = internal_phy_timer_func;
2450 + gec->internal_phy_timer.data = (unsigned long) gec;
2453 +static void internal_phy_start_timer(struct cns21xx_gec *gec)
2455 + dev_dbg(&gec->netdev->dev, "starting patch check.\n");
2457 + internal_phy_patch_check(gec, 1);
2458 + mod_timer(&gec->internal_phy_timer,
2459 + jiffies + INTERNAL_PHY_PATCH_CHECK_PERIOD / 10);
2462 +static void internal_phy_stop_timer(struct cns21xx_gec *gec)
2464 + dev_dbg(&gec->netdev->dev, "stopping patch check.\n");
2466 + del_timer_sync(&gec->internal_phy_timer);
2469 +++ b/drivers/net/ethernet/cns21xx/Kconfig
2472 + tristate "CNS21XX Gigabit Ethernet Controller support"
2473 + depends on ARCH_CNS21XX
2475 +	  If you wish to compile a kernel for a Cavium Networks CNS21XX
2476 +	  system with Ethernet support, say Y here.
2479 +++ b/drivers/net/ethernet/cns21xx/Makefile
2482 +# Makefile for the Cavium Networks CNS21XX ethernet driver
2485 +cns21xx_gec-y += cns21xx_gec_main.o
2487 +obj-$(CONFIG_CNS21XX_GEC) += cns21xx_gec.o
2488 --- a/drivers/net/ethernet/Kconfig
2489 +++ b/drivers/net/ethernet/Kconfig
2490 @@ -31,6 +31,7 @@ source "drivers/net/ethernet/brocade/Kco
2491 source "drivers/net/ethernet/chelsio/Kconfig"
2492 source "drivers/net/ethernet/cirrus/Kconfig"
2493 source "drivers/net/ethernet/cisco/Kconfig"
2494 +source "drivers/net/ethernet/cns21xx/Kconfig"
2495 source "drivers/net/ethernet/davicom/Kconfig"
2498 --- a/drivers/net/ethernet/Makefile
2499 +++ b/drivers/net/ethernet/Makefile
2500 @@ -17,6 +17,7 @@ obj-$(CONFIG_NET_VENDOR_BROCADE) += broc
2501 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
2502 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
2503 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
2504 +obj-$(CONFIG_CNS21XX_GEC) += cns21xx/
2505 obj-$(CONFIG_DM9000) += davicom/
2506 obj-$(CONFIG_DNET) += dnet.o
2507 obj-$(CONFIG_NET_VENDOR_DEC) += dec/