/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include <ramips_eth_platform.h>
#include "ramips_eth.h"

#define TX_TIMEOUT	(20 * HZ / 100)
#define MAX_RX_LENGTH	1600

#ifdef CONFIG_RALINK_RT305X
#include "ramips_esw.c"
#else
static inline int rt305x_esw_init(void) { return 0; }
static inline void rt305x_esw_exit(void) { }
#endif

#define phys_to_bus(a)	((a) & 0x1FFFFFFF)

#ifdef CONFIG_RAMIPS_ETH_DEBUG
#define RADEBUG(fmt, args...)	printk(KERN_DEBUG fmt, ## args)
#else
#define RADEBUG(fmt, args...)	do {} while (0)
#endif

static struct net_device *ramips_dev;
static void __iomem *ramips_fe_base;

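/* accessors for the frame engine register window mapped at ramips_fe_base */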
static void
ramips_fe_wr(u32 val, unsigned reg)
{
	__raw_writel(val, ramips_fe_base + reg);
}

static u32
ramips_fe_rr(unsigned reg)
{
	return __raw_readl(ramips_fe_base + reg);
}

static void
ramips_fe_int_disable(u32 mask)
{
	ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~mask,
		     RAMIPS_FE_INT_ENABLE);
	/* flush the write */
	ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
}

static void
ramips_fe_int_enable(u32 mask)
{
	ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | mask,
		     RAMIPS_FE_INT_ENABLE);
	/* flush the write */
	ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
}

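/*
 * Program the unicast MAC address; the hardware takes the two high bytes
 * in RAMIPS_GDMA1_MAC_ADRH and the remaining four in RAMIPS_GDMA1_MAC_ADRL.
 */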
static void
ramips_hw_set_macaddr(unsigned char *mac)
{
	ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
	ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		     RAMIPS_GDMA1_MAC_ADRL);
}

static struct sk_buff *
ramips_alloc_skb(struct raeth_priv *re)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(re->netdev, MAX_RX_LENGTH + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);

	return skb;
}

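/*
 * (Re)initialise the descriptor rings: TX descriptors start out with
 * TX_DMA_DONE set so the xmit path sees them as free, RX buffers are
 * DMA-mapped and their addresses written into the RX descriptors.
 */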
static void
ramips_ring_setup(struct raeth_priv *re)
{
	int len;
	int i;

	len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
	memset(re->tx, 0, len);
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct ramips_tx_dma *txd;

		txd = &re->tx[i];
		txd->txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
		txd->txd2 = TX_DMA_LSO | TX_DMA_DONE;

		if (re->tx_skb[i] != NULL) {
			netdev_warn(re->netdev,
				    "dirty skb for TX desc %d\n", i);
			re->tx_skb[i] = NULL;
		}
	}

	len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
	memset(re->rx, 0, len);
	for (i = 0; i < NUM_RX_DESC; i++) {
		dma_addr_t dma_addr;

		BUG_ON(re->rx_skb[i] == NULL);
		dma_addr = dma_map_single(&re->netdev->dev, re->rx_skb[i]->data,
					  MAX_RX_LENGTH, DMA_FROM_DEVICE);
		re->rx_dma[i] = dma_addr;
		re->rx[i].rxd1 = (unsigned int) dma_addr;
		re->rx[i].rxd2 = RX_DMA_LSO;
	}

	/* flush descriptors */
	wmb();
}

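/* unmap the RX buffers and drop any skbs still attached to the TX ring */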
static void
ramips_ring_cleanup(struct raeth_priv *re)
{
	int i;

	for (i = 0; i < NUM_RX_DESC; i++)
		if (re->rx_skb[i])
			dma_unmap_single(&re->netdev->dev, re->rx_dma[i],
					 MAX_RX_LENGTH, DMA_FROM_DEVICE);

	for (i = 0; i < NUM_TX_DESC; i++)
		if (re->tx_skb[i]) {
			dev_kfree_skb_any(re->tx_skb[i]);
			re->tx_skb[i] = NULL;
		}
}

#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT3883)

#define RAMIPS_MDIO_RETRY	1000

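/*
 * RT288x and RT3883 drive an external PHY over MDIO, so the real MDIO and
 * phylib glue below is only built for those SoCs; other targets (e.g. the
 * RT305x with its built-in switch) use the empty stubs further down.
 */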
static unsigned char *ramips_speed_str(struct raeth_priv *re)
{
	switch (re->speed) {
	case SPEED_10:
		return "10";
	case SPEED_100:
		return "100";
	case SPEED_1000:
		return "1000";
	}

	return "?";
}

static void ramips_link_adjust(struct raeth_priv *re)
{
	struct ramips_eth_platform_data *pdata;
	u32 mdio_cfg;

	pdata = re->parent->platform_data;
	if (!re->link) {
		netif_carrier_off(re->netdev);
		netdev_info(re->netdev, "link down\n");
		return;
	}

	mdio_cfg = RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
		   RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 | /* XXX: same flag twice; probably meant to be the RX clock skew */
		   RAMIPS_MDIO_CFG_GP1_FRC_EN;

	if (re->duplex == DUPLEX_FULL)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_DUPLEX;

	if (re->tx_fc)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_TX;

	if (re->rx_fc)
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_RX;

	switch (re->speed) {
	case SPEED_10:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_10;
		break;
	case SPEED_100:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_100;
		break;
	case SPEED_1000:
		mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_1000;
		break;
	}

	ramips_fe_wr(mdio_cfg, RAMIPS_MDIO_CFG);

	netif_carrier_on(re->netdev);
	netdev_info(re->netdev, "link up (%sMbps/%s duplex)\n",
		    ramips_speed_str(re),
		    (DUPLEX_FULL == re->duplex) ? "Full" : "Half");
}

static int
ramips_mdio_wait_ready(struct raeth_priv *re)
{
	int retries;

	retries = RAMIPS_MDIO_RETRY;
	while (1) {
		u32 t;

		t = ramips_fe_rr(RAMIPS_MDIO_ACCESS);
		if ((t & (0x1 << 31)) == 0)
			return 0;

		if (retries-- == 0)
			break;

		udelay(1);
	}

	dev_err(re->parent, "MDIO operation timed out\n");
	return -ETIMEDOUT;
}

static int
ramips_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct raeth_priv *re = bus->priv;
	int err;
	u32 t;

	err = ramips_mdio_wait_ready(re);
	if (err)
		return 0xffff;

	t = (phy_addr << 24) | (phy_reg << 16);
	ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
	t |= (1 << 31);	/* start the transfer */
	ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);

	err = ramips_mdio_wait_ready(re);
	if (err)
		return 0xffff;

	RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__,
		phy_addr, phy_reg, ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff);

	return ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff;
}

static int
ramips_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
{
	struct raeth_priv *re = bus->priv;
	int err;
	u32 t;

	RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__,
		phy_addr, phy_reg, ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff);

	err = ramips_mdio_wait_ready(re);
	if (err)
		return err;

	t = (1 << 30) | (phy_addr << 24) | (phy_reg << 16) | val;
	ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
	t |= (1 << 31);	/* start the transfer */
	ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);

	return ramips_mdio_wait_ready(re);
}

static int
ramips_mdio_reset(struct mii_bus *bus)
{
	/* TODO */
	return 0;
}

static int
ramips_mdio_init(struct raeth_priv *re)
{
	int err;
	int i;

	re->mii_bus = mdiobus_alloc();
	if (re->mii_bus == NULL)
		return -ENOMEM;

	re->mii_bus->name = "ramips_mdio";
	re->mii_bus->read = ramips_mdio_read;
	re->mii_bus->write = ramips_mdio_write;
	re->mii_bus->reset = ramips_mdio_reset;
	re->mii_bus->irq = re->mii_irq;
	re->mii_bus->priv = re;
	re->mii_bus->parent = re->parent;

	snprintf(re->mii_bus->id, MII_BUS_ID_SIZE, "%s", "ramips_mdio");
	re->mii_bus->phy_mask = 0;

	for (i = 0; i < PHY_MAX_ADDR; i++)
		re->mii_irq[i] = PHY_POLL;

	err = mdiobus_register(re->mii_bus);
	if (err)
		goto err_free_bus;

	return 0;

err_free_bus:
	mdiobus_free(re->mii_bus);
	return err;
}

static void
ramips_mdio_cleanup(struct raeth_priv *re)
{
	mdiobus_unregister(re->mii_bus);
	mdiobus_free(re->mii_bus);
}

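/*
 * phylib link change callback: mirror the PHY state into raeth_priv and
 * reprogram the MAC via ramips_link_adjust() only when something changed.
 */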
static void
ramips_phy_link_adjust(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	struct phy_device *phydev = re->phy_dev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&re->phy_lock, flags);

	if (phydev->link)
		if (re->duplex != phydev->duplex ||
		    re->speed != phydev->speed)
			status_change = 1;

	if (phydev->link != re->link)
		status_change = 1;

	re->link = phydev->link;
	re->duplex = phydev->duplex;
	re->speed = phydev->speed;

	if (status_change)
		ramips_link_adjust(re);

	spin_unlock_irqrestore(&re->phy_lock, flags);
}

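/* scan the MDIO bus for PHYs allowed by pdata->phy_mask and attach to one of them */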
static int
ramips_phy_connect_multi(struct raeth_priv *re)
{
	struct net_device *netdev = re->netdev;
	struct ramips_eth_platform_data *pdata;
	struct phy_device *phydev = NULL;
	int phy_addr;

	pdata = re->parent->platform_data;
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		if (!(pdata->phy_mask & (1 << phy_addr)))
			continue;

		if (re->mii_bus->phy_map[phy_addr] == NULL)
			continue;

		RADEBUG("%s: PHY found at %s, uid=%08x\n",
			netdev->name,
			dev_name(&re->mii_bus->phy_map[phy_addr]->dev),
			re->mii_bus->phy_map[phy_addr]->phy_id);

		if (phydev == NULL)
			phydev = re->mii_bus->phy_map[phy_addr];
	}

	if (!phydev) {
		netdev_err(netdev, "no PHY found with phy_mask=%08x\n",
			   pdata->phy_mask);
		return -ENODEV;
	}

	re->phy_dev = phy_connect(netdev, dev_name(&phydev->dev),
				  ramips_phy_link_adjust, 0,
				  pdata->phy_if_mode);

	if (IS_ERR(re->phy_dev)) {
		netdev_err(netdev, "could not connect to PHY at %s\n",
			   dev_name(&phydev->dev));
		return PTR_ERR(re->phy_dev);
	}

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	RADEBUG("%s: connected to PHY at %s [uid=%08x, driver=%s]\n",
		netdev->name, dev_name(&phydev->dev),
		phydev->phy_id, phydev->drv->name);

	return 0;
}

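/* no PHY attached: take the link parameters verbatim from the platform data */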
static int
ramips_phy_connect_fixed(struct raeth_priv *re)
{
	struct ramips_eth_platform_data *pdata;

	pdata = re->parent->platform_data;
	switch (pdata->speed) {
	case SPEED_10:
	case SPEED_100:
	case SPEED_1000:
		break;
	default:
		netdev_err(re->netdev, "invalid speed specified\n");
		return -EINVAL;
	}

	RADEBUG("%s: using fixed link parameters\n", re->netdev->name);

	re->speed = pdata->speed;
	re->duplex = pdata->duplex;
	re->tx_fc = pdata->tx_fc;
	re->rx_fc = pdata->rx_fc;

	return 0;
}

static int
ramips_phy_connect(struct raeth_priv *re)
{
	struct ramips_eth_platform_data *pdata;

	pdata = re->parent->platform_data;
	if (pdata->phy_mask)
		return ramips_phy_connect_multi(re);

	return ramips_phy_connect_fixed(re);
}

static void
ramips_phy_disconnect(struct raeth_priv *re)
{
	if (re->phy_dev)
		phy_disconnect(re->phy_dev);
}

static void
ramips_phy_start(struct raeth_priv *re)
{
	unsigned long flags;

	if (re->phy_dev) {
		phy_start(re->phy_dev);
	} else {
		spin_lock_irqsave(&re->phy_lock, flags);
		re->link = 1;
		ramips_link_adjust(re);
		spin_unlock_irqrestore(&re->phy_lock, flags);
	}
}

static void
ramips_phy_stop(struct raeth_priv *re)
{
	unsigned long flags;

	if (re->phy_dev) {
		phy_stop(re->phy_dev);
	} else {
		spin_lock_irqsave(&re->phy_lock, flags);
		re->link = 0;
		ramips_link_adjust(re);
		spin_unlock_irqrestore(&re->phy_lock, flags);
	}
}

#else
static inline int ramips_mdio_init(struct raeth_priv *re) { return 0; }
static inline void ramips_mdio_cleanup(struct raeth_priv *re) { }
static inline int ramips_phy_connect(struct raeth_priv *re) { return 0; }
static inline void ramips_phy_disconnect(struct raeth_priv *re) { }
static inline void ramips_phy_start(struct raeth_priv *re) { }
static inline void ramips_phy_stop(struct raeth_priv *re) { }
#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT3883 */

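/* free the RX skbs and the coherent descriptor memory allocated by ramips_ring_alloc() */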
static void
ramips_ring_free(struct raeth_priv *re)
{
	int len;
	int i;

	for (i = 0; i < NUM_RX_DESC; i++)
		if (re->rx_skb[i])
			dev_kfree_skb_any(re->rx_skb[i]);

	len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
	dma_free_coherent(&re->netdev->dev, len, re->rx,
			  re->rx_desc_dma);

	len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
	dma_free_coherent(&re->netdev->dev, len, re->tx,
			  re->tx_desc_dma);
}

static int
ramips_ring_alloc(struct raeth_priv *re)
{
	int len;
	int i;

	/* allocate tx ring */
	len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
	re->tx = dma_alloc_coherent(&re->netdev->dev, len,
				    &re->tx_desc_dma, GFP_ATOMIC);
	if (re->tx == NULL)
		goto err_cleanup;

	/* allocate rx ring */
	len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
	re->rx = dma_alloc_coherent(&re->netdev->dev, len,
				    &re->rx_desc_dma, GFP_ATOMIC);
	if (re->rx == NULL)
		goto err_cleanup;

	for (i = 0; i < NUM_RX_DESC; i++) {
		struct sk_buff *skb;

		skb = ramips_alloc_skb(re);
		if (!skb)
			goto err_cleanup;

		re->rx_skb[i] = skb;
	}

	return 0;

err_cleanup:
	ramips_ring_free(re);
	return -ENOMEM;
}

static void
ramips_setup_dma(struct raeth_priv *re)
{
	ramips_fe_wr(re->tx_desc_dma, RAMIPS_TX_BASE_PTR0);
	ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
	ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
	ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);

	ramips_fe_wr(re->rx_desc_dma, RAMIPS_RX_BASE_PTR0);
	ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
	ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
	ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
}

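/*
 * Transmit path. The hardware TX index register (RAMIPS_TX_CTX_IDX0) is used
 * directly as the ring cursor: a frame is only queued if both the current and
 * the next descriptor are free (TX_DMA_DONE set, no skb attached), otherwise
 * it is dropped.
 */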
static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	unsigned int tx;
	unsigned int tx_next;
	dma_addr_t mapped_addr;

	if (re->plat->min_pkt_len) {
		if (skb->len < re->plat->min_pkt_len) {
			if (skb_padto(skb, re->plat->min_pkt_len)) {
				printk(KERN_ERR
				       "ramips_eth: skb_padto failed\n");
				return NETDEV_TX_OK;
			}
			skb_put(skb, re->plat->min_pkt_len - skb->len);
		}
	}

	dev->trans_start = jiffies;
	mapped_addr = dma_map_single(&re->netdev->dev, skb->data, skb->len,
				     DMA_TO_DEVICE);

	spin_lock(&re->page_lock);
	tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
	tx_next = (tx + 1) % NUM_TX_DESC;

	if ((re->tx_skb[tx]) || (re->tx_skb[tx_next]) ||
	    !(re->tx[tx].txd2 & TX_DMA_DONE) ||
	    !(re->tx[tx_next].txd2 & TX_DMA_DONE))
		goto out;

	re->tx[tx].txd1 = (unsigned int) mapped_addr;
	re->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
	re->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	re->tx_skb[tx] = skb;
	wmb();
	ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
	spin_unlock(&re->page_lock);

	return NETDEV_TX_OK;

out:
	spin_unlock(&re->page_lock);
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

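/*
 * RX tasklet: process a bounded number of completed descriptors per run,
 * replace each filled buffer with a freshly allocated skb (or reuse it when
 * allocation fails) and hand the descriptor back via RAMIPS_RX_CALC_IDX0.
 */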
static void
ramips_eth_rx_hw(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *re = netdev_priv(dev);
	int rx;
	int max_rx = 16;

	while (max_rx) {
		struct sk_buff *rx_skb, *new_skb;
		int pktlen;

		rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
		if (!(re->rx[rx].rxd2 & RX_DMA_DONE))
			break;
		max_rx--;

		rx_skb = re->rx_skb[rx];
		pktlen = RX_DMA_PLEN0(re->rx[rx].rxd2);

		new_skb = ramips_alloc_skb(re);
		/* Reuse the buffer on allocation failures */
		if (new_skb) {
			dma_addr_t dma_addr;

			dma_unmap_single(&re->netdev->dev, re->rx_dma[rx],
					 MAX_RX_LENGTH, DMA_FROM_DEVICE);

			skb_put(rx_skb, pktlen);

			rx_skb->protocol = eth_type_trans(rx_skb, dev);
			rx_skb->ip_summed = CHECKSUM_NONE;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(rx_skb);

			re->rx_skb[rx] = new_skb;

			dma_addr = dma_map_single(&re->netdev->dev,
						  new_skb->data,
						  MAX_RX_LENGTH,
						  DMA_FROM_DEVICE);
			re->rx_dma[rx] = dma_addr;
			re->rx[rx].rxd1 = (unsigned int) dma_addr;
		} else {
			dev->stats.rx_dropped++;
		}

		re->rx[rx].rxd2 &= ~RX_DMA_DONE;
		wmb();
		ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
	}

	if (max_rx == 0)
		tasklet_schedule(&re->rx_tasklet);
	else
		ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}

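/* TX completion tasklet: reclaim skbs for descriptors the hardware marked DONE */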
static void
ramips_eth_tx_housekeeping(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *)ptr;
	struct raeth_priv *re = netdev_priv(dev);

	spin_lock(&re->page_lock);
	while ((re->tx[re->skb_free_idx].txd2 & TX_DMA_DONE) &&
	       (re->tx_skb[re->skb_free_idx])) {
		dev_kfree_skb_irq(re->tx_skb[re->skb_free_idx]);
		re->tx_skb[re->skb_free_idx] = NULL;
		re->skb_free_idx++;
		if (re->skb_free_idx >= NUM_TX_DESC)
			re->skb_free_idx = 0;
	}
	spin_unlock(&re->page_lock);

	ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
}

static void
ramips_eth_timeout(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);

	tasklet_schedule(&re->tx_housekeeping_tasklet);
}

static irqreturn_t
ramips_eth_irq(int irq, void *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	unsigned long fe_int = ramips_fe_rr(RAMIPS_FE_INT_STATUS);

	ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS);

	if (fe_int & RAMIPS_RX_DLY_INT) {
		ramips_fe_int_disable(RAMIPS_RX_DLY_INT);
		tasklet_schedule(&re->rx_tasklet);
	}

	if (fe_int & RAMIPS_TX_DLY_INT) {
		ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
		tasklet_schedule(&re->tx_housekeeping_tasklet);
	}

	raeth_debugfs_update_int_stats(re, fe_int);

	return IRQ_HANDLED;
}

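/*
 * ndo_open: set up the rings, start the PDMA engines and unmask the delayed
 * TX/RX interrupts. The GDMA/CDMA checksum bits are cleared here, which
 * matches the RX path always reporting CHECKSUM_NONE.
 */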
static int
ramips_eth_open(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
			  dev->name, dev);
	if (err)
		return err;

	err = ramips_ring_alloc(re);
	if (err)
		goto err_free_irq;

	ramips_ring_setup(re);
	ramips_hw_set_macaddr(dev->dev_addr);

	ramips_setup_dma(re);
	ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
		     (RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
		      RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
		     RAMIPS_PDMA_GLO_CFG);
	ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
		      ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
		     ((re->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
		     RAMIPS_FE_GLO_CFG);

	tasklet_init(&re->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
		     (unsigned long)dev);
	tasklet_init(&re->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);

	ramips_phy_start(re);

	ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
	ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE);
	ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
		     ~(RAMIPS_GDM1_ICS_EN | RAMIPS_GDM1_TCS_EN | RAMIPS_GDM1_UCS_EN | 0xffff),
		     RAMIPS_GDMA1_FWD_CFG);
	ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG) &
		     ~(RAMIPS_ICS_GEN_EN | RAMIPS_TCS_GEN_EN | RAMIPS_UCS_GEN_EN),
		     RAMIPS_CDMA_CSG_CFG);
	ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);
	ramips_fe_wr(1, RAMIPS_FE_RST_GL);
	ramips_fe_wr(0, RAMIPS_FE_RST_GL);

	netif_start_queue(dev);

	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
	return err;
}

static int
ramips_eth_stop(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);

	ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
		     ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
		     RAMIPS_PDMA_GLO_CFG);

	/* disable all interrupts in the hw */
	ramips_fe_wr(0, RAMIPS_FE_INT_ENABLE);

	ramips_phy_stop(re);
	free_irq(dev->irq, dev);
	netif_stop_queue(dev);
	tasklet_kill(&re->tx_housekeeping_tasklet);
	tasklet_kill(&re->rx_tasklet);
	ramips_ring_cleanup(re);
	ramips_ring_free(re);
	RADEBUG("ramips_eth: stopped\n");
	return 0;
}

static int
ramips_eth_probe(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	int err;

	BUG_ON(!re->plat->reset_fe);
	re->plat->reset_fe();
	net_srandom(jiffies);
	memcpy(dev->dev_addr, re->plat->mac, ETH_ALEN);

	dev->watchdog_timeo = TX_TIMEOUT;
	spin_lock_init(&re->page_lock);
	spin_lock_init(&re->phy_lock);

	err = ramips_mdio_init(re);
	if (err)
		return err;

	err = ramips_phy_connect(re);
	if (err)
		goto err_mdio_cleanup;

	err = raeth_debugfs_init(re);
	if (err)
		goto err_phy_disconnect;

	return 0;

err_phy_disconnect:
	ramips_phy_disconnect(re);
err_mdio_cleanup:
	ramips_mdio_cleanup(re);
	return err;
}

static void
ramips_eth_uninit(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);

	raeth_debugfs_exit(re);
	ramips_phy_disconnect(re);
	ramips_mdio_cleanup(re);
}

static const struct net_device_ops ramips_eth_netdev_ops = {
	.ndo_init		= ramips_eth_probe,
	.ndo_uninit		= ramips_eth_uninit,
	.ndo_open		= ramips_eth_open,
	.ndo_stop		= ramips_eth_stop,
	.ndo_start_xmit		= ramips_eth_hard_start_xmit,
	.ndo_tx_timeout		= ramips_eth_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

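/*
 * Platform glue. Note that the driver keeps a single global ramips_dev /
 * ramips_fe_base, so only one frame engine instance is supported.
 */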
static int
ramips_eth_plat_probe(struct platform_device *plat)
{
	struct raeth_priv *re;
	struct ramips_eth_platform_data *data = plat->dev.platform_data;
	struct resource *res;
	int err;

	if (!data) {
		dev_err(&plat->dev, "no platform data specified\n");
		return -EINVAL;
	}

	res = platform_get_resource(plat, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&plat->dev, "no memory resource found\n");
		return -ENXIO;
	}

	ramips_fe_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ramips_fe_base)
		return -ENOMEM;

	ramips_dev = alloc_etherdev(sizeof(struct raeth_priv));
	if (!ramips_dev) {
		dev_err(&plat->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_unmap;
	}

	strcpy(ramips_dev->name, "eth%d");
	ramips_dev->irq = platform_get_irq(plat, 0);
	if (ramips_dev->irq < 0) {
		dev_err(&plat->dev, "no IRQ resource found\n");
		err = -ENXIO;
		goto err_free_dev;
	}
	ramips_dev->addr_len = ETH_ALEN;
	ramips_dev->base_addr = (unsigned long)ramips_fe_base;
	ramips_dev->netdev_ops = &ramips_eth_netdev_ops;

	re = netdev_priv(ramips_dev);

	re->netdev = ramips_dev;
	re->parent = &plat->dev;
	re->speed = data->speed;
	re->duplex = data->duplex;
	re->rx_fc = data->rx_fc;
	re->tx_fc = data->tx_fc;
	re->plat = data;

	err = register_netdev(ramips_dev);
	if (err) {
		dev_err(&plat->dev, "error bringing up device\n");
		goto err_free_dev;
	}

	RADEBUG("ramips_eth: loaded\n");

	return 0;

err_free_dev:
	free_netdev(ramips_dev);
err_unmap:
	iounmap(ramips_fe_base);
	return err;
}

static int
ramips_eth_plat_remove(struct platform_device *plat)
{
	unregister_netdev(ramips_dev);
	free_netdev(ramips_dev);
	RADEBUG("ramips_eth: unloaded\n");
	return 0;
}

static struct platform_driver ramips_eth_driver = {
	.probe = ramips_eth_plat_probe,
	.remove = ramips_eth_plat_remove,
	.driver = {
		.name = "ramips_eth",
		.owner = THIS_MODULE,
	},
};

static int __init
ramips_eth_init(void)
{
	int ret;

	ret = raeth_debugfs_root_init();
	if (ret)
		return ret;

	ret = rt305x_esw_init();
	if (ret)
		goto err_debugfs_exit;

	ret = platform_driver_register(&ramips_eth_driver);
	if (ret) {
		printk(KERN_ERR
		       "ramips_eth: Error registering platform driver!\n");
		goto err_esw_exit;
	}

	return 0;

err_esw_exit:
	rt305x_esw_exit();
err_debugfs_exit:
	raeth_debugfs_root_exit();
	return ret;
}

static void __exit
ramips_eth_cleanup(void)
{
	platform_driver_unregister(&ramips_eth_driver);
	rt305x_esw_exit();
	raeth_debugfs_root_exit();
}

module_init(ramips_eth_init);
module_exit(ramips_eth_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("ethernet driver for ramips boards");