#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <ramips_eth_platform.h>
#ifdef CONFIG_RALINK_RT305X
#include "ramips_esw.c"
+#else
+static inline int rt305x_esw_init(void) { return 0; }
+static inline void rt305x_esw_exit(void) { }
#endif
#define phys_to_bus(a) (a & 0x1FFFFFFF)
return __raw_readl(ramips_fe_base + reg);
}
+/*
+ * Clear @mask bits in the frontend interrupt-enable register.
+ * The read-back of RAMIPS_FE_INT_ENABLE afterwards flushes the posted
+ * write so the mask change takes effect before the caller proceeds.
+ */
+static inline void
+ramips_fe_int_disable(u32 mask)
+{
+ ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~mask,
+ RAMIPS_FE_INT_ENABLE);
+ /* flush write */
+ ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
+}
+
+/*
+ * Set @mask bits in the frontend interrupt-enable register.
+ * The read-back of RAMIPS_FE_INT_ENABLE afterwards flushes the posted
+ * write so the interrupt source is armed before the caller proceeds.
+ */
+static inline void
+ramips_fe_int_enable(u32 mask)
+{
+ ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | mask,
+ RAMIPS_FE_INT_ENABLE);
+ /* flush write */
+ ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
+}
+
+/*
+ * Program the GDMA1 unicast MAC filter from a 6-byte @mac address:
+ * bytes 0-1 go into the ADRH register, bytes 2-5 into ADRL.
+ */
+static inline void
+ramips_hw_set_macaddr(unsigned char *mac)
+{
+ ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
+ ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
+ RAMIPS_GDMA1_MAC_ADRL);
+}
+
+#ifdef CONFIG_RALINK_RT288X
+/*
+ * Program the RT288x MDIO_CFG register from the link parameters
+ * (speed, duplex, flow control) held in the driver private data.
+ * BUG()s on an unknown speed value, so callers must have validated it.
+ */
+static void
+ramips_setup_mdio_cfg(struct raeth_priv *re)
+{
+ unsigned int mdio_cfg;
+
+ /* NOTE(review): the original code OR'd RAMIPS_MDIO_CFG_TX_CLK_SKEW_200
+ * in twice; the second operand was presumably meant to be the RX clock
+ * skew field — confirm against the register definitions. The redundant
+ * duplicate is dropped here (x | x == x, no behavior change). */
+ mdio_cfg = RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
+ RAMIPS_MDIO_CFG_GP1_FRC_EN;
+
+ if (re->duplex == DUPLEX_FULL)
+ mdio_cfg |= RAMIPS_MDIO_CFG_GP1_DUPLEX;
+
+ if (re->tx_fc)
+ mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_TX;
+
+ if (re->rx_fc)
+ mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_RX;
+
+ switch (re->speed) {
+ case SPEED_10:
+ mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_10;
+ break;
+ case SPEED_100:
+ mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_100;
+ break;
+ case SPEED_1000:
+ mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_1000;
+ break;
+ default:
+ BUG();
+ }
+
+ ramips_fe_wr(mdio_cfg, RAMIPS_MDIO_CFG);
+}
+#else
+/* RT288x only; a no-op on every other Ralink SoC */
+static inline void ramips_setup_mdio_cfg(struct raeth_priv *re)
+{
+}
+#endif /* CONFIG_RALINK_RT288X */
+
static void
ramips_cleanup_dma(struct raeth_priv *re)
{
memset(re->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
for (i = 0; i < NUM_RX_DESC; i++) {
- struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH + 2);
+ struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH +
+ NET_IP_ALIGN);
if (!new_skb)
goto err_cleanup;
- skb_reserve(new_skb, 2);
+ skb_reserve(new_skb, NET_IP_ALIGN);
re->rx[i].rxd1 = dma_map_single(NULL,
- skb_put(new_skb, 2),
- MAX_RX_LENGTH + 2,
+ new_skb->data,
+ MAX_RX_LENGTH,
DMA_FROM_DEVICE);
re->rx[i].rxd2 |= RX_DMA_LSO;
re->rx_skb[i] = new_skb;
unsigned long tx;
unsigned int tx_next;
unsigned int mapped_addr;
- unsigned long flags;
if (priv->plat->min_pkt_len) {
if (skb->len < priv->plat->min_pkt_len) {
mapped_addr = (unsigned int) dma_map_single(NULL, skb->data, skb->len,
DMA_TO_DEVICE);
dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
- spin_lock_irqsave(&priv->page_lock, flags);
+ spin_lock(&priv->page_lock);
tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
tx_next = (tx + 1) % NUM_TX_DESC;
priv->tx_skb[tx] = skb;
wmb();
ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
- spin_unlock_irqrestore(&priv->page_lock, flags);
+ spin_unlock(&priv->page_lock);
return NETDEV_TX_OK;
out:
- spin_unlock_irqrestore(&priv->page_lock, flags);
+ spin_unlock(&priv->page_lock);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
while (max_rx) {
struct sk_buff *rx_skb, *new_skb;
+ int pktlen;
rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
max_rx--;
rx_skb = priv->rx_skb[rx];
- rx_skb->len = RX_DMA_PLEN0(priv->rx[rx].rxd2);
- rx_skb->dev = dev;
- rx_skb->protocol = eth_type_trans(rx_skb, dev);
- rx_skb->ip_summed = CHECKSUM_NONE;
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += rx_skb->len;
- netif_rx(rx_skb);
-
- new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + 2);
- priv->rx_skb[rx] = new_skb;
- BUG_ON(!new_skb);
- skb_reserve(new_skb, 2);
- priv->rx[rx].rxd1 = dma_map_single(NULL,
- new_skb->data,
- MAX_RX_LENGTH + 2,
- DMA_FROM_DEVICE);
+ pktlen = RX_DMA_PLEN0(priv->rx[rx].rxd2);
+
+ new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + NET_IP_ALIGN);
+ /* Reuse the buffer on allocation failures */
+ if (new_skb) {
+ /* TODO: convert to use dma_address_t */
+ dma_unmap_single(NULL, priv->rx[rx].rxd1, MAX_RX_LENGTH,
+ DMA_FROM_DEVICE);
+
+ skb_put(rx_skb, pktlen);
+ rx_skb->dev = dev;
+ rx_skb->protocol = eth_type_trans(rx_skb, dev);
+ rx_skb->ip_summed = CHECKSUM_NONE;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pktlen;
+ netif_rx(rx_skb);
+
+ priv->rx_skb[rx] = new_skb;
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ priv->rx[rx].rxd1 = dma_map_single(NULL,
+ new_skb->data,
+ MAX_RX_LENGTH,
+ DMA_FROM_DEVICE);
+ } else {
+ dev->stats.rx_dropped++;
+ }
+
priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
wmb();
ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
if (max_rx == 0)
tasklet_schedule(&priv->rx_tasklet);
else
- ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_RX_DLY_INT,
- RAMIPS_FE_INT_ENABLE);
+ ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}
static void
struct net_device *dev = (struct net_device*)ptr;
struct raeth_priv *priv = netdev_priv(dev);
+ spin_lock(&priv->page_lock);
while ((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
(priv->tx_skb[priv->skb_free_idx])) {
- dev_kfree_skb_irq((struct sk_buff *) priv->tx_skb[priv->skb_free_idx]);
+ dev_kfree_skb_irq(priv->tx_skb[priv->skb_free_idx]);
priv->tx_skb[priv->skb_free_idx] = 0;
priv->skb_free_idx++;
if (priv->skb_free_idx >= NUM_TX_DESC)
priv->skb_free_idx = 0;
}
+ spin_unlock(&priv->page_lock);
- ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_TX_DLY_INT,
- RAMIPS_FE_INT_ENABLE);
-}
-
-static int
-ramips_eth_set_mac_addr(struct net_device *dev, void *priv)
-{
- unsigned char *mac = (unsigned char *) priv;
-
- if (netif_running(dev))
- return -EBUSY;
-
- memcpy(dev->dev_addr, ((struct sockaddr*)priv)->sa_data, dev->addr_len);
- ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
- ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
- RAMIPS_GDMA1_MAC_ADRL);
- return 0;
+ ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
}
static void
ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS);
if (fe_int & RAMIPS_RX_DLY_INT) {
- ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~(RAMIPS_RX_DLY_INT),
- RAMIPS_FE_INT_ENABLE);
+ ramips_fe_int_disable(RAMIPS_RX_DLY_INT);
tasklet_schedule(&priv->rx_tasklet);
}
- if (fe_int & RAMIPS_TX_DLY_INT)
- ramips_eth_tx_housekeeping((unsigned long)dev);
+ if (fe_int & RAMIPS_TX_DLY_INT) {
+ ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
+ tasklet_schedule(&priv->tx_housekeeping_tasklet);
+ }
return IRQ_HANDLED;
}
if (err)
goto err_free_irq;
+ ramips_hw_set_macaddr(dev->dev_addr);
+
ramips_setup_dma(priv);
ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
RAMIPS_PDMA_GLO_CFG);
ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
- ((rt305x_sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
+ ((priv->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
RAMIPS_FE_GLO_CFG);
tasklet_init(&priv->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
(unsigned long)dev);
tasklet_init(&priv->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);
+ ramips_setup_mdio_cfg(priv);
+
ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE);
ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
RAMIPS_PDMA_GLO_CFG);
+
+ /* disable all interrupts in the hw */
+ ramips_fe_wr(0, RAMIPS_FE_INT_ENABLE);
+
free_irq(dev->irq, dev);
netif_stop_queue(dev);
tasklet_kill(&priv->tx_housekeeping_tasklet);
ramips_eth_probe(struct net_device *dev)
{
struct raeth_priv *priv = netdev_priv(dev);
- struct sockaddr addr;
BUG_ON(!priv->plat->reset_fe);
priv->plat->reset_fe();
net_srandom(jiffies);
- memcpy(addr.sa_data, priv->plat->mac, 6);
- ramips_eth_set_mac_addr(dev, &addr);
+ memcpy(dev->dev_addr, priv->plat->mac, ETH_ALEN);
ether_setup(dev);
dev->mtu = 1500;
.ndo_start_xmit = ramips_eth_hard_start_xmit,
.ndo_tx_timeout = ramips_eth_timeout,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = ramips_eth_set_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
ramips_dev->netdev_ops = &ramips_eth_netdev_ops;
priv = netdev_priv(ramips_dev);
+
+ priv->speed = data->speed;
+ priv->duplex = data->duplex;
+ priv->rx_fc = data->rx_fc;
+ priv->tx_fc = data->tx_fc;
priv->plat = data;
err = register_netdev(ramips_dev);
goto err_free_dev;
}
-#ifdef CONFIG_RALINK_RT305X
- rt305x_esw_init();
-#endif
printk(KERN_DEBUG "ramips_eth: loaded\n");
return 0;
static int __init
ramips_eth_init(void)
{
-	int ret = platform_driver_register(&ramips_eth_driver);
+ int ret;
+
+ /* bring up the built-in switch first (stubbed out on non-RT305x) */
+ ret = rt305x_esw_init();
 	if (ret)
+ return ret;
+
+ ret = platform_driver_register(&ramips_eth_driver);
+ if (ret) {
 		printk(KERN_ERR
 		       "ramips_eth: Error registering platfom driver!\n");
+ goto esw_cleanup;
+ }
+
+ return 0;
+
+esw_cleanup:
+ /* undo the switch init when driver registration failed */
+ rt305x_esw_exit();
 	return ret;
}
ramips_eth_cleanup(void)
{
	platform_driver_unregister(&ramips_eth_driver);
+ /* tear down the switch after the driver is gone; reverse of init order */
+ rt305x_esw_exit();
}
module_init(ramips_eth_init);