return __raw_readl(ramips_fe_base + reg);
}
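+/*
+ * Read-modify-write helpers for the shared FE interrupt enable
+ * register. The read-back after each write flushes the posted write,
+ * so the new mask has reached the chip before the caller continues.
+ */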
+static inline void
+ramips_fe_int_disable(u32 mask)
+{
+ ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~mask,
+ RAMIPS_FE_INT_ENABLE);
+ /* flush write */
+ ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
+}
+
+static inline void
+ramips_fe_int_enable(u32 mask)
+{
+ ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | mask,
+ RAMIPS_FE_INT_ENABLE);
+ /* flush write */
+ ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
+}
+
static void
-ramips_cleanup_dma(struct net_device *dev)
+ramips_cleanup_dma(struct raeth_priv *re)
{
- struct raeth_priv *priv = netdev_priv(dev);
int i;
for (i = 0; i < NUM_RX_DESC; i++)
- if (priv->rx_skb[i])
- dev_kfree_skb_any(priv->rx_skb[i]);
+ if (re->rx_skb[i])
+ dev_kfree_skb_any(re->rx_skb[i]);
- if (priv->rx)
+ if (re->rx)
dma_free_coherent(NULL,
NUM_RX_DESC * sizeof(struct ramips_rx_dma),
- priv->rx, priv->phy_rx);
+ re->rx, re->phy_rx);
- if (priv->tx)
+ if (re->tx)
dma_free_coherent(NULL,
NUM_TX_DESC * sizeof(struct ramips_tx_dma),
- priv->tx, priv->phy_tx);
+ re->tx, re->phy_tx);
}
static int
-ramips_alloc_dma(struct net_device *dev)
+ramips_alloc_dma(struct raeth_priv *re)
{
- struct raeth_priv *priv = netdev_priv(dev);
int err = -ENOMEM;
int i;
- priv->skb_free_idx = 0;
+ re->skb_free_idx = 0;
/* setup tx ring */
- priv->tx = dma_alloc_coherent(NULL,
- NUM_TX_DESC * sizeof(struct ramips_tx_dma),
- &priv->phy_tx, GFP_ATOMIC);
- if (!priv->tx)
+ re->tx = dma_alloc_coherent(NULL,
+ NUM_TX_DESC * sizeof(struct ramips_tx_dma),
+ &re->phy_tx, GFP_ATOMIC);
+ if (!re->tx)
goto err_cleanup;
- memset(priv->tx, 0, NUM_TX_DESC * sizeof(struct ramips_tx_dma));
+ memset(re->tx, 0, NUM_TX_DESC * sizeof(struct ramips_tx_dma));
for (i = 0; i < NUM_TX_DESC; i++) {
- priv->tx[i].txd2 |= TX_DMA_LSO | TX_DMA_DONE;
- priv->tx[i].txd4 &= (TX_DMA_QN_MASK | TX_DMA_PN_MASK);
- priv->tx[i].txd4 |= TX_DMA_QN(3) | TX_DMA_PN(1);
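+ /* each slot starts out free (DONE); QN/PN steer it to queue 3 on GDMA port 1 */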
+ re->tx[i].txd2 = TX_DMA_LSO | TX_DMA_DONE;
+ re->tx[i].txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
}
/* setup rx ring */
- priv->rx = dma_alloc_coherent(NULL,
- NUM_RX_DESC * sizeof(struct ramips_rx_dma),
- &priv->phy_rx, GFP_ATOMIC);
- if (!priv->rx)
+ re->rx = dma_alloc_coherent(NULL,
+ NUM_RX_DESC * sizeof(struct ramips_rx_dma),
+ &re->phy_rx, GFP_ATOMIC);
+ if (!re->rx)
goto err_cleanup;
- memset(priv->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
+ memset(re->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
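+ /* hang a fresh skb off every RX descriptor for the hardware to DMA into */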
for (i = 0; i < NUM_RX_DESC; i++) {
struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH + 2);
if (!new_skb)
goto err_cleanup;
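+ /* 2 bytes of headroom keep the IP header word-aligned after the 14-byte ethernet header */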
skb_reserve(new_skb, 2);
- priv->rx[i].rxd1 = dma_map_single(NULL,
- skb_put(new_skb, 2),
- MAX_RX_LENGTH + 2,
- DMA_FROM_DEVICE);
- priv->rx[i].rxd2 |= RX_DMA_LSO;
- priv->rx_skb[i] = new_skb;
+ re->rx[i].rxd1 = dma_map_single(NULL,
+ skb_put(new_skb, 2),
+ MAX_RX_LENGTH + 2,
+ DMA_FROM_DEVICE);
+ re->rx[i].rxd2 |= RX_DMA_LSO;
+ re->rx_skb[i] = new_skb;
}
return 0;
err_cleanup:
- ramips_cleanup_dma(dev);
+ ramips_cleanup_dma(re);
return err;
}
static void
-ramips_setup_dma(struct net_device *dev)
+ramips_setup_dma(struct raeth_priv *re)
{
- struct raeth_priv *priv = netdev_priv(dev);
-
- ramips_fe_wr(phys_to_bus(priv->phy_tx), RAMIPS_TX_BASE_PTR0);
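+ /* point the PDMA engine at both rings and reset its ring indices */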
+ ramips_fe_wr(phys_to_bus(re->phy_tx), RAMIPS_TX_BASE_PTR0);
ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);
- ramips_fe_wr(phys_to_bus(priv->phy_rx), RAMIPS_RX_BASE_PTR0);
+ ramips_fe_wr(phys_to_bus(re->phy_rx), RAMIPS_RX_BASE_PTR0);
ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
spin_lock_irqsave(&priv->page_lock, flags);
tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
- if (tx == NUM_TX_DESC - 1)
- tx_next = 0;
- else
- tx_next = tx + 1;
+ tx_next = (tx + 1) % NUM_TX_DESC;
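+ /* the ring is full if this or the next slot still holds an unfinished frame */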
if ((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
!(priv->tx[tx].txd2 & TX_DMA_DONE) ||
!(priv->tx[tx_next].txd2 & TX_DMA_DONE))
goto out;
dev->stats.tx_bytes += skb->len;
priv->tx_skb[tx] = skb;
wmb();
- ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
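+ /* advancing the CPU index past the filled slot hands it to the DMA engine */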
+ ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
spin_unlock_irqrestore(&priv->page_lock, flags);
return NETDEV_TX_OK;
if (max_rx == 0)
tasklet_schedule(&priv->rx_tasklet);
else
- ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_RX_DLY_INT,
- RAMIPS_FE_INT_ENABLE);
+ ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}
static void
ramips_eth_tx_housekeeping(unsigned long ptr)
{
struct net_device *dev = (struct net_device *) ptr;
struct raeth_priv *priv = netdev_priv(dev);
while ((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
(priv->tx_skb[priv->skb_free_idx])) {
- dev_kfree_skb_irq((struct sk_buff *) priv->tx_skb[priv->skb_free_idx]);
+ dev_kfree_skb_irq(priv->tx_skb[priv->skb_free_idx]);
priv->tx_skb[priv->skb_free_idx] = 0;
priv->skb_free_idx++;
if (priv->skb_free_idx >= NUM_TX_DESC)
priv->skb_free_idx = 0;
}
- ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_TX_DLY_INT,
- RAMIPS_FE_INT_ENABLE);
+ ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
}
static int
ramips_eth_set_mac_addr(struct net_device *dev, void *priv)
{
- unsigned char *mac = (unsigned char *) priv;
+ unsigned char *mac = priv;
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, ((struct sockaddr*)priv)->sa_data, dev->addr_len);
ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
- ramips_fe_wr(RAMIPS_GDMA1_MAC_ADRL,
- (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5]);
+ ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
+ RAMIPS_GDMA1_MAC_ADRL);
return 0;
}
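+ /* acknowledge all pending FE interrupt sources before dispatching */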
ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS);
if (fe_int & RAMIPS_RX_DLY_INT) {
- ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~(RAMIPS_RX_DLY_INT),
- RAMIPS_FE_INT_ENABLE);
+ ramips_fe_int_disable(RAMIPS_RX_DLY_INT);
tasklet_schedule(&priv->rx_tasklet);
}
if (err)
return err;
- err = ramips_alloc_dma(dev);
+ err = ramips_alloc_dma(priv);
if (err)
goto err_free_irq;
- ramips_setup_dma(dev);
+ ramips_setup_dma(priv);
ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
RAMIPS_PDMA_GLO_CFG);
{
struct raeth_priv *priv = netdev_priv(dev);
- ramips_fe_wr(RAMIPS_PDMA_GLO_CFG, ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
- ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN));
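+ /* stop the DMA engines before the rings are torn down */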
+ ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
+ ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
+ RAMIPS_PDMA_GLO_CFG);
free_irq(dev->irq, dev);
netif_stop_queue(dev);
tasklet_kill(&priv->tx_housekeeping_tasklet);
tasklet_kill(&priv->rx_tasklet);
- ramips_cleanup_dma(dev);
+ ramips_cleanup_dma(priv);
printk(KERN_DEBUG "ramips_eth: stopped\n");
return 0;
}
ramips_eth_set_mac_addr(dev, &addr);
ether_setup(dev);
- dev->open = ramips_eth_open;
- dev->stop = ramips_eth_stop;
- dev->hard_start_xmit = ramips_eth_hard_start_xmit;
- dev->set_mac_address = ramips_eth_set_mac_addr;
dev->mtu = 1500;
- dev->tx_timeout = ramips_eth_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
spin_lock_init(&priv->page_lock);
return 0;
}
+static const struct net_device_ops ramips_eth_netdev_ops = {
+ .ndo_init = ramips_eth_probe,
+ .ndo_open = ramips_eth_open,
+ .ndo_stop = ramips_eth_stop,
+ .ndo_start_xmit = ramips_eth_hard_start_xmit,
+ .ndo_tx_timeout = ramips_eth_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = ramips_eth_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
static int
ramips_eth_plat_probe(struct platform_device *plat)
{
}
ramips_dev->addr_len = ETH_ALEN;
ramips_dev->base_addr = (unsigned long)ramips_fe_base;
- ramips_dev->init = ramips_eth_probe;
+ ramips_dev->netdev_ops = &ramips_eth_netdev_ops;
priv = netdev_priv(ramips_dev);
priv->plat = data;