mdelay(1);
- /* clear descriptor addresses */
- ag71xx_wr(ag, AG71XX_REG_TX_DESC, 0);
- ag71xx_wr(ag, AG71XX_REG_RX_DESC, 0);
+ /* point the descriptor registers at the self-linked stop descriptor */
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);
/* clear pending RX/TX interrupts */
for (i = 0; i < 256; i++) {
FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
FIFO_CFG5_17 | FIFO_CFG5_SF)
-static void ag71xx_hw_init(struct ag71xx *ag)
+static void ag71xx_hw_stop(struct ag71xx *ag)
{
- struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
-
- ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
- udelay(20);
+ /* disable all interrupts and stop the rx/tx engine */
+ ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
+ ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
+}
- ar71xx_device_stop(pdata->reset_bit);
- mdelay(100);
- ar71xx_device_start(pdata->reset_bit);
- mdelay(100);
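+/* register setup shared by the full hardware init and the fast reset path */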
+static void ag71xx_hw_setup(struct ag71xx *ag)
+{
+ struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
/* setup MAC configuration registers */
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_INIT);
/* setup max frame length */
ag71xx_wr(ag, AG71XX_REG_MAC_MFL, AG71XX_TX_MTU_LEN);
- /* setup MII interface type */
- ag71xx_mii_ctrl_set_if(ag, pdata->mii_if);
-
/* setup FIFO configuration registers */
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
if (pdata->is_ar724x) {
}
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
+}
+
+static void ag71xx_hw_init(struct ag71xx *ag)
+{
+ struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
+ u32 reset_mask = pdata->reset_bit;
+
+ ag71xx_hw_stop(ag);
+
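+ /* cycle the GE0/GE1 PHY reset lines separately from the MAC reset below */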
+ if (pdata->is_ar724x) {
+ u32 reset_phy = reset_mask;
+
+ reset_phy &= RESET_MODULE_GE0_PHY | RESET_MODULE_GE1_PHY;
+ reset_mask &= ~(RESET_MODULE_GE0_PHY | RESET_MODULE_GE1_PHY);
+
+ ar71xx_device_stop(reset_phy);
+ mdelay(50);
+ ar71xx_device_start(reset_phy);
+ mdelay(200);
+ }
+
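+ /* assert the MAC soft-reset bit before cycling the reset module */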
+ ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
+ udelay(20);
+
+ ar71xx_device_stop(reset_mask);
+ mdelay(100);
+ ar71xx_device_start(reset_mask);
+ mdelay(200);
+
+ ag71xx_hw_setup(ag);
+
+ ag71xx_dma_reset(ag);
+}
+
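+/*
+ * Reset only the MAC module. The MII configuration and the current RX/TX
+ * descriptor pointers are saved and restored around the reset, so the
+ * rings and the PHY setup survive it.
+ */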
+static void ag71xx_fast_reset(struct ag71xx *ag)
+{
+ struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
+ struct net_device *dev = ag->dev;
+ u32 reset_mask = pdata->reset_bit;
+ u32 rx_ds, tx_ds;
+ u32 mii_reg;
+
+ reset_mask &= RESET_MODULE_GE0_MAC | RESET_MODULE_GE1_MAC;
+
+ mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
+ rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
+ tx_ds = ag71xx_rr(ag, AG71XX_REG_TX_DESC);
+
+ ar71xx_device_stop(reset_mask);
+ udelay(10);
+ ar71xx_device_start(reset_mask);
+ udelay(10);
ag71xx_dma_reset(ag);
+ ag71xx_hw_setup(ag);
+
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, tx_ds);
+ ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
+
+ ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}
static void ag71xx_hw_start(struct ag71xx *ag)
ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
}
-static void ag71xx_hw_stop(struct ag71xx *ag)
-{
- /* disable all interrupts and stop the rx engine */
- ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
- ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
-}
-
void ag71xx_link_adjust(struct ag71xx *ag)
{
struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
u32 cfg2;
u32 ifctl;
u32 fifo5;
- u32 mii_speed;
if (!ag->link) {
ag71xx_hw_stop(ag);
return;
}
+ if (pdata->is_ar724x)
+ ag71xx_fast_reset(ag);
+
cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;
switch (ag->speed) {
case SPEED_1000:
- mii_speed = MII_CTRL_SPEED_1000;
cfg2 |= MAC_CFG2_IF_1000;
fifo5 |= FIFO_CFG5_BM;
break;
case SPEED_100:
- mii_speed = MII_CTRL_SPEED_100;
cfg2 |= MAC_CFG2_IF_10_100;
ifctl |= MAC_IFCTL_SPEED;
break;
case SPEED_10:
- mii_speed = MII_CTRL_SPEED_10;
cfg2 |= MAC_CFG2_IF_10_100;
break;
default:
else
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x008001ff);
- if (pdata->set_pll)
- pdata->set_pll(ag->speed);
-
- ag71xx_mii_ctrl_set_speed(ag, mii_speed);
+ if (pdata->set_speed)
+ pdata->set_speed(ag->speed);
ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
static void ag71xx_restart_work_func(struct work_struct *work)
{
struct ag71xx *ag = container_of(work, struct ag71xx, restart_work);
- struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
+
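+ /* on ar724x it is enough to drop the link; a fast reset is done
+ * from ag71xx_link_adjust when the link comes back up */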
+ if (ag71xx_get_pdata(ag)->is_ar724x) {
+ ag->link = 0;
+ ag71xx_link_adjust(ag);
+ return;
+ }
ag71xx_stop(ag->dev);
+ ag71xx_open(ag->dev);
+}
- if (pdata->is_ar724x)
- ag71xx_hw_init(ag);
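+/*
+ * Heuristic for the ar7240 DMA hang: if a TX descriptor has not been
+ * released by the hardware within ~100ms (HZ/10) while the carrier is up,
+ * inspect the RX/TX state machines to decide whether the engine is stuck.
+ */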
+static bool ag71xx_check_dma_stuck(struct ag71xx *ag, unsigned long timestamp)
+{
+ u32 rx_sm, tx_sm, rx_fd;
- ag71xx_open(ag->dev);
+ if (likely(time_before(jiffies, timestamp + HZ/10)))
+ return false;
+
+ if (!netif_carrier_ok(ag->dev))
+ return false;
+
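+ /* the RX_SM/TX_SM/FIFO_DEPTH values checked below are not publicly
+ * documented; they are assumed to identify a wedged DMA engine */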
+ rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
+ if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
+ return true;
+
+ tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
+ rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
+ if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
+ ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
+ return true;
+
+ return false;
}
static int ag71xx_tx_packets(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->tx_ring;
+ struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
int sent;
DBG("%s: processing TX ring\n", ag->dev->name);
struct ag71xx_desc *desc = ring->buf[i].desc;
struct sk_buff *skb = ring->buf[i].skb;
- if (!ag71xx_desc_empty(desc))
+ if (!ag71xx_desc_empty(desc)) {
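+ /* descriptor still owned by the hardware; on ar7240 check
+ * whether the DMA engine is stuck and needs a restart */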
+ if (pdata->is_ar7240 &&
+ ag71xx_check_dma_stuck(ag, ring->buf[i].timestamp))
+ schedule_work(&ag->restart_work);
break;
+ }
ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
goto err_free_dev;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mii_ctrl");
- if (!res) {
- dev_err(&pdev->dev, "no mii_ctrl resource found\n");
- err = -ENXIO;
- goto err_unmap_base;
- }
-
- ag->mii_ctrl = ioremap_nocache(res->start, res->end - res->start + 1);
- if (!ag->mii_ctrl) {
- dev_err(&pdev->dev, "unable to ioremap mii_ctrl\n");
- err = -ENOMEM;
- goto err_unmap_base;
- }
-
dev->irq = platform_get_irq(pdev, 0);
err = request_irq(dev->irq, ag71xx_interrupt,
IRQF_DISABLED,
dev->name, dev);
if (err) {
dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
- goto err_unmap_mii_ctrl;
+ goto err_unmap_base;
}
dev->base_addr = (unsigned long)ag->mac_base;
ag->tx_ring.size = AG71XX_TX_RING_SIZE_DEFAULT;
ag->rx_ring.size = AG71XX_RX_RING_SIZE_DEFAULT;
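+ /* allocate a dummy descriptor that links back to itself; the DMA
+ * engine is pointed at it whenever the rings are torn down
+ * (see ag71xx_dma_reset) */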
+ ag->stop_desc = dma_alloc_coherent(NULL,
+ sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL);
+
+ if (!ag->stop_desc) {
+ err = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ ag->stop_desc->data = 0;
+ ag->stop_desc->ctrl = 0;
+ ag->stop_desc->next = (u32) ag->stop_desc_dma;
+
memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);
netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "unable to register net device\n");
- goto err_free_irq;
+ goto err_free_desc;
}
printk(KERN_INFO "%s: Atheros AG71xx at 0x%08lx, irq %d\n",
ag71xx_phy_disconnect(ag);
err_unregister_netdev:
unregister_netdev(dev);
+err_free_desc:
+ dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc,
+ ag->stop_desc_dma);
err_free_irq:
free_irq(dev->irq, dev);
-err_unmap_mii_ctrl:
- iounmap(ag->mii_ctrl);
err_unmap_base:
iounmap(ag->mac_base);
err_free_dev:
ag71xx_phy_disconnect(ag);
unregister_netdev(dev);
free_irq(dev->irq, dev);
+ dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc,
+ ag->stop_desc_dma);
- iounmap(ag->mii_ctrl);
iounmap(ag->mac_base);
kfree(dev);
platform_set_drvdata(pdev, NULL);