ring->descs_cpu, ring->descs_dma);
}
-static int ag71xx_ring_alloc(struct ag71xx_ring *ring, unsigned int size)
+static int ag71xx_ring_alloc(struct ag71xx_ring *ring)
{
int err;
int i;
ring->desc_size = roundup(ring->desc_size, cache_line_size());
}
- ring->descs_cpu = dma_alloc_coherent(NULL, size * ring->desc_size,
+ ring->descs_cpu = dma_alloc_coherent(NULL, ring->size * ring->desc_size,
&ring->descs_dma, GFP_ATOMIC);
if (!ring->descs_cpu) {
err = -ENOMEM;
goto err;
}
- ring->size = size;
- ring->buf = kzalloc(size * sizeof(*ring->buf), GFP_KERNEL);
+ ring->buf = kzalloc(ring->size * sizeof(*ring->buf), GFP_KERNEL);
if (!ring->buf) {
err = -ENOMEM;
goto err;
}
- for (i = 0; i < size; i++) {
+ for (i = 0; i < ring->size; i++) {
int idx = i * ring->desc_size;
ring->buf[i].desc = (struct ag71xx_desc *)&ring->descs_cpu[idx];
DBG("ag71xx: ring %p, desc %d at %p\n",
struct net_device *dev = ag->dev;
while (ring->curr != ring->dirty) {
- u32 i = ring->dirty % AG71XX_TX_RING_SIZE;
+ u32 i = ring->dirty % ring->size;
if (!ag71xx_desc_empty(ring->buf[i].desc)) {
ring->buf[i].desc->ctrl = 0;
struct ag71xx_ring *ring = &ag->tx_ring;
int i;
- for (i = 0; i < AG71XX_TX_RING_SIZE; i++) {
+ for (i = 0; i < ring->size; i++) {
ring->buf[i].desc->next = (u32) (ring->descs_dma +
- ring->desc_size * ((i + 1) % AG71XX_TX_RING_SIZE));
+ ring->desc_size * ((i + 1) % ring->size));
ring->buf[i].desc->ctrl = DESC_EMPTY;
ring->buf[i].skb = NULL;
if (!ring->buf)
return;
- for (i = 0; i < AG71XX_RX_RING_SIZE; i++)
+ for (i = 0; i < ring->size; i++)
if (ring->buf[i].skb) {
dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
AG71XX_RX_PKT_SIZE, DMA_FROM_DEVICE);
int ret;
ret = 0;
- for (i = 0; i < AG71XX_RX_RING_SIZE; i++) {
+ for (i = 0; i < ring->size; i++) {
ring->buf[i].desc->next = (u32) (ring->descs_dma +
- ring->desc_size * ((i + 1) % AG71XX_RX_RING_SIZE));
+ ring->desc_size * ((i + 1) % ring->size));
DBG("ag71xx: RX desc at %p, next is %08x\n",
ring->buf[i].desc,
ring->buf[i].desc->next);
}
- for (i = 0; i < AG71XX_RX_RING_SIZE; i++) {
+ for (i = 0; i < ring->size; i++) {
struct sk_buff *skb;
dma_addr_t dma_addr;
for (; ring->curr - ring->dirty > 0; ring->dirty++) {
unsigned int i;
- i = ring->dirty % AG71XX_RX_RING_SIZE;
+ i = ring->dirty % ring->size;
if (ring->buf[i].skb == NULL) {
dma_addr_t dma_addr;
{
int ret;
- ret = ag71xx_ring_alloc(&ag->tx_ring, AG71XX_TX_RING_SIZE);
+ ret = ag71xx_ring_alloc(&ag->tx_ring);
if (ret)
return ret;
ag71xx_ring_tx_init(ag);
- ret = ag71xx_ring_alloc(&ag->rx_ring, AG71XX_RX_RING_SIZE);
+ ret = ag71xx_ring_alloc(&ag->rx_ring);
if (ret)
return ret;
return "?";
}
+static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
+{
+ u32 t;
+
+ t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
+ | (((u32) mac[3]) << 8) | ((u32) mac[2]);
+
+ ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
+
+ t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
+ ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
+}
+
static void ag71xx_dma_reset(struct ag71xx *ag)
{
u32 val;
mdelay(1);
-	/* clear descriptor addresses */
-	ag71xx_wr(ag, AG71XX_REG_TX_DESC, 0);
-	ag71xx_wr(ag, AG71XX_REG_RX_DESC, 0);
+	/* park the descriptor registers on the dummy stop descriptor */
+	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
+	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);
/* clear pending RX/TX interrupts */
for (i = 0; i < 256; i++) {
ag71xx_dump_dma_regs(ag);
}
-static void ag71xx_hw_start(struct ag71xx *ag)
-{
- /* start RX engine */
- ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
+#define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \
+ MAC_CFG1_SRX | MAC_CFG1_STX)
- /* enable interrupts */
- ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
-}
+#define FIFO_CFG0_INIT (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)
+
+#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
+ FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
+ FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
+ FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
+ FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
+ FIFO_CFG4_VT)
+
+#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
+ FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
+ FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
+ FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
+ FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
+ FIFO_CFG5_17 | FIFO_CFG5_SF)
static void ag71xx_hw_stop(struct ag71xx *ag)
{
- /* disable all interrupts */
+ /* disable all interrupts and stop the rx/tx engine */
ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
+ ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
+}
+
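+/* program the MAC, MII interface and FIFO registers; shared by
+ * ag71xx_hw_init() and ag71xx_fast_reset() */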
+static void ag71xx_hw_setup(struct ag71xx *ag)
+{
+ struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
+
+ /* setup MAC configuration registers */
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_INIT);
+
+ ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
+ MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);
+
+ /* setup max frame length */
+ ag71xx_wr(ag, AG71XX_REG_MAC_MFL, AG71XX_TX_MTU_LEN);
+
+ /* setup MII interface type */
+ ag71xx_mii_ctrl_set_if(ag, pdata->mii_if);
+
+ /* setup FIFO configuration registers */
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
+ if (pdata->is_ar724x) {
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, pdata->fifo_cfg1);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, pdata->fifo_cfg2);
+ } else {
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
+ }
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
+}
+
+static void ag71xx_hw_init(struct ag71xx *ag)
+{
+ struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
+ u32 reset_mask = pdata->reset_bit;
+
+ ag71xx_hw_stop(ag);
+
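+	/* on AR724x, reset the built-in GE PHY bits separately before the
+	 * MAC reset below */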
+ if (pdata->is_ar724x) {
+ u32 reset_phy = reset_mask;
+
+ reset_phy &= RESET_MODULE_GE0_PHY | RESET_MODULE_GE1_PHY;
+ reset_mask &= ~(RESET_MODULE_GE0_PHY | RESET_MODULE_GE1_PHY);
+
+ ar71xx_device_stop(reset_phy);
+ mdelay(50);
+ ar71xx_device_start(reset_phy);
+ mdelay(200);
+ }
+
+ ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
+ udelay(20);
+
+ ar71xx_device_stop(reset_mask);
+ mdelay(100);
+ ar71xx_device_start(reset_mask);
+ mdelay(200);
+
+ ag71xx_hw_setup(ag);
+
+ ag71xx_dma_reset(ag);
+}
+
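+/* reset the MAC without touching the rings: the descriptor pointers, MII
+ * configuration and MAC address are saved and restored around the reset */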
+static void ag71xx_fast_reset(struct ag71xx *ag)
+{
+ struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
+ struct net_device *dev = ag->dev;
+ u32 reset_mask = pdata->reset_bit;
+ u32 rx_ds, tx_ds;
+ u32 mii_reg;
+
+ reset_mask &= RESET_MODULE_GE0_MAC | RESET_MODULE_GE1_MAC;
+
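+	/* save registers that the reset below will clobber */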
+ mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
+ rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
+ tx_ds = ag71xx_rr(ag, AG71XX_REG_TX_DESC);
+
+ ar71xx_device_stop(reset_mask);
+ udelay(10);
+ ar71xx_device_start(reset_mask);
+ udelay(10);
ag71xx_dma_reset(ag);
+ ag71xx_hw_setup(ag);
+
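+	/* restore the saved descriptor pointers and MII configuration */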
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, tx_ds);
+ ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
+
+ ag71xx_hw_set_macaddr(ag, dev->dev_addr);
+}
+
+static void ag71xx_hw_start(struct ag71xx *ag)
+{
+ /* start RX engine */
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
+
+ /* enable interrupts */
+ ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
}
void ag71xx_link_adjust(struct ag71xx *ag)
return;
}
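+	/* on AR724x, do a fast MAC reset before reprogramming the link
+	 * parameters below */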
+ if (pdata->is_ar724x)
+ ag71xx_fast_reset(ag);
+
cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;
ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
-
ag71xx_hw_start(ag);
netif_carrier_on(ag->dev);
ag71xx_mii_ctrl_rr(ag));
}
-static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
-{
- u32 t;
-
- t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
- | (((u32) mac[3]) << 8) | ((u32) mac[2]);
-
- ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
-
- t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
- ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
-}
-
-#define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \
- MAC_CFG1_SRX | MAC_CFG1_STX)
-
-#define FIFO_CFG0_INIT (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)
-
-#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
- FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
- FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
- FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
- FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
- FIFO_CFG4_VT)
-
-#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
- FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
- FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
- FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
- FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
- FIFO_CFG5_17 | FIFO_CFG5_SF)
-
-static void ag71xx_hw_init(struct ag71xx *ag)
-{
- struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
-
- ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
- udelay(20);
-
- ar71xx_device_stop(pdata->reset_bit);
- mdelay(100);
- ar71xx_device_start(pdata->reset_bit);
- mdelay(100);
-
- /* setup MAC configuration registers */
- ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_INIT);
-
- ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
- MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);
-
- /* setup max frame length */
- ag71xx_wr(ag, AG71XX_REG_MAC_MFL, AG71XX_TX_MTU_LEN);
-
- /* setup MII interface type */
- ag71xx_mii_ctrl_set_if(ag, pdata->mii_if);
-
- /* setup FIFO configuration registers */
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
- if (pdata->is_ar724x) {
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, pdata->fifo_cfg1);
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, pdata->fifo_cfg2);
- } else {
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
- }
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
-
- ag71xx_dma_reset(ag);
-}
-
static int ag71xx_open(struct net_device *dev)
{
struct ag71xx *ag = netdev_priv(dev);
netif_stop_queue(dev);
ag71xx_hw_stop(ag);
+ ag71xx_dma_reset(ag);
napi_disable(&ag->napi);
del_timer_sync(&ag->oom_timer);
dma_addr_t dma_addr;
int i;
- i = ring->curr % AG71XX_TX_RING_SIZE;
+ i = ring->curr % ring->size;
desc = ring->buf[i].desc;
if (!ag71xx_desc_empty(desc))
wmb();
ring->curr++;
- if (ring->curr == (ring->dirty + AG71XX_TX_THRES_STOP)) {
+ if (ring->curr == (ring->dirty + ring->size)) {
DBG("%s: tx queue full\n", ag->dev->name);
netif_stop_queue(dev);
}
static void ag71xx_restart_work_func(struct work_struct *work)
{
struct ag71xx *ag = container_of(work, struct ag71xx, restart_work);
- struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
ag71xx_stop(ag->dev);
-
- if (pdata->is_ar724x)
- ag71xx_hw_init(ag);
-
ag71xx_open(ag->dev);
}
sent = 0;
while (ring->dirty != ring->curr) {
- unsigned int i = ring->dirty % AG71XX_TX_RING_SIZE;
+ unsigned int i = ring->dirty % ring->size;
struct ag71xx_desc *desc = ring->buf[i].desc;
struct sk_buff *skb = ring->buf[i].skb;
DBG("%s: %d packets sent out\n", ag->dev->name, sent);
- if ((ring->curr - ring->dirty) < AG71XX_TX_THRES_WAKEUP)
+ if ((ring->curr - ring->dirty) < (ring->size * 3) / 4)
netif_wake_queue(ag->dev);
return sent;
dev->name, limit, ring->curr, ring->dirty);
while (done < limit) {
- unsigned int i = ring->curr % AG71XX_RX_RING_SIZE;
+ unsigned int i = ring->curr % ring->size;
struct ag71xx_desc *desc = ring->buf[i].desc;
struct sk_buff *skb;
int pktlen;
if (ag71xx_desc_empty(desc))
break;
- if ((ring->dirty + AG71XX_RX_RING_SIZE) == ring->curr) {
+ if ((ring->dirty + ring->size) == ring->curr) {
ag71xx_assert(0);
break;
}
ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);
rx_ring = &ag->rx_ring;
- if (rx_ring->buf[rx_ring->dirty % AG71XX_RX_RING_SIZE].skb == NULL)
+ if (rx_ring->buf[rx_ring->dirty % rx_ring->size].skb == NULL)
goto oom;
status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
ag->oom_timer.data = (unsigned long) dev;
ag->oom_timer.function = ag71xx_oom_timer_handler;
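+	/* ring sizes are now carried in the ring structs; start from the
+	 * compile-time defaults */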
+ ag->tx_ring.size = AG71XX_TX_RING_SIZE_DEFAULT;
+ ag->rx_ring.size = AG71XX_RX_RING_SIZE_DEFAULT;
+
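+	/* allocate the dummy "stop" descriptor: it links back to itself and
+	 * is what ag71xx_dma_reset() parks the descriptor registers on */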
+ ag->stop_desc = dma_alloc_coherent(NULL,
+ sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL);
+
+ if (!ag->stop_desc)
+ goto err_free_irq;
+
+ ag->stop_desc->data = 0;
+ ag->stop_desc->ctrl = 0;
+ ag->stop_desc->next = (u32) ag->stop_desc_dma;
+
memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);
netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "unable to register net device\n");
- goto err_free_irq;
+ goto err_free_desc;
}
printk(KERN_INFO "%s: Atheros AG71xx at 0x%08lx, irq %d\n",
ag71xx_phy_disconnect(ag);
err_unregister_netdev:
unregister_netdev(dev);
+err_free_desc:
+ dma_free_coherent(NULL, sizeof(struct ag71xx_desc), ag->stop_desc,
+ ag->stop_desc_dma);
err_free_irq:
free_irq(dev->irq, dev);
err_unmap_mii_ctrl: