ring->descs_cpu, ring->descs_dma);
}
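+/* ring->size must be set by the caller before the ring is allocated */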
-static int ag71xx_ring_alloc(struct ag71xx_ring *ring, unsigned int size)
+static int ag71xx_ring_alloc(struct ag71xx_ring *ring)
{
int err;
int i;
ring->desc_size = roundup(ring->desc_size, cache_line_size());
- ring->descs_cpu = dma_alloc_coherent(NULL, size * ring->desc_size,
+ ring->descs_cpu = dma_alloc_coherent(NULL, ring->size * ring->desc_size,
&ring->descs_dma, GFP_ATOMIC);
if (!ring->descs_cpu) {
err = -ENOMEM;
goto err;
}
- ring->size = size;
- ring->buf = kzalloc(size * sizeof(*ring->buf), GFP_KERNEL);
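+ /* per-descriptor bookkeeping: desc pointer, skb and dma address */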
+ ring->buf = kzalloc(ring->size * sizeof(*ring->buf), GFP_KERNEL);
if (!ring->buf) {
err = -ENOMEM;
goto err;
}
- for (i = 0; i < size; i++) {
+ for (i = 0; i < ring->size; i++) {
int idx = i * ring->desc_size;
ring->buf[i].desc = (struct ag71xx_desc *)&ring->descs_cpu[idx];
DBG("ag71xx: ring %p, desc %d at %p\n",
struct net_device *dev = ag->dev;
while (ring->curr != ring->dirty) {
- u32 i = ring->dirty % AG71XX_TX_RING_SIZE;
+ u32 i = ring->dirty % ring->size;
if (!ag71xx_desc_empty(ring->buf[i].desc)) {
ring->buf[i].desc->ctrl = 0;
struct ag71xx_ring *ring = &ag->tx_ring;
int i;
- for (i = 0; i < AG71XX_TX_RING_SIZE; i++) {
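+ /* link each descriptor to the next, wrapping at the runtime ring size */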
+ for (i = 0; i < ring->size; i++) {
ring->buf[i].desc->next = (u32) (ring->descs_dma +
- ring->desc_size * ((i + 1) % AG71XX_TX_RING_SIZE));
+ ring->desc_size * ((i + 1) % ring->size));
ring->buf[i].desc->ctrl = DESC_EMPTY;
ring->buf[i].skb = NULL;
if (!ring->buf)
return;
- for (i = 0; i < AG71XX_RX_RING_SIZE; i++)
+ for (i = 0; i < ring->size; i++)
if (ring->buf[i].skb) {
dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
AG71XX_RX_PKT_SIZE, DMA_FROM_DEVICE);
int ret;
ret = 0;
- for (i = 0; i < AG71XX_RX_RING_SIZE; i++) {
+ for (i = 0; i < ring->size; i++) {
ring->buf[i].desc->next = (u32) (ring->descs_dma +
- ring->desc_size * ((i + 1) % AG71XX_RX_RING_SIZE));
+ ring->desc_size * ((i + 1) % ring->size));
DBG("ag71xx: RX desc at %p, next is %08x\n",
ring->buf[i].desc,
ring->buf[i].desc->next);
}
- for (i = 0; i < AG71XX_RX_RING_SIZE; i++) {
+ for (i = 0; i < ring->size; i++) {
struct sk_buff *skb;
dma_addr_t dma_addr;
for (; ring->curr - ring->dirty > 0; ring->dirty++) {
unsigned int i;
- i = ring->dirty % AG71XX_RX_RING_SIZE;
+ i = ring->dirty % ring->size;
if (ring->buf[i].skb == NULL) {
dma_addr_t dma_addr;
{
int ret;
- ret = ag71xx_ring_alloc(&ag->tx_ring, AG71XX_TX_RING_SIZE);
+ ret = ag71xx_ring_alloc(&ag->tx_ring);
if (ret)
return ret;
ag71xx_ring_tx_init(ag);
- ret = ag71xx_ring_alloc(&ag->rx_ring, AG71XX_RX_RING_SIZE);
+ ret = ag71xx_ring_alloc(&ag->rx_ring);
if (ret)
return ret;
return "?";
}
-static void ag71xx_dma_reset(struct ag71xx *ag)
-{
- u32 val;
- int i;
-
- ag71xx_dump_dma_regs(ag);
-
- /* stop RX and TX */
- ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
- ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
-
- /*
- * give the hardware some time to really stop all rx/tx activity
- * clearing the descriptors too early causes random memory corruption
- */
- mdelay(1);
-
- /* clear descriptor addresses */
- ag71xx_wr(ag, AG71XX_REG_TX_DESC, 0);
- ag71xx_wr(ag, AG71XX_REG_RX_DESC, 0);
-
- /* clear pending RX/TX interrupts */
- for (i = 0; i < 256; i++) {
- ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
- ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
- }
-
- /* clear pending errors */
- ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
- ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);
-
- val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
- if (val)
- printk(KERN_ALERT "%s: unable to clear DMA Rx status: %08x\n",
- ag->dev->name, val);
-
- val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
-
- /* mask out reserved bits */
- val &= ~0xff000000;
-
- if (val)
- printk(KERN_ALERT "%s: unable to clear DMA Tx status: %08x\n",
- ag->dev->name, val);
-
- ag71xx_dump_dma_regs(ag);
-}
-
-static void ag71xx_hw_start(struct ag71xx *ag)
-{
- /* start RX engine */
- ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
-
- /* enable interrupts */
- ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
-}
-
-static void ag71xx_hw_stop(struct ag71xx *ag)
-{
- /* disable all interrupts */
- ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
-
- ag71xx_dma_reset(ag);
-}
-
void ag71xx_link_adjust(struct ag71xx *ag)
{
struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
u32 mii_speed;
if (!ag->link) {
- ag71xx_hw_stop(ag);
netif_carrier_off(ag->dev);
if (netif_msg_link(ag))
printk(KERN_INFO "%s: link down\n", ag->dev->name);
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
- ag71xx_hw_start(ag);
-
netif_carrier_on(ag->dev);
if (netif_msg_link(ag))
printk(KERN_INFO "%s: link up (%sMbps/%s duplex)\n",
ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
+static void ag71xx_dma_reset(struct ag71xx *ag)
+{
+ u32 val;
+ int i;
+
+ ag71xx_dump_dma_regs(ag);
+
+ /* stop RX and TX */
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
+ ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
+
+ /*
+ * give the hardware some time to really stop all rx/tx activity;
+ * clearing the descriptors too early causes random memory corruption
+ */
+ mdelay(1);
+
+ /* clear descriptor addresses */
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, 0);
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, 0);
+
+ /* clear pending RX/TX interrupts */
+ for (i = 0; i < 256; i++) {
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
+ }
+
+ /* clear pending errors */
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);
+
+ val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
+ if (val)
+ printk(KERN_ALERT "%s: unable to clear DMA Rx status: %08x\n",
+ ag->dev->name, val);
+
+ val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
+
+ /* mask out reserved bits */
+ val &= ~0xff000000;
+
+ if (val)
+ printk(KERN_ALERT "%s: unable to clear DMA Tx status: %08x\n",
+ ag->dev->name, val);
+
+ ag71xx_dump_dma_regs(ag);
+}
+
#define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \
MAC_CFG1_SRX | MAC_CFG1_STX)
ag71xx_dma_reset(ag);
}
+static void ag71xx_hw_start(struct ag71xx *ag)
+{
+ /* start RX engine */
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
+
+ /* enable interrupts */
+ ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
+}
+
+static void ag71xx_hw_stop(struct ag71xx *ag)
+{
+ /* disable all interrupts */
+ ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
+
+ ag71xx_dma_reset(ag);
+}
+
static int ag71xx_open(struct net_device *dev)
{
struct ag71xx *ag = netdev_priv(dev);
ag71xx_hw_set_macaddr(ag, dev->dev_addr);
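+ /* bring up the RX engine and interrupts before starting the queue */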
+ ag71xx_hw_start(ag);
+
netif_start_queue(dev);
return 0;
dma_addr_t dma_addr;
int i;
- i = ring->curr % AG71XX_TX_RING_SIZE;
+ i = ring->curr % ring->size;
desc = ring->buf[i].desc;
if (!ag71xx_desc_empty(desc))
wmb();
ring->curr++;
- if (ring->curr == (ring->dirty + AG71XX_TX_THRES_STOP)) {
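+ /* stop the queue once every descriptor in the ring is in use */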
+ if (ring->curr == (ring->dirty + ring->size)) {
DBG("%s: tx queue full\n", ag->dev->name);
netif_stop_queue(dev);
}
sent = 0;
while (ring->dirty != ring->curr) {
- unsigned int i = ring->dirty % AG71XX_TX_RING_SIZE;
+ unsigned int i = ring->dirty % ring->size;
struct ag71xx_desc *desc = ring->buf[i].desc;
struct sk_buff *skb = ring->buf[i].skb;
DBG("%s: %d packets sent out\n", ag->dev->name, sent);
- if ((ring->curr - ring->dirty) < AG71XX_TX_THRES_WAKEUP)
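+ /* wake the queue once at least a quarter of the ring is free again */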
+ if ((ring->curr - ring->dirty) < (ring->size * 3) / 4)
netif_wake_queue(ag->dev);
return sent;
dev->name, limit, ring->curr, ring->dirty);
while (done < limit) {
- unsigned int i = ring->curr % AG71XX_RX_RING_SIZE;
+ unsigned int i = ring->curr % ring->size;
struct ag71xx_desc *desc = ring->buf[i].desc;
struct sk_buff *skb;
int pktlen;
if (ag71xx_desc_empty(desc))
break;
- if ((ring->dirty + AG71XX_RX_RING_SIZE) == ring->curr) {
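+ /* the whole ring was consumed without a refill; this should never happen */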
+ if ((ring->dirty + ring->size) == ring->curr) {
ag71xx_assert(0);
break;
}
ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);
rx_ring = &ag->rx_ring;
- if (rx_ring->buf[rx_ring->dirty % AG71XX_RX_RING_SIZE].skb == NULL)
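+ /* a refill left a hole in the ring; bail out so the oom timer can retry */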
+ if (rx_ring->buf[rx_ring->dirty % rx_ring->size].skb == NULL)
goto oom;
status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
ag->oom_timer.data = (unsigned long) dev;
ag->oom_timer.function = ag71xx_oom_timer_handler;
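+ /* ring sizes are now per-device state instead of compile-time constants */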
+ ag->tx_ring.size = AG71XX_TX_RING_SIZE_DEFAULT;
+ ag->rx_ring.size = AG71XX_RX_RING_SIZE_DEFAULT;
+
memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);
netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);