#include <linux/etherdevice.h>
#include <linux/platform_device.h>
-#include <eth.h>
+#include <ramips_eth_platform.h>
+#include "ramips_eth.h"
#define TX_TIMEOUT (20 * HZ / 100)
-#define MAX_RX_LENGTH 1500
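+/* room for one 1500 byte MTU frame plus ethernet header and a VLAN tag */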
+#define MAX_RX_LENGTH 1600
#ifdef CONFIG_RALINK_RT305X
#include "ramips_esw.c"
if (priv->rx_skb[i])
dev_kfree_skb_any(priv->rx_skb[i]);
- dma_free_coherent(NULL, NUM_RX_DESC * sizeof(struct ramips_rx_dma),
- priv->rx, priv->phy_rx);
+ if (priv->rx)
+ dma_free_coherent(NULL,
+ NUM_RX_DESC * sizeof(struct ramips_rx_dma),
+ priv->rx, priv->phy_rx);
- dma_free_coherent(NULL, NUM_TX_DESC * sizeof(struct ramips_tx_dma),
- priv->tx, priv->phy_tx);
+ if (priv->tx)
+ dma_free_coherent(NULL,
+ NUM_TX_DESC * sizeof(struct ramips_tx_dma),
+ priv->tx, priv->phy_tx);
}
static int
ramips_alloc_dma(struct net_device *dev)
{
struct raeth_priv *priv = netdev_priv(dev);
+ int err = -ENOMEM;
int i;
priv->skb_free_idx = 0;
/* setup tx ring */
priv->tx = dma_alloc_coherent(NULL,
NUM_TX_DESC * sizeof(struct ramips_tx_dma), &priv->phy_tx, GFP_ATOMIC);
+ if (!priv->tx)
+ goto err_cleanup;
+
for(i = 0; i < NUM_TX_DESC; i++)
{
memset(&priv->tx[i], 0, sizeof(struct ramips_tx_dma));
/* setup rx ring */
priv->rx = dma_alloc_coherent(NULL,
NUM_RX_DESC * sizeof(struct ramips_rx_dma), &priv->phy_rx, GFP_ATOMIC);
+ if (!priv->rx)
+ goto err_cleanup;
+
memset(priv->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
for(i = 0; i < NUM_RX_DESC; i++)
{
struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH + 2);
- BUG_ON(!new_skb);
+
+ if (!new_skb)
+ goto err_cleanup;
+
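+		/* 2 byte offset keeps the IP header word aligned */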
skb_reserve(new_skb, 2);
priv->rx[i].rxd1 =
			dma_map_single(NULL, skb_put(new_skb, 2), MAX_RX_LENGTH + 2,
				       DMA_FROM_DEVICE);
}
return 0;
+
+ err_cleanup:
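+	/* ramips_cleanup_dma() copes with a partially initialized state */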
+ ramips_cleanup_dma(dev);
+ return err;
}
static void
unsigned long tx;
unsigned int tx_next;
unsigned int mapped_addr;
+ unsigned long flags;
+
if(priv->plat->min_pkt_len)
{
if(skb->len < priv->plat->min_pkt_len)
mapped_addr = (unsigned int)dma_map_single(NULL, skb->data, skb->len,
DMA_TO_DEVICE);
dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
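+	/* page_lock protects the TX ring from concurrent transmit paths */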
+ spin_lock_irqsave(&priv->page_lock, flags);
tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
if(tx == NUM_TX_DESC - 1)
tx_next = 0;
else
tx_next = tx + 1;
- if((priv->tx_skb[tx]== 0) && (priv->tx_skb[tx_next] == 0))
- {
- if(!(priv->tx[tx].txd2 & TX_DMA_DONE))
- {
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- printk(KERN_ERR "%s: dropping\n", dev->name);
- return 0;
- }
- priv->tx[tx].txd1 = virt_to_phys(skb->data);
- priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
- priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
- ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- priv->tx_skb[tx] = skb;
- ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
- } else {
- dev->stats.tx_dropped++;
- kfree_skb(skb);
- }
- return 0;
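+	/* drop unless both the current and the next descriptor are free */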
+	if((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
+	   !(priv->tx[tx].txd2 & TX_DMA_DONE) ||
+	   !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
+ goto out;
+ priv->tx[tx].txd1 = mapped_addr;
+ priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
+ priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ priv->tx_skb[tx] = skb;
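+	/* descriptor writes must reach memory before the DMA index is advanced */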
+ wmb();
+ ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
+ spin_unlock_irqrestore(&priv->page_lock, flags);
+ return NETDEV_TX_OK;
+out:
+ spin_unlock_irqrestore(&priv->page_lock, flags);
+ dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
static void
rx_skb = priv->rx_skb[rx];
rx_skb->len = RX_DMA_PLEN0(priv->rx[rx].rxd2);
-	rx_skb->tail = rx_skb->data + rx_skb->len;
+	skb_set_tail_pointer(rx_skb, rx_skb->len);
rx_skb->dev = dev;
rx_skb->protocol = eth_type_trans(rx_skb, dev);
rx_skb->ip_summed = CHECKSUM_NONE;
dev->stats.rx_bytes += rx_skb->len;
netif_rx(rx_skb);
- new_skb = __dev_alloc_skb(MAX_RX_LENGTH + 2, GFP_DMA | GFP_ATOMIC);
+ new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + 2);
priv->rx_skb[rx] = new_skb;
BUG_ON(!new_skb);
skb_reserve(new_skb, 2);
	priv->rx[rx].rxd1 =
		dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH + 2,
DMA_FROM_DEVICE);
priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
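+	/* the cleared DONE bit must be visible before the ring index moves on */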
+ wmb();
ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
}
if(max_rx == 0)
ramips_eth_open(struct net_device *dev)
{
struct raeth_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
+ dev->name, dev);
+ if (err)
+ return err;
+
+ err = ramips_alloc_dma(dev);
+ if (err)
+ goto err_free_irq;
- ramips_alloc_dma(dev);
ramips_setup_dma(dev);
ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
((rt305x_sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
RAMIPS_FE_GLO_CFG);
- request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED, dev->name, dev);
tasklet_init(&priv->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
(unsigned long)dev);
tasklet_init(&priv->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);
ramips_fe_wr(0, RAMIPS_FE_RST_GL);
netif_start_queue(dev);
return 0;
+
+ err_free_irq:
+ free_irq(dev->irq, dev);
+ return err;
}
static int
dev->stop = ramips_eth_stop;
dev->hard_start_xmit = ramips_eth_hard_start_xmit;
dev->set_mac_address = ramips_eth_set_mac_addr;
- dev->mtu = MAX_RX_LENGTH;
+ dev->mtu = 1500;
dev->tx_timeout = ramips_eth_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
+ spin_lock_init(&priv->page_lock);
return 0;
}