}
}
+static int ag71xx_rx_reserve(struct ag71xx *ag)
+{
+ int reserve = 0;
+
+ if (ag71xx_get_pdata(ag)->is_ar724x) {
+ if (!ag71xx_has_ar8216(ag))
+ reserve = 2;
+
+ if (ag->phy_dev)
+ reserve += 4 - (ag->phy_dev->pkt_align % 4);
+
+ reserve %= 4;
+ }
+
+ return reserve + AG71XX_RX_PKT_RESERVE;
+}
+
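
For reference, here is how the offsets combine (my own illustration, not part of the patch): without the 2-byte AR8216 tag the Ethernet header alone is 14 bytes, so a 2-byte shim puts the IP header on a 4-byte boundary; with the tag the headers already total 16 bytes and no shim is needed. The pkt_align term lets a PHY driver shift this further, and the final modulo keeps the shim within 0..3 before the baseline AG71XX_RX_PKT_RESERVE is added.

	/* Illustration only, assuming the alignment rationale above. */
	static inline int example_ip_header_offset(bool has_ar8216_tag)
	{
		int shim = has_ar8216_tag ? 0 : 2;		/* mirrors "reserve = 2" */
		int hdrs = ETH_HLEN + (has_ar8216_tag ? 2 : 0);	/* 14 or 16 bytes */

		return shim + hdrs;	/* 16 either way, i.e. 4-byte aligned */
	}
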
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->rx_ring;
+ unsigned int reserve = ag71xx_rx_reserve(ag);
unsigned int i;
int ret;
struct sk_buff *skb;
dma_addr_t dma_addr;
- skb = dev_alloc_skb(AG71XX_RX_PKT_SIZE + AG71XX_RX_PKT_RESERVE);
+ skb = dev_alloc_skb(AG71XX_RX_PKT_SIZE + reserve);
if (!skb) {
ret = -ENOMEM;
break;
}
skb->dev = ag->dev;
- skb_reserve(skb, AG71XX_RX_PKT_RESERVE);
+ skb_reserve(skb, reserve);
dma_addr = dma_map_single(&ag->dev->dev, skb->data,
AG71XX_RX_PKT_SIZE, DMA_FROM_DEVICE);
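
In the full driver the mapped address is then written into the RX descriptor so the DMA engine can fill the buffer. A minimal continuation sketch (ring->buf[i].skb is visible in a later hunk; the descriptor field name is an assumption):

	/* Continuation sketch, not quoted from the patch: store the
	 * mapped address in the descriptor so the DMA engine can fill
	 * the buffer. The descriptor field name is an assumption. */
	ring->buf[i].skb = skb;			/* field visible in a later hunk */
	ring->descs[i].data = (u32) dma_addr;	/* hypothetical field name */
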
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->rx_ring;
+ unsigned int reserve = ag71xx_rx_reserve(ag);
unsigned int count;
count = 0;
dma_addr_t dma_addr;
struct sk_buff *skb;
- skb = dev_alloc_skb(AG71XX_RX_PKT_SIZE +
- AG71XX_RX_PKT_RESERVE);
+ skb = dev_alloc_skb(AG71XX_RX_PKT_SIZE + reserve);
if (skb == NULL)
break;
- skb_reserve(skb, AG71XX_RX_PKT_RESERVE);
+ skb_reserve(skb, reserve);
skb->dev = ag->dev;
dma_addr = dma_map_single(&ag->dev->dev, skb->data,
AG71XX_RX_PKT_SIZE, DMA_FROM_DEVICE);
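
For orientation, the loop that owns these refill lines walks the gap between the ring's producer and consumer cursors; a simplified sketch, with cursor and macro names assumed from the driver rather than quoted from this hunk:

	/* Simplified refill loop shape (cursor names assumed); each
	 * missing skb is reallocated with the same reserve so the
	 * alignment guarantee survives refills. */
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		unsigned int i = ring->dirty % AG71XX_RX_RING_SIZE;

		if (ring->buf[i].skb != NULL)
			continue;

		skb = dev_alloc_skb(AG71XX_RX_PKT_SIZE + reserve);
		if (skb == NULL)
			break;

		skb_reserve(skb, reserve);
		skb->dev = ag->dev;
		/* ... map and install into the descriptor as in rx_init ... */
		count++;
	}
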
ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
+ /*
+ * Give the hardware some time to really stop all rx/tx activity;
+ * clearing the descriptors too early causes random memory corruption.
+ */
+ mdelay(1);
+
/* clear descriptor addresses */
ag71xx_wr(ag, AG71XX_REG_TX_DESC, 0);
ag71xx_wr(ag, AG71XX_REG_RX_DESC, 0);
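
The fixed 1 ms delay is the simplest safe fix on this slow path (device stop). A polling variant would look like the sketch below; note that ag71xx_dma_idle() is a hypothetical helper invented for illustration, not something the driver provides:

	/* Hypothetical alternative to the fixed mdelay(1): poll until
	 * both DMA engines report idle, bounded by a timeout.
	 * ag71xx_dma_idle() is an assumed helper and does not exist
	 * in the driver. */
	int timeout = 10;

	while (!ag71xx_dma_idle(ag) && timeout--)
		udelay(100);
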
static int ag71xx_open(struct net_device *dev)
{
struct ag71xx *ag = netdev_priv(dev);
+ struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
int ret;
ret = ag71xx_rings_init(ag);
if (ret)
goto err;
+ if (pdata->is_ar724x)
+ ag71xx_hw_init(ag);
+
napi_enable(&ag->napi);
netif_carrier_off(dev);
return sent;
}
-static int ag71xx_rx_copy_skb(struct ag71xx *ag, struct sk_buff **pskb,
- int pktlen)
-{
- struct sk_buff *copy_skb;
-
- copy_skb = netdev_alloc_skb(ag->dev, pktlen + NET_IP_ALIGN);
- if (!copy_skb)
- return -ENOMEM;
-
- skb_reserve(copy_skb, NET_IP_ALIGN);
- skb_copy_from_linear_data(*pskb, copy_skb->data, pktlen);
- skb_put(copy_skb, pktlen);
-
- dev_kfree_skb_any(*pskb);
- *pskb = copy_skb;
-
- return 0;
-}
-
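
With the buffers pre-aligned at allocation time, the per-packet copy above is pure overhead, which is why it can be deleted outright. A quick sanity check one could drop into the RX path to verify the claim (my own illustration, ignoring the AR8216 tag case; not part of the patch):

	/* Illustration only: with the reserve scheme, the IP header
	 * behind a plain Ethernet header should already be 4-byte
	 * aligned, which is the property the deleted copy path
	 * existed to guarantee. */
	WARN_ON_ONCE(((unsigned long)(skb->data + ETH_HLEN)) & 3);
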
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
struct net_device *dev = ag->dev;
dev->stats.rx_packets++;
dev->stats.rx_bytes += pktlen;
+ skb_put(skb, pktlen);
if (ag71xx_has_ar8216(ag))
err = ag71xx_remove_ar8216_header(ag, skb, pktlen);
- else
- err = ag71xx_rx_copy_skb(ag, &skb, pktlen);
if (err) {
dev->stats.rx_dropped++;
} else {
skb->dev = dev;
skb->ip_summed = CHECKSUM_NONE;
- skb->protocol = eth_type_trans(skb, dev);
- netif_receive_skb(skb);
+ if (ag->phy_dev) {
+ ag->phy_dev->netif_receive_skb(skb);
+ } else {
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
}
ring->buf[i].skb = NULL;
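
Both pkt_align and the netif_receive_skb hook come from OpenWrt's extension of struct phy_device, which lets a switch PHY driver take over delivery of tagged frames. A sketch of how such a driver might wire this up, with the field signatures assumed from their usage in this patch:

	/* Sketch (field signatures assumed from usage above): a switch
	 * PHY driver such as ar8216 installing the receive hook and
	 * requesting a 2-byte alignment shim. */
	static int ar8216_netif_receive_skb(struct sk_buff *skb)
	{
		/* interpret/strip the 2-byte switch tag here, then
		 * hand the frame to the stack the usual way */
		skb->protocol = eth_type_trans(skb, skb->dev);
		return netif_receive_skb(skb);
	}

	static int ar8216_config_init(struct phy_device *phydev)
	{
		phydev->pkt_align = 2;	/* feeds the reserve math above */
		phydev->netif_receive_skb = ar8216_netif_receive_skb;
		return 0;
	}
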