This function needs an early exit condition to function properly. Without
it, the caller assumes the NAPI workload was not enough to handle all
received packets, and korina_rx() is called again (and again and again
and ...).
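
For reference, the caller side of that contract looks roughly like this
(a sketch of the usual NAPI poll pattern, not a verbatim copy of
korina_poll(); the interrupt unmasking write is elided, and struct
korina_private's dev pointer and the one-argument netif_rx_complete()
are assumed here):

	static int korina_poll(struct napi_struct *napi, int budget)
	{
		struct korina_private *lp =
			container_of(napi, struct korina_private, napi);
		int work_done;

		work_done = korina_rx(lp->dev, budget);
		if (work_done < budget) {
			/* rx ring drained: leave polling mode
			 * and re-enable the rx interrupts */
			netif_rx_complete(napi);
		}
		/* work_done == budget keeps NAPI scheduled and
		 * korina_rx() gets polled again; without the
		 * early exit it always returns 'limit' */
		return work_done;
	}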
Signed-off-by: Phil Sutter <n0-1@freewrt.org>
---
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -353,15 +353,20 @@ static int korina_rx(struct net_device *
 	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
 	struct sk_buff *skb, *skb_new;
 	u8 *pkt_buf;
-	u32 devcs, pkt_len, dmas, rx_free_desc;
+	u32 devcs, pkt_len, dmas;
 	int count;
 
 	dma_cache_inv((u32)rd, sizeof(*rd));
 
 	for (count = 0; count < limit; count++) {
+		skb = lp->rx_skb[lp->rx_next_done];
+		skb_new = NULL;
 
 		devcs = rd->devcs;
 
+		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
+			break;
+
 		/* Update statistics counters */
 		if (devcs & ETH_RX_CRC)
 			dev->stats.rx_crc_errors++;
@@ -384,64 +389,53 @@ static int korina_rx(struct net_device *
 			 * in Rc32434 (errata ref #077) */
 			dev->stats.rx_errors++;
 			dev->stats.rx_dropped++;
-		}
-
-		while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
-			/* init the var. used for the later
-			 * operations within the while loop */
-			skb_new = NULL;
+		} else if ((devcs & ETH_RX_ROK)) {
 			pkt_len = RCVPKT_LENGTH(devcs);
-			skb = lp->rx_skb[lp->rx_next_done];
+
+			/* must be the (first and) last
+			 * descriptor then */
+			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
+
+			/* invalidate the cache */
+			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
+
+			/* Malloc up new buffer. */
+			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+
+			if (!skb_new)
+				break;
+			/* Do not count the CRC */
+			skb_put(skb, pkt_len - 4);
+			skb->protocol = eth_type_trans(skb, dev);
+
+			/* Pass the packet to upper layers */
+			netif_receive_skb(skb);
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
+
+			/* Update the mcast stats */
+			if (devcs & ETH_RX_MP)
+				dev->stats.multicast++;
 
-			if ((devcs & ETH_RX_ROK)) {
-				/* must be the (first and) last
-				 * descriptor then */
-				pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
-
-				/* invalidate the cache */
-				dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
-
-				/* Malloc up new buffer. */
-				skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
-
-				if (!skb_new)
-					break;
-				/* Do not count the CRC */
-				skb_put(skb, pkt_len - 4);
-				skb->protocol = eth_type_trans(skb, dev);
-
-				/* Pass the packet to upper layers */
-				netif_receive_skb(skb);
-				dev->last_rx = jiffies;
-				dev->stats.rx_packets++;
-				dev->stats.rx_bytes += pkt_len;
-
-				/* Update the mcast stats */
-				if (devcs & ETH_RX_MP)
-					dev->stats.multicast++;
-
-				lp->rx_skb[lp->rx_next_done] = skb_new;
-			}
-
-			rd->devcs = 0;
-
-			/* Restore descriptor's curr_addr */
-			if (skb_new)
-				rd->ca = CPHYSADDR(skb_new->data);
-			else
-				rd->ca = CPHYSADDR(skb->data);
+			lp->rx_skb[lp->rx_next_done] = skb_new;
+		}
+
+		rd->devcs = 0;
+		/* Restore descriptor's curr_addr */
+		if (skb_new)
+			rd->ca = CPHYSADDR(skb_new->data);
+		else
+			rd->ca = CPHYSADDR(skb->data);
 
-			rd->control = DMA_COUNT(KORINA_RBSIZE) |
+		rd->control = DMA_COUNT(KORINA_RBSIZE) |
 			DMA_DESC_COD | DMA_DESC_IOD;
-			lp->rd_ring[(lp->rx_next_done - 1) &
-				KORINA_RDS_MASK].control &=
-				~DMA_DESC_COD;
-
-			lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
-			dma_cache_wback((u32)rd, sizeof(*rd));
-			rd = &lp->rd_ring[lp->rx_next_done];
-			writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
-		}
+		lp->rd_ring[(lp->rx_next_done - 1) &
+			KORINA_RDS_MASK].control &=
+			~DMA_DESC_COD;
+
+		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
+		dma_cache_wback((u32)rd, sizeof(*rd));
+		rd = &lp->rd_ring[lp->rx_next_done];
+		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 	}
 
 	dmas = readl(&lp->rx_dma_regs->dmas);