/*
 * originally drivers/net/tulip/interrupt.c
 * Copyright 2000,2001 The Linux Kernel Team
 * Written/copyright 1994-2001 by Donald Becker.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;
	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;
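			/* Map the fresh buffer so the NIC can DMA received
			 * frames into it (hence DMA_FROM_DEVICE). */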
			mapping = dma_map_single(&tp->pdev->dev, skb->data,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
			tp->rx_buffers[entry].mapping = mapping;
			skb->dev = dev;		/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
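		/* Setting DescOwned hands the descriptor (back) to the chip;
		 * from this point on the device may DMA into the buffer. */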
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	return refilled;
}
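/* RX out-of-memory handler: when tulip_poll() cannot refill the ring it
 * arms this timer instead of re-enabling RX interrupts; on expiry we just
 * reschedule NAPI polling so the refill can be retried. */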
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);

	napi_schedule(&tp->napi);
}
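/* For reference, a minimal sketch of how this timer is presumably wired up
 * at init time (the init code is not part of this file, so treat this as
 * an assumption rather than the driver's actual setup):
 *
 *	setup_timer(&tp->oom_timer, oom_timer, (unsigned long)dev);
 *
 * tulip_poll() then arms it with mod_timer(&tp->oom_timer, jiffies + 1)
 * at the oom: label below.
 */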
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
	u32 csr6;
	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
		       entry, tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
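		/* CSR5 status bits are write-one-to-clear: writing the
		 * RxIntr|RxNoBuf mask back acknowledges just those sources. */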
		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;
			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;
			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked status for RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;
			csr6 = ioread32(tp->base_addr + CSR6);

			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header)
			 * Anything higher than this is always invalid
			 * regardless of RxLengthOver2047 bits
			 */
			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							dev_warn(&dev->dev,
								 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								 status);
						tp->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet. */
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						tp->stats.rx_length_errors++;

					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
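					/* These three bits mirror the chip's
					 * RDES0 error word: apparently the
					 * dribbling (frame), CRC-error and
					 * FIFO-overflow bits respectively. */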
				}
			} else {
				struct sk_buff *skb = tp->rx_buffers[entry].skb;
				char *temp = skb_put(skb, pkt_len);
#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(unsigned long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif
				/* Release the DMA mapping before handing the
				   skb to the stack. */
				dma_unmap_single(&tp->pdev->dev,
						 tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;

				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);
		}
		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new
		 * poll thread is not scheduled. It is a major hole in the
		 * design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we won
		 * finally: the amount of IO did not increase at all. */
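		/* Note that the do/while condition below re-reads CSR5, so an
		 * RxIntr acked early by the irq handler is still seen on the
		 * next pass rather than lost. */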
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

	tulip_refill_rx(dev);
	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;
	/* Remove us from polling list and enable RX intr. */
	napi_complete(napi);
	iowrite32(VALID_INTR, tp->base_addr + CSR7);
	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with dis/enabling irqs in other poll threads
	 * 3. if an irq is raised after the beginning of the loop, it will be
	 *    immediately triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;

 not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

 oom:	/* Executed with RX ints disabled */
	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies + 1);

	/* Think: timer_pending() was an explicit signature of bug.
	 * Timer can be pending now but fired and completed
	 * before we did napi_complete(). See? We would lose it. */

	/* Remove ourselves from the polling list. */
	napi_complete(napi);

	return work_done;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	u32 csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	int rxd = 0;
	unsigned int work_count = 25;
	unsigned int handled = 0;
	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {
		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(VALID_INTR&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}
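		/* RX sources stay masked in CSR7 from here on; tulip_poll()
		 * does the RX work and re-enables them (or arms the oom
		 * timer) when it completes. */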
		/* Acknowledge the interrupt sources we handle here ASAP
		   the poll function does Rx and RxNoBuf acking */
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));
		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);
			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */
				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if (status & 0x0080) tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}
				dma_unmap_single(&tp->pdev->dev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len, DMA_TO_DEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
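				/* dev_kfree_skb_irq(), not dev_kfree_skb():
				 * we are in hard-irq context, so freeing is
				 * deferred rather than done inline. */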
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
			}
#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif
			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);
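			/* The queue is woken only while at least two
			 * descriptors are free, presumably so start_xmit
			 * always finds room for one more packet. */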
			tp->dirty_tx = dirty_tx;
336 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
337 csr5
, ioread32(ioaddr
+ CSR6
),
339 tulip_restart_rxtx(tp
);
341 spin_unlock(&tp
->lock
);
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
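			/* Note on the underflow handling above: each
			 * underflow bumps the FIFO drain threshold in CSR6,
			 * and once the threshold field saturates (0xC000)
			 * the chip is switched to store-and-forward, where
			 * the whole frame is buffered before transmission
			 * and underflow cannot occur. */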
			if (csr5 & (RxDied | RxNoBuf)) {
				iowrite32(tp->mc_filter[0], ioaddr + CSR27);
				iowrite32(tp->mc_filter[1], ioaddr + CSR28);
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error. The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {
			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(VALID_INTR, ioaddr + CSR7);
			tx++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);
			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			/* Mask all interrupting sources, set timer to
			   re-enable. */
			iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
			iowrite32(0x0012, ioaddr + CSR11);
			break;
		}

		work_count--;
		if (work_count == 0)
			break;
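		/* CSR11 is the general-purpose timer: the 0x0012 written
		 * above arms a short delay, after which TimerInt fires and
		 * the TimerInt branch above restores VALID_INTR. This is
		 * what throttles interrupt storms. */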
		csr5 = ioread32(ioaddr + CSR5);

		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  SystemError)) != 0);
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}
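	/* CSR8 is the chip's missed-frames counter; bit 16 appears to be its
	 * overflow flag, which is why the count is clamped to 0x10000 once
	 * the counter has wrapped. */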
	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}