/**************************************************************************
 *
 * BRIEF MODULE DESCRIPTION
 *	Driver for the IDT RC32434 on-chip ethernet controller.
 *
 * Copyright 2004 IDT Inc. (rischelp@idt.com)
 * Copyright 2006 Felix Fietkau <nbd@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 **************************************************************************
 * May 2004 rkt, neb
 *
 * Based on the driver developed by B. Maruthanayakam, H. Kou and others.
 *
 * Aug 2004 Sadik
 *
 * Added NAPI
 *
 **************************************************************************
 */

#include <linux/autoconf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <asm/bootinfo.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/rc32434/rb.h>
#include "rc32434_eth.h"

#define DRIVER_VERSION "(mar2904)"

#define DRIVER_NAME "rc32434 Ethernet driver. " DRIVER_VERSION

#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
				   ((dev)->dev_addr[1]))
#define STATION_ADDRESS_LOW(dev)  (((dev)->dev_addr[2] << 24) | \
				   ((dev)->dev_addr[3] << 16) | \
				   ((dev)->dev_addr[4] << 8)  | \
				   ((dev)->dev_addr[5]))
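
/*
 * Example: for MAC address 00:0c:42:11:22:33 the macros above yield
 * STATION_ADDRESS_HIGH = 0x000c (dev_addr[0..1]) and
 * STATION_ADDRESS_LOW  = 0x42112233 (dev_addr[2..5]), matching the
 * 16-bit/32-bit split of the ETHSAH/ETHSAL station address registers
 * programmed in rc32434_init() below.
 */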

#define MII_CLOCK 1250000	/* no more than 2.5MHz */
#define CONFIG_IDT_USE_NAPI 1


static inline void rc32434_abort_tx(struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	rc32434_abort_dma(dev, lp->tx_dma_regs);
}

static inline void rc32434_abort_rx(struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	rc32434_abort_dma(dev, lp->rx_dma_regs);
}

static inline void rc32434_start_tx(struct rc32434_local *lp, volatile DMAD_t td)
{
	rc32434_start_dma(lp->tx_dma_regs, CPHYSADDR(td));
}

static inline void rc32434_start_rx(struct rc32434_local *lp, volatile DMAD_t rd)
{
	rc32434_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}

static inline void rc32434_chain_tx(struct rc32434_local *lp, volatile DMAD_t td)
{
	rc32434_chain_dma(lp->tx_dma_regs, CPHYSADDR(td));
}

static inline void rc32434_chain_rx(struct rc32434_local *lp, volatile DMAD_t rd)
{
	rc32434_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}
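
/*
 * Note on the helpers above: the *_start_* variants hand a descriptor to
 * an idle DMA channel, while the *_chain_* variants append one to a
 * channel that may still be running (rc32434_start_dma()/
 * rc32434_chain_dma() are declared in rc32434_eth.h). CPHYSADDR()
 * converts the kernel virtual address of the descriptor to the physical
 * address the DMA engine expects.
 */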

#ifdef RC32434_PROC_DEBUG
static int rc32434_read_proc(char *buf, char **start, off_t fpos,
			     int length, int *eof, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	int len = 0;

	/* print out header */
	len += sprintf(buf + len, "\n\tKorina Ethernet Debug\n\n");
	len += sprintf(buf + len,
		       "DMA halt count = %10d, DMA run count = %10d\n",
		       lp->dma_halt_cnt, lp->dma_run_cnt);

	if (fpos >= len) {
		*start = buf;
		*eof = 1;
		return 0;
	}
	*start = buf + fpos;

	if ((len -= fpos) > length)
		return length;
	*eof = 1;

	return len;
}
#endif


/*
 * Restart the RC32434 ethernet controller.
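 *
 * All interrupt sources are masked and the Tx/Rx DMA status bits are
 * masked off before rc32434_init() reprograms the MAC; everything is
 * then re-enabled in reverse order.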
 */
static int rc32434_restart(struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;

	/*
	 * Disable interrupts
	 */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
#ifdef RC32434_REVISION
	disable_irq(lp->ovr_irq);
#endif
	disable_irq(lp->und_irq);

	/* Mask F E bit in Tx DMA */
	__raw_writel(__raw_readl(&lp->tx_dma_regs->dmasm) | DMASM_f_m | DMASM_e_m,
		     &lp->tx_dma_regs->dmasm);
	/* Mask D H E bit in Rx DMA */
	__raw_writel(__raw_readl(&lp->rx_dma_regs->dmasm) | DMASM_d_m | DMASM_h_m | DMASM_e_m,
		     &lp->rx_dma_regs->dmasm);

	rc32434_init(dev);
	rc32434_multicast_list(dev);

	enable_irq(lp->und_irq);
#ifdef RC32434_REVISION
	enable_irq(lp->ovr_irq);
#endif
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);

	return 0;
}

static int rc32434_probe(struct platform_device *pdev)
{
	struct korina_device *bif = (struct korina_device *)pdev->dev.platform_data;
	struct rc32434_local *lp = NULL;
	struct net_device *dev = NULL;
	struct resource *r;
	int i, retval, err;

	dev = alloc_etherdev(sizeof(struct rc32434_local));
	if (!dev) {
		ERR("Korina_eth: alloc_etherdev failed\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, dev);
	SET_MODULE_OWNER(dev);
	bif->dev = dev;

	memcpy(dev->dev_addr, bif->mac, 6);

	/* Initialize the device structure. */
	if (dev->priv == NULL) {
		lp = (struct rc32434_local *)kmalloc(sizeof(*lp), GFP_KERNEL);
		if (!lp) {
			free_netdev(dev);
			return -ENOMEM;
		}
		memset(lp, 0, sizeof(struct rc32434_local));
	} else {
		lp = (struct rc32434_local *)dev->priv;
	}

	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
	lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
	lp->und_irq = platform_get_irq_byname(pdev, "korina_und");

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
	dev->base_addr = r->start;
	/* resource limits are inclusive, hence the +1 for the length */
	lp->eth_regs = ioremap_nocache(r->start, r->end - r->start + 1);
	if (!lp->eth_regs) {
		ERR("Can't remap eth registers\n");
		retval = -ENXIO;
		goto probe_err_out;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
	lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start + 1);
	if (!lp->rx_dma_regs) {
		ERR("Can't remap Rx DMA registers\n");
		retval = -ENXIO;
		goto probe_err_out;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
	lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start + 1);
	if (!lp->tx_dma_regs) {
		ERR("Can't remap Tx DMA registers\n");
		retval = -ENXIO;
		goto probe_err_out;
	}

#ifdef RC32434_PROC_DEBUG
	lp->ps = create_proc_read_entry(bif->name, 0, proc_net,
					rc32434_read_proc, dev);
#endif

	lp->td_ring = (DMAD_t)kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
	if (!lp->td_ring) {
		ERR("Can't allocate descriptors\n");
		retval = -ENOMEM;
		goto probe_err_out;
	}

	dma_cache_inv((unsigned long)(lp->td_ring), TD_RING_SIZE + RD_RING_SIZE);

	/* now convert TD_RING pointer to KSEG1 */
	lp->td_ring = (DMAD_t)KSEG1ADDR(lp->td_ring);
	lp->rd_ring = &lp->td_ring[RC32434_NUM_TDS];
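
	/*
	 * The descriptor rings live in KSEG1 (uncached) address space, so
	 * CPU updates are visible to the DMA engine without cache flushes;
	 * the dma_cache_inv() above discarded any stale cache lines that
	 * covered the freshly allocated block.
	 */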

	spin_lock_init(&lp->lock);

	/* just use the rx dma irq */
	dev->irq = lp->rx_irq;

	dev->priv = lp;

	dev->open = rc32434_open;
	dev->stop = rc32434_close;
	dev->hard_start_xmit = rc32434_send_packet;
	dev->get_stats = rc32434_get_stats;
	dev->set_multicast_list = &rc32434_multicast_list;
	dev->tx_timeout = rc32434_tx_timeout;
	dev->watchdog_timeo = RC32434_TX_TIMEOUT;

#ifdef CONFIG_IDT_USE_NAPI
	dev->poll = rc32434_poll;
	dev->weight = 64;
	printk("Using NAPI with weight %d\n", dev->weight);
#else
	lp->rx_tasklet = kmalloc(sizeof(struct tasklet_struct), GFP_KERNEL);
	tasklet_init(lp->rx_tasklet, rc32434_rx_tasklet, (unsigned long)dev);
#endif
	lp->tx_tasklet = kmalloc(sizeof(struct tasklet_struct), GFP_KERNEL);
	tasklet_init(lp->tx_tasklet, rc32434_tx_tasklet, (unsigned long)dev);

	if ((err = register_netdev(dev))) {
		printk(KERN_ERR "rc32434 ethernet. Cannot register net device %d\n", err);
		free_netdev(dev);
		retval = -EINVAL;
		goto probe_err_out;
	}

	INFO("Rx IRQ %d, Tx IRQ %d, ", lp->rx_irq, lp->tx_irq);
	for (i = 0; i < 6; i++) {
		printk("%2.2x", dev->dev_addr[i]);
		if (i < 5)
			printk(":");
	}
	printk("\n");

	return 0;

probe_err_out:
	rc32434_cleanup_module();
	ERR(" failed. Returns %d\n", retval);
	return retval;
}

static int rc32434_remove(struct platform_device *pdev)
{
	struct korina_device *bif = (struct korina_device *)pdev->dev.platform_data;

	if (bif->dev != NULL) {
		struct rc32434_local *lp = (struct rc32434_local *)bif->dev->priv;
		if (lp != NULL) {
			if (lp->eth_regs)
				iounmap((void *)lp->eth_regs);
			if (lp->rx_dma_regs)
				iounmap((void *)lp->rx_dma_regs);
			if (lp->tx_dma_regs)
				iounmap((void *)lp->tx_dma_regs);
			if (lp->td_ring)
				kfree((void *)KSEG0ADDR(lp->td_ring));

#ifdef RC32434_PROC_DEBUG
			if (lp->ps)
				remove_proc_entry(bif->name, proc_net);
#endif
			kfree(lp);
		}

		platform_set_drvdata(pdev, NULL);
		unregister_netdev(bif->dev);
		/* free_netdev() releases the net_device itself; a further
		 * kfree() of bif->dev here would be a double free. */
		free_netdev(bif->dev);
	}
	return 0;
}


static int rc32434_open(struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;

	/* Initialize */
	if (rc32434_init(dev)) {
		ERR("Error: cannot open the Ethernet device\n");
		return -EAGAIN;
	}

	/* Install the interrupt handlers for the Done, Finished, Ovr and Und events */
	if (request_irq(lp->rx_irq, &rc32434_rx_dma_interrupt,
			SA_SHIRQ | SA_INTERRUPT,
			"Korina ethernet Rx", dev)) {
		ERR(": unable to get Rx DMA IRQ %d\n", lp->rx_irq);
		return -EAGAIN;
	}
	if (request_irq(lp->tx_irq, &rc32434_tx_dma_interrupt,
			SA_SHIRQ | SA_INTERRUPT,
			"Korina ethernet Tx", dev)) {
		ERR(": unable to get Tx DMA IRQ %d\n", lp->tx_irq);
		free_irq(lp->rx_irq, dev);
		return -EAGAIN;
	}

#ifdef RC32434_REVISION
	/* Install handler for overrun error. */
	if (request_irq(lp->ovr_irq, &rc32434_ovr_interrupt,
			SA_SHIRQ | SA_INTERRUPT,
			"Ethernet Overflow", dev)) {
		ERR(": unable to get OVR IRQ %d\n", lp->ovr_irq);
		free_irq(lp->rx_irq, dev);
		free_irq(lp->tx_irq, dev);
		return -EAGAIN;
	}
#endif

	/* Install handler for underflow error. */
	if (request_irq(lp->und_irq, &rc32434_und_interrupt,
			SA_SHIRQ | SA_INTERRUPT,
			"Ethernet Underflow", dev)) {
		ERR(": unable to get UND IRQ %d\n", lp->und_irq);
		free_irq(lp->rx_irq, dev);
		free_irq(lp->tx_irq, dev);
#ifdef RC32434_REVISION
		free_irq(lp->ovr_irq, dev);
#endif
		return -EAGAIN;
	}

	return 0;
}


static int rc32434_close(struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	u32 tmp;

	/* Disable interrupts */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
#ifdef RC32434_REVISION
	disable_irq(lp->ovr_irq);
#endif
	disable_irq(lp->und_irq);

	tmp = __raw_readl(&lp->tx_dma_regs->dmasm);
	tmp = tmp | DMASM_f_m | DMASM_e_m;
	__raw_writel(tmp, &lp->tx_dma_regs->dmasm);

	tmp = __raw_readl(&lp->rx_dma_regs->dmasm);
	tmp = tmp | DMASM_d_m | DMASM_h_m | DMASM_e_m;
	__raw_writel(tmp, &lp->rx_dma_regs->dmasm);

	free_irq(lp->rx_irq, dev);
	free_irq(lp->tx_irq, dev);
#ifdef RC32434_REVISION
	free_irq(lp->ovr_irq, dev);
#endif
	free_irq(lp->und_irq, dev);
	return 0;
}


/* transmit packet */
static int rc32434_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	unsigned long flags;
	u32 length;
	DMAD_t td;

	spin_lock_irqsave(&lp->lock, flags);

	td = &lp->td_ring[lp->tx_chain_tail];

	/* stop queue when full, drop pkts if queue already full */
	if (lp->tx_count >= (RC32434_NUM_TDS - 2)) {
		lp->tx_full = 1;

		if (lp->tx_count == (RC32434_NUM_TDS - 2)) {
			netif_stop_queue(dev);
		} else {
			/* ring already full: drop the packet; the skb is
			 * consumed here, so report success to the stack
			 * rather than asking it to requeue a freed skb */
			lp->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);
			return 0;
		}
	}

	lp->tx_count++;

	lp->tx_skb[lp->tx_chain_tail] = skb;

	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);

	/* Setup the transmit descriptor. */
	dma_cache_inv((u32)td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);

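	/*
	 * Two cases below: if the Tx DMA channel is idle (DMANDPTR == 0),
	 * the descriptor chain starting at tx_chain_head is handed to the
	 * hardware immediately; otherwise the new descriptor is only linked
	 * into the software chain and the chain is marked "filled" so the
	 * Tx interrupt handler can kick it off once the channel goes idle.
	 * In either case, if the chain is not empty the previous tail
	 * descriptor is first linked to this one.
	 */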
	if (__raw_readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if (lp->tx_chain_status == empty) {
			td->control = DMA_COUNT(length) | DMAD_cof_m | DMAD_iof_m;	/* Update tail */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;	/* Move tail */
			__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
				     &(lp->tx_dma_regs->dmandptr));	/* Write to NDPTR */
			lp->tx_chain_head = lp->tx_chain_tail;	/* Move head to tail */
		} else {
			td->control = DMA_COUNT(length) | DMAD_cof_m | DMAD_iof_m;	/* Update tail */
			lp->td_ring[(lp->tx_chain_tail - 1) & RC32434_TDS_MASK].control &= ~(DMAD_cof_m);	/* Link to prev */
			lp->td_ring[(lp->tx_chain_tail - 1) & RC32434_TDS_MASK].link = CPHYSADDR(td);	/* Link to prev */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;	/* Move tail */
			__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
				     &(lp->tx_dma_regs->dmandptr));	/* Write to NDPTR */
			lp->tx_chain_head = lp->tx_chain_tail;	/* Move head to tail */
			lp->tx_chain_status = empty;
		}
	} else {
		if (lp->tx_chain_status == empty) {
			td->control = DMA_COUNT(length) | DMAD_cof_m | DMAD_iof_m;	/* Update tail */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;	/* Move tail */
			lp->tx_chain_status = filled;
		} else {
			td->control = DMA_COUNT(length) | DMAD_cof_m | DMAD_iof_m;	/* Update tail */
			lp->td_ring[(lp->tx_chain_tail - 1) & RC32434_TDS_MASK].control &= ~(DMAD_cof_m);	/* Link to prev */
			lp->td_ring[(lp->tx_chain_tail - 1) & RC32434_TDS_MASK].link = CPHYSADDR(td);	/* Link to prev */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;	/* Move tail */
		}
	}
	dma_cache_wback((u32)td, sizeof(*td));

	dev->trans_start = jiffies;

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;
}


/* Ethernet MII-PHY Handler */
static void rc32434_mii_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	unsigned long flags;
	unsigned long duplex_status;
	int port_addr = (lp->rx_irq == 0x2c ? 1 : 0) << 8;
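
	/*
	 * Which PHY to poll is inferred from the Rx IRQ number: IRQ 0x2c
	 * belongs to the second MAC, so bit 8 of the MII address register
	 * selects PHY 1; the first MAC uses PHY 0.
	 */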

	spin_lock_irqsave(&lp->lock, flags);

	/* Both ports use the same MII bus; only the PHY address differs */
	__raw_writel(0, &rc32434_eth0_regs->miimcfg);
	__raw_writel(0, &rc32434_eth0_regs->miimcmd);
	__raw_writel(port_addr | 0x05, &rc32434_eth0_regs->miimaddr);
	__raw_writel(MIIMCMD_scn_m, &rc32434_eth0_regs->miimcmd);
	while (__raw_readl(&rc32434_eth0_regs->miimind) & MIIMIND_nv_m)
		;

	ERR("irq:%x port_addr:%x RDD:%x\n",
	    lp->rx_irq, port_addr, __raw_readl(&rc32434_eth0_regs->miimrdd));
	duplex_status = (__raw_readl(&rc32434_eth0_regs->miimrdd) & 0x140) ? ETHMAC2_fd_m : 0;
	if (duplex_status != lp->duplex_mode) {
		ERR("The MII-PHY is Auto-negotiated to %s-Duplex mode for Eth-%x\n",
		    duplex_status ? "Full" : "Half", lp->rx_irq == 0x2c ? 1 : 0);
		lp->duplex_mode = duplex_status;
		rc32434_restart(dev);
	}

	lp->mii_phy_timer.expires = jiffies + 10 * HZ;
	add_timer(&lp->mii_phy_timer);

	spin_unlock_irqrestore(&lp->lock, flags);
}

#ifdef RC32434_REVISION
/* Ethernet Rx Overflow interrupt */
static irqreturn_t
rc32434_ovr_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct rc32434_local *lp;
	unsigned int ovr;
	irqreturn_t retval = IRQ_NONE;

	ASSERT(dev != NULL);

	lp = (struct rc32434_local *)dev->priv;
	spin_lock(&lp->lock);
	ovr = __raw_readl(&lp->eth_regs->ethintfc);

	if (ovr & ETHINTFC_ovr_m) {
		netif_stop_queue(dev);

		/* clear OVR bit */
		__raw_writel((ovr & ~ETHINTFC_ovr_m), &lp->eth_regs->ethintfc);

		/* Restart interface */
		rc32434_restart(dev);
		retval = IRQ_HANDLED;
	}
	spin_unlock(&lp->lock);

	return retval;
}
#endif


/* Ethernet Tx Underflow interrupt */
static irqreturn_t
rc32434_und_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct rc32434_local *lp;
	unsigned int und;
	irqreturn_t retval = IRQ_NONE;

	ASSERT(dev != NULL);

	lp = (struct rc32434_local *)dev->priv;

	spin_lock(&lp->lock);

	und = __raw_readl(&lp->eth_regs->ethintfc);

	if (und & ETHINTFC_und_m) {
		netif_stop_queue(dev);

		__raw_writel((und & ~ETHINTFC_und_m), &lp->eth_regs->ethintfc);

		/* Restart interface */
		rc32434_restart(dev);
		retval = IRQ_HANDLED;
	}

	spin_unlock(&lp->lock);

	return retval;
}


/* Ethernet Rx DMA interrupt */
static irqreturn_t
rc32434_rx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct rc32434_local *lp;
	volatile u32 dmas, dmasm;
	irqreturn_t retval;

	ASSERT(dev != NULL);

	lp = (struct rc32434_local *)dev->priv;

	spin_lock(&lp->lock);
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	if (dmas & (DMAS_d_m | DMAS_h_m | DMAS_e_m)) {
		/* Mask D H E bit in Rx DMA */
		dmasm = __raw_readl(&lp->rx_dma_regs->dmasm);
		__raw_writel(dmasm | (DMASM_d_m | DMASM_h_m | DMASM_e_m),
			     &lp->rx_dma_regs->dmasm);
#ifdef CONFIG_IDT_USE_NAPI
		if (netif_rx_schedule_prep(dev))
			__netif_rx_schedule(dev);
#else
		tasklet_hi_schedule(lp->rx_tasklet);
#endif

		if (dmas & DMAS_e_m)
			ERR(": DMA error\n");

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	spin_unlock(&lp->lock);
	return retval;
}

#ifdef CONFIG_IDT_USE_NAPI
static int rc32434_poll(struct net_device *rx_data_dev, int *budget)
#else
static void rc32434_rx_tasklet(unsigned long rx_data_dev)
#endif
{
	struct net_device *dev = (struct net_device *)rx_data_dev;
	struct rc32434_local *lp = netdev_priv(dev);
	volatile DMAD_t rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, count, pkt_len, pktuncrc_len;
	volatile u32 dmas;
#ifdef CONFIG_IDT_USE_NAPI
	u32 received = 0;
	int rx_work_limit = min(*budget, dev->quota);
#else
	unsigned long flags;
	spin_lock_irqsave(&lp->lock, flags);
#endif

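	/*
	 * Descriptors are armed with DMA_COUNT(RC32434_RBSIZE), so reading
	 * back a smaller count means the hardware has written data into
	 * this buffer; the loop walks the ring until it reaches a
	 * descriptor the DMA engine has not filled yet.
	 */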
	dma_cache_inv((u32)rd, sizeof(*rd));
	while ((count = RC32434_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
#ifdef CONFIG_IDT_USE_NAPI
		if (--rx_work_limit < 0)
			break;
#endif
		/* init the variables used by the later operations within the loop */
		skb_new = NULL;
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		skb = lp->rx_skb[lp->rx_next_done];

		if (count < 64) {
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		} else if ((devcs & (ETHRX_ld_m)) != ETHRX_ld_m) {
			/* check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		} else if ((devcs & ETHRX_rok_m)) {
			/* must be the (first and) last descriptor then */
			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

			pktuncrc_len = pkt_len - 4;
			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);

			/* Malloc up new buffer. */
			skb_new = dev_alloc_skb(RC32434_RBSIZE + 2);

			if (skb_new != NULL) {
				/* Make room */
				skb_put(skb, pktuncrc_len);

				skb->protocol = eth_type_trans(skb, dev);

				/* pass the packet to upper layers */
#ifdef CONFIG_IDT_USE_NAPI
				netif_receive_skb(skb);
				received++;	/* account the packet against the NAPI budget */
#else
				netif_rx(skb);
#endif

				dev->last_rx = jiffies;
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += pktuncrc_len;

				if (IS_RCV_MP(devcs))
					lp->stats.multicast++;

				/* 16 bit align */
				skb_reserve(skb_new, 2);

				skb_new->dev = dev;
				lp->rx_skb[lp->rx_next_done] = skb_new;
			} else {
				ERR("no memory, dropping rx packet.\n");
				lp->stats.rx_errors++;
				lp->stats.rx_dropped++;
			}
		} else {
			/* This should only happen if we enable accepting broken packets */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;

			/* add statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				DBG(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} else if (IS_RCV_LOR_ERR(devcs)) {
				DBG(2, "RX LOR error\n");
				lp->stats.rx_length_errors++;
			} else if (IS_RCV_LE_ERR(devcs)) {
				DBG(2, "RX LE error\n");
				lp->stats.rx_length_errors++;
			} else if (IS_RCV_OVR_ERR(devcs)) {
				lp->stats.rx_over_errors++;
			} else if (IS_RCV_CV_ERR(devcs)) {
				/* code violation */
				DBG(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			} else if (IS_RCV_CES_ERR(devcs)) {
				DBG(2, "RX Preamble error\n");
			}
		}

		rd->devcs = 0;

		/* restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data);
		else
			rd->ca = CPHYSADDR(skb->data);

		rd->control = DMA_COUNT(RC32434_RBSIZE) | DMAD_cod_m | DMAD_iod_m;
		lp->rd_ring[(lp->rx_next_done - 1) & RC32434_RDS_MASK].control &= ~(DMAD_cod_m);

		lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		__raw_writel(~DMAS_d_m, &lp->rx_dma_regs->dmas);
	}
#ifdef CONFIG_IDT_USE_NAPI
	dev->quota -= received;
	*budget -= received;
	if (rx_work_limit < 0)
		goto not_done;
#endif

	dmas = __raw_readl(&lp->rx_dma_regs->dmas);

	if (dmas & DMAS_h_m) {
		__raw_writel(~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
		lp->dma_halt_cnt++;
#endif
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		rc32434_chain_rx(lp, rd);
	}

#ifdef CONFIG_IDT_USE_NAPI
	netif_rx_complete(dev);
#endif
	/* Enable D H E bit in Rx DMA */
	__raw_writel(__raw_readl(&lp->rx_dma_regs->dmasm) & ~(DMASM_d_m | DMASM_h_m | DMASM_e_m),
		     &lp->rx_dma_regs->dmasm);
#ifdef CONFIG_IDT_USE_NAPI
	return 0;
not_done:
	return 1;
#else
	spin_unlock_irqrestore(&lp->lock, flags);
	return;
#endif
}


/* Ethernet Tx DMA interrupt */
static irqreturn_t
rc32434_tx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct rc32434_local *lp;
	volatile u32 dmas, dmasm;
	irqreturn_t retval;

	ASSERT(dev != NULL);

	lp = (struct rc32434_local *)dev->priv;

	spin_lock(&lp->lock);

	dmas = __raw_readl(&lp->tx_dma_regs->dmas);

	if (dmas & (DMAS_f_m | DMAS_e_m)) {
		dmasm = __raw_readl(&lp->tx_dma_regs->dmasm);
		/* Mask F E bit in Tx DMA */
		__raw_writel(dmasm | (DMASM_f_m | DMASM_e_m), &lp->tx_dma_regs->dmasm);

		tasklet_hi_schedule(lp->tx_tasklet);

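		/*
		 * If descriptors were queued while the channel was busy
		 * (chain marked "filled" in rc32434_send_packet()) and the
		 * channel has since gone idle, restart it from the chain
		 * head here instead of waiting for the next transmit.
		 */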
		if (lp->tx_chain_status == filled &&
		    (__raw_readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
			__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
				     &(lp->tx_dma_regs->dmandptr));
			lp->tx_chain_status = empty;
			lp->tx_chain_head = lp->tx_chain_tail;
			dev->trans_start = jiffies;
		}

		if (dmas & DMAS_e_m)
			ERR(": DMA error\n");

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	spin_unlock(&lp->lock);

	return retval;
}


static void rc32434_tx_tasklet(unsigned long tx_data_dev)
{
	struct net_device *dev = (struct net_device *)tx_data_dev;
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	volatile DMAD_t td = &lp->td_ring[lp->tx_next_done];
	u32 devcs;
	unsigned long flags;
	volatile u32 dmas;

	spin_lock_irqsave(&lp->lock, flags);

	/* process all descriptors that are done */
	while (IS_DMA_FINISHED(td->control)) {
		if (lp->tx_full == 1) {
			netif_wake_queue(dev);
			lp->tx_full = 0;
		}

		devcs = lp->td_ring[lp->tx_next_done].devcs;
		if ((devcs & (ETHTX_fd_m | ETHTX_ld_m)) != (ETHTX_fd_m | ETHTX_ld_m)) {
			lp->stats.tx_errors++;
			lp->stats.tx_dropped++;

			/* should never happen */
			DBG(1, "%s: split tx ignored\n", __FUNCTION__);
		} else if (IS_TX_TOK(devcs)) {
			lp->stats.tx_packets++;
			lp->stats.tx_bytes += lp->tx_skb[lp->tx_next_done]->len;
		} else {
			lp->stats.tx_errors++;
			lp->stats.tx_dropped++;

			/* underflow */
			if (IS_TX_UND_ERR(devcs))
				lp->stats.tx_fifo_errors++;

			/* oversized frame */
			if (IS_TX_OF_ERR(devcs))
				lp->stats.tx_aborted_errors++;

			/* excessive deferrals */
			if (IS_TX_ED_ERR(devcs))
				lp->stats.tx_carrier_errors++;

			/* collisions: medium busy */
			if (IS_TX_EC_ERR(devcs))
				lp->stats.collisions++;

			/* late collision */
			if (IS_TX_LC_ERR(devcs))
				lp->stats.tx_window_errors++;
		}

		/* We must always free the original skb */
		if (lp->tx_skb[lp->tx_next_done] != NULL) {
			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
			lp->tx_skb[lp->tx_next_done] = NULL;
		}

		lp->td_ring[lp->tx_next_done].control = DMAD_iof_m;
		lp->td_ring[lp->tx_next_done].devcs = ETHTX_fd_m | ETHTX_ld_m;
		lp->td_ring[lp->tx_next_done].link = 0;
		lp->td_ring[lp->tx_next_done].ca = 0;
		lp->tx_count--;

		/* go on to the next transmission */
		lp->tx_next_done = (lp->tx_next_done + 1) & RC32434_TDS_MASK;
		td = &lp->td_ring[lp->tx_next_done];
	}

	dmas = __raw_readl(&lp->tx_dma_regs->dmas);
	__raw_writel(~dmas, &lp->tx_dma_regs->dmas);

	/* Enable F E bit in Tx DMA */
	__raw_writel(__raw_readl(&lp->tx_dma_regs->dmasm) & ~(DMASM_f_m | DMASM_e_m),
		     &lp->tx_dma_regs->dmasm);
	spin_unlock_irqrestore(&lp->lock, flags);
}


static struct net_device_stats *rc32434_get_stats(struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	return &lp->stats;
}


/*
 * Set or clear the multicast filter for this adaptor.
 */
static void rc32434_multicast_list(struct net_device *dev)
{
	/* always listen to broadcasts and treat the IFF bits independently */
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	unsigned long flags;
	u32 recognise = ETHARC_ab_m;	/* always accept broadcasts */

	if (dev->flags & IFF_PROMISC)	/* set promiscuous mode */
		recognise |= ETHARC_pro_m;

	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15))
		recognise |= ETHARC_am_m;	/* all multicast & bcast */
	else if (dev->mc_count > 0) {
		DBG(2, "%s: mc_count %d\n", __FUNCTION__, dev->mc_count);
		recognise |= ETHARC_am_m;	/* for the time being */
	}

	spin_lock_irqsave(&lp->lock, flags);
	__raw_writel(recognise, &lp->eth_regs->etharc);
	spin_unlock_irqrestore(&lp->lock, flags);
}


static void rc32434_tx_timeout(struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	rc32434_restart(dev);
	spin_unlock_irqrestore(&lp->lock, flags);
}


/*
 * Initialize the RC32434 ethernet controller.
 */
static int rc32434_init(struct net_device *dev)
{
	struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
	int i, j;

	/* Disable DMA */
	rc32434_abort_tx(dev);
	rc32434_abort_rx(dev);

	/* reset ethernet logic */
	__raw_writel(0, &lp->eth_regs->ethintfc);
	while ((__raw_readl(&lp->eth_regs->ethintfc) & ETHINTFC_rip_m))
		dev->trans_start = jiffies;

	/* Enable Ethernet Interface */
	__raw_writel(ETHINTFC_en_m, &lp->eth_regs->ethintfc);

#ifndef CONFIG_IDT_USE_NAPI
	tasklet_disable(lp->rx_tasklet);
#endif
	tasklet_disable(lp->tx_tasklet);

	/* Initialize the transmit descriptors */
	for (i = 0; i < RC32434_NUM_TDS; i++) {
		lp->td_ring[i].control = DMAD_iof_m;
		lp->td_ring[i].devcs = ETHTX_fd_m | ETHTX_ld_m;
		lp->td_ring[i].ca = 0;
		lp->td_ring[i].link = 0;
		if (lp->tx_skb[i] != NULL) {
			dev_kfree_skb_any(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}
	lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail = lp->tx_full = lp->tx_count = 0;
	lp->tx_chain_status = empty;

	/*
	 * Initialize the receive descriptors so that they
	 * become a circular linked list, ie. let the last
	 * descriptor point to the first again.
	 */
	for (i = 0; i < RC32434_NUM_RDS; i++) {
		struct sk_buff *skb = lp->rx_skb[i];

		if (lp->rx_skb[i] == NULL) {
			skb = dev_alloc_skb(RC32434_RBSIZE + 2);
			if (skb == NULL) {
				ERR("No memory in the system\n");
				for (j = 0; j < RC32434_NUM_RDS; j++)
					if (lp->rx_skb[j] != NULL)
						dev_kfree_skb_any(lp->rx_skb[j]);

				return 1;
			}
			skb->dev = dev;
			skb_reserve(skb, 2);
			lp->rx_skb[i] = skb;
		}
		lp->rd_ring[i].control = DMAD_iod_m | DMA_COUNT(RC32434_RBSIZE);
		lp->rd_ring[i].devcs = 0;
		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
		lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i + 1]);
	}
	/* loop back */
	lp->rd_ring[RC32434_NUM_RDS - 1].link = CPHYSADDR(&lp->rd_ring[0]);
	lp->rx_next_done = 0;

	lp->rd_ring[RC32434_NUM_RDS - 1].control |= DMAD_cod_m;
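	/*
	 * Keeping DMAD_cod_m set on the last descriptor appears to mark the
	 * end of the chain the hardware may consume; the Rx path clears the
	 * bit on the previous descriptor as buffers are recycled, so the
	 * engine never overruns descriptors the CPU still owns.
	 */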
	lp->rx_chain_head = 0;
	lp->rx_chain_tail = 0;
	lp->rx_chain_status = empty;

	__raw_writel(0, &lp->rx_dma_regs->dmas);
	/* Start Rx DMA */
	rc32434_start_rx(lp, &lp->rd_ring[0]);

	/* Enable F E bit in Tx DMA */
	__raw_writel(__raw_readl(&lp->tx_dma_regs->dmasm) & ~(DMASM_f_m | DMASM_e_m),
		     &lp->tx_dma_regs->dmasm);
	/* Enable D H E bit in Rx DMA */
	__raw_writel(__raw_readl(&lp->rx_dma_regs->dmasm) & ~(DMASM_d_m | DMASM_h_m | DMASM_e_m),
		     &lp->rx_dma_regs->dmasm);

	/* Accept only packets destined for this Ethernet device address */
	__raw_writel(ETHARC_ab_m, &lp->eth_regs->etharc);

	/* Set all Ether station address registers to their initial values */
	__raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
	__raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);

	__raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
	__raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);

	__raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
	__raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);

	__raw_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
	__raw_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);

	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
	__raw_writel(ETHMAC2_pe_m | ETHMAC2_cen_m | ETHMAC2_fd_m, &lp->eth_regs->ethmac2);
	/* ETHMAC2_flc_m ETHMAC2_fd_m lp->duplex_mode */

	/* Back to back inter-packet-gap */
	__raw_writel(0x15, &lp->eth_regs->ethipgt);
	/* Non - Back to back inter-packet-gap */
	__raw_writel(0x12, &lp->eth_regs->ethipgr);

	/* Management Clock Prescaler Divisor */
	/* Clock independent setting */
	__raw_writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
		     &lp->eth_regs->ethmcp);
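	/*
	 * Worked example (assuming MDC = idt_cpu_freq / ETHMCP divisor):
	 * with idt_cpu_freq = 200 MHz and MII_CLOCK = 1.25 MHz this writes
	 * (160 + 1) & ~1 = 160; the "+1 then clear bit 0" step rounds odd
	 * results up to the next even divisor so the management clock stays
	 * at or below the 2.5 MHz limit noted at the MII_CLOCK definition.
	 */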

	/* don't transmit until fifo contains 48b */
	__raw_writel(48, &lp->eth_regs->ethfifott);

	__raw_writel(ETHMAC1_re_m, &lp->eth_regs->ethmac1);

#ifndef CONFIG_IDT_USE_NAPI
	tasklet_enable(lp->rx_tasklet);
#endif
	tasklet_enable(lp->tx_tasklet);

	netif_start_queue(dev);

	return 0;
}

static struct platform_driver korina_driver = {
	.driver.name = "korina",
	.probe = rc32434_probe,
	.remove = rc32434_remove,
};

static int __init rc32434_init_module(void)
{
	return platform_driver_register(&korina_driver);
}

static void rc32434_cleanup_module(void)
{
	platform_driver_unregister(&korina_driver);
}

module_init(rc32434_init_module);
module_exit(rc32434_cleanup_module);
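
/* The header above places this driver under the GPL (v2 or later). */
MODULE_LICENSE("GPL");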