openwrt.git: target/linux/aruba-2.6/files/drivers/net/rc32434_eth.c
1 /**************************************************************************
2 *
3 * BRIEF MODULE DESCRIPTION
4 * Driver for the IDT RC32434 on-chip ethernet controller.
5 *
6 * Copyright 2004 IDT Inc. (rischelp@idt.com)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
14 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
16 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
19 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 675 Mass Ave, Cambridge, MA 02139, USA.
27 *
28 *
29 **************************************************************************
30 * May 2004 rkt, neb
31 *
32 * Based on the driver developed by B. Maruthanayakam, H. Kou and others.
33 *
34 * Aug 2004 Sadik
35 *
36 * Added NAPI
37 *
38 **************************************************************************
39 */
40
41 #include <linux/autoconf.h>
42 #include <linux/version.h>
43 #include <linux/module.h>
44 #include <linux/kernel.h>
45 #include <linux/moduleparam.h>
46 #include <linux/sched.h>
47 #include <linux/ctype.h>
48 #include <linux/types.h>
49 #include <linux/fcntl.h>
50 #include <linux/interrupt.h>
51 #include <linux/ptrace.h>
52 #include <linux/init.h>
53 #include <linux/ioport.h>
54 #include <linux/proc_fs.h>
55 #include <linux/in.h>
56 #include <linux/slab.h>
57 #include <linux/string.h>
58 #include <linux/delay.h>
59 #include <linux/netdevice.h>
60 #include <linux/etherdevice.h>
61 #include <linux/skbuff.h>
62 #include <linux/errno.h>
63 #include <asm/bootinfo.h>
64 #include <asm/system.h>
65 #include <asm/bitops.h>
66 #include <asm/pgtable.h>
67 #include <asm/segment.h>
68 #include <asm/io.h>
69 #include <asm/dma.h>
70
71 #include "rc32434_eth.h"
72
73 #define DRIVER_VERSION "(mar2904)"
74
75 #define DRIVER_NAME "rc32434 Ethernet driver. " DRIVER_VERSION
76
77
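/*
 * The 6-byte MAC address is split across two station-address register
 * values: STATION_ADDRESS_HIGH carries the first two octets and
 * STATION_ADDRESS_LOW the remaining four (written to the ethsah/ethsal
 * registers in rc32434_init()).
 */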
78 #define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
79 ((dev)->dev_addr[1]))
80 #define STATION_ADDRESS_LOW(dev) (((dev)->dev_addr[2] << 24) | \
81 ((dev)->dev_addr[3] << 16) | \
82 ((dev)->dev_addr[4] << 8) | \
83 ((dev)->dev_addr[5]))
84
85 #define MII_CLOCK 1250000 /* no more than 2.5MHz */
86 static char mac0[18] = "08:00:06:05:40:01";
87
88 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,52)
89 module_param_string(mac0, mac0, 18, 0);
90 #else
91 MODULE_PARM(mac0, "c18");
92 #endif
93 MODULE_PARM_DESC(mac0, "MAC address for RC32434 ethernet0");
94
95 static struct rc32434_if_t {
96 char *name;
97 struct net_device *dev;
98 char* mac_str;
99 int weight;
100 u32 iobase;
101 u32 rxdmabase;
102 u32 txdmabase;
103 int rx_dma_irq;
104 int tx_dma_irq;
105 int rx_ovr_irq;
106 int tx_und_irq;
107 } rc32434_iflist[] =
108 {
109 {
110 "rc32434_eth0", NULL, mac0,
111 64,
112 ETH0_PhysicalAddress,
113 ETH0_RX_DMA_ADDR,
114 ETH0_TX_DMA_ADDR,
115 ETH0_DMA_RX_IRQ,
116 ETH0_DMA_TX_IRQ,
117 ETH0_RX_OVR_IRQ,
118 ETH0_TX_UND_IRQ
119 }
120 };
121
122
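/* Parse a MAC address of the form "xx:xx:xx:xx:xx:xx" into dev->dev_addr. */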
123 static int parse_mac_addr(struct net_device *dev, char* macstr)
124 {
125 int i, j;
126 unsigned char result, value;
127
128 for (i=0; i<6; i++) {
129 result = 0;
130 if (i != 5 && *(macstr+2) != ':') {
131 ERR("invalid mac address format: %d %c\n",
132 i, *(macstr+2));
133 return -EINVAL;
134 }
135 for (j=0; j<2; j++) {
136 if (isxdigit(*macstr) && (value = isdigit(*macstr) ? *macstr-'0' :
137 toupper(*macstr)-'A'+10) < 16) {
138 result = result*16 + value;
139 macstr++;
140 }
141 else {
142 ERR("invalid mac address "
143 "character: %c\n", *macstr);
144 return -EINVAL;
145 }
146 }
147
148 macstr++;
149 dev->dev_addr[i] = result;
150 }
151
152 return 0;
153 }
154
155
156
157 static inline void rc32434_abort_tx(struct net_device *dev)
158 {
159 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
160 rc32434_abort_dma(dev, lp->tx_dma_regs);
161
162 }
163
164 static inline void rc32434_abort_rx(struct net_device *dev)
165 {
166 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
167 rc32434_abort_dma(dev, lp->rx_dma_regs);
168
169 }
170
171 static inline void rc32434_start_tx(struct rc32434_local *lp, volatile DMAD_t td)
172 {
173 rc32434_start_dma(lp->tx_dma_regs, CPHYSADDR(td));
174 }
175
176 static inline void rc32434_start_rx(struct rc32434_local *lp, volatile DMAD_t rd)
177 {
178 rc32434_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
179 }
180
181 static inline void rc32434_chain_tx(struct rc32434_local *lp, volatile DMAD_t td)
182 {
183 rc32434_chain_dma(lp->tx_dma_regs, CPHYSADDR(td));
184 }
185
186 static inline void rc32434_chain_rx(struct rc32434_local *lp, volatile DMAD_t rd)
187 {
188 rc32434_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
189 }
190
191 #ifdef RC32434_PROC_DEBUG
192 static int rc32434_read_proc(char *buf, char **start, off_t fpos,
193 int length, int *eof, void *data)
194 {
195 struct net_device *dev = (struct net_device *)data;
196 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
197 int len = 0;
198
199 /* print out header */
200 len += sprintf(buf + len, "\n\tRC32434 Ethernet Debug\n\n");
201 len += sprintf (buf + len,
202 "DMA halt count = %10d, DMA run count = %10d\n",
203 lp->dma_halt_cnt, lp->dma_run_cnt);
204
205 if (fpos >= len) {
206 *start = buf;
207 *eof = 1;
208 return 0;
209 }
210 *start = buf + fpos;
211
212 if ((len -= fpos) > length)
213 return length;
214 *eof = 1;
215
216 return len;
217
218 }
219 #endif
220
221
222 /*
223 * Restart the RC32434 ethernet controller.
224 */
225 static int rc32434_restart(struct net_device *dev)
226 {
227 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
228
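/*
 * Full restart: mask the Tx/Rx DMA interrupt sources and disable the four
 * IRQ lines, re-run rc32434_init() to rebuild the descriptor rings and MAC
 * state, reapply the multicast filter, then re-enable the IRQ lines in
 * reverse order.
 */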
229 /*
230 * Disable interrupts
231 */
232 disable_irq(lp->rx_irq);
233 disable_irq(lp->tx_irq);
234 #ifdef RC32434_REVISION
235 disable_irq(lp->ovr_irq);
236 #endif
237 disable_irq(lp->und_irq);
238
239 /* Mask F E bit in Tx DMA */
240 rc32434_writel(rc32434_readl(&lp->tx_dma_regs->dmasm) | DMASM_f_m | DMASM_e_m, &lp->tx_dma_regs->dmasm);
241 /* Mask D H E bit in Rx DMA */
242 rc32434_writel(rc32434_readl(&lp->rx_dma_regs->dmasm) | DMASM_d_m | DMASM_h_m | DMASM_e_m, &lp->rx_dma_regs->dmasm);
243
244 rc32434_init(dev);
245 rc32434_multicast_list(dev);
246
247 enable_irq(lp->und_irq);
248 #ifdef RC32434_REVISION
249 enable_irq(lp->ovr_irq);
250 #endif
251 enable_irq(lp->tx_irq);
252 enable_irq(lp->rx_irq);
253
254 return 0;
255 }
256
257 int rc32434_init_module(void)
258 {
259 #ifdef CONFIG_MACH_ARUBA
260 if (mips_machtype != MACH_ARUBA_AP70)
261 return 1;
262 #endif
263
264 printk(KERN_INFO DRIVER_NAME " \n");
265 return rc32434_probe(0);
266 }
267
268 static int rc32434_probe(int port_num)
269 {
270 struct rc32434_if_t *bif = &rc32434_iflist[port_num];
271 struct rc32434_local *lp = NULL;
272 struct net_device *dev = NULL;
273 int i, retval,err;
274
275 dev = alloc_etherdev(sizeof(struct rc32434_local));
276 if(!dev) {
277 ERR("rc32434_eth: alloc_etherdev failed\n");
278 return -1;
279 }
280
281 SET_MODULE_OWNER(dev);
282 bif->dev = dev;
283
284 #ifdef CONFIG_MACH_ARUBA
285 	{
286 		extern char * getenv(char *e);
		char *ethaddr = getenv("ethaddr");
		if (ethaddr != NULL)	/* keep the built-in default if the env variable is absent */
287 			memcpy(bif->mac_str, ethaddr, 17);
288 	}
289 #endif
290
291 printk("mac: %s\n", bif->mac_str);
292 if ((retval = parse_mac_addr(dev, bif->mac_str))) {
293 ERR("MAC address parse failed\n");
294 free_netdev(dev);
295 return -1;
296 }
297
298
299 /* Initialize the device structure. */
300 if (dev->priv == NULL) {
301 lp = (struct rc32434_local *)kmalloc(sizeof(*lp), GFP_KERNEL);
if (!lp) {
ERR("Can't allocate private structure\n");
free_netdev(dev);
return -ENOMEM;
}
302 memset(lp, 0, sizeof(struct rc32434_local));
303 }
304 else {
305 lp = (struct rc32434_local *)dev->priv;
306 }
307
308 lp->rx_irq = bif->rx_dma_irq;
309 lp->tx_irq = bif->tx_dma_irq;
310 lp->ovr_irq = bif->rx_ovr_irq;
311 lp->und_irq = bif->tx_und_irq;
312
313 lp->eth_regs = ioremap_nocache(bif->iobase, sizeof(*lp->eth_regs));
314
315 if (!lp->eth_regs) {
316 ERR("Can't remap eth registers\n");
317 retval = -ENXIO;
318 goto probe_err_out;
319 }
320
321 lp->rx_dma_regs = ioremap_nocache(bif->rxdmabase, sizeof(struct DMA_Chan_s));
322
323 if (!lp->rx_dma_regs) {
324 ERR("Can't remap Rx DMA registers\n");
325 retval = -ENXIO;
326 goto probe_err_out;
327 }
328 lp->tx_dma_regs = ioremap_nocache(bif->txdmabase,sizeof(struct DMA_Chan_s));
329
330 if (!lp->tx_dma_regs) {
331 ERR("Can't remap Tx DMA registers\n");
332 retval = -ENXIO;
333 goto probe_err_out;
334 }
335
336 #ifdef RC32434_PROC_DEBUG
337 lp->ps = create_proc_read_entry (bif->name, 0, proc_net,
338 rc32434_read_proc, dev);
339 #endif
340
341 lp->td_ring = (DMAD_t)kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
342 if (!lp->td_ring) {
343 ERR("Can't allocate descriptors\n");
344 retval = -ENOMEM;
345 goto probe_err_out;
346 }
347
348 dma_cache_inv((unsigned long)(lp->td_ring), TD_RING_SIZE + RD_RING_SIZE);
349
350 /* now convert TD_RING pointer to KSEG1 */
351 lp->td_ring = (DMAD_t )KSEG1ADDR(lp->td_ring);
352 lp->rd_ring = &lp->td_ring[RC32434_NUM_TDS];
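/*
 * Both rings live in one kmalloc'd buffer: RC32434_NUM_TDS transmit
 * descriptors followed by the receive descriptors.  The buffer is
 * invalidated once and from then on accessed only through its uncached
 * KSEG1 alias, so the CPU and the DMA engine always see the same
 * descriptor contents without further cache maintenance.
 */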
353
354
355 spin_lock_init(&lp->lock);
356
357 dev->base_addr = bif->iobase;
358 /* just use the rx dma irq */
359 dev->irq = bif->rx_dma_irq;
360
361 dev->priv = lp;
362
363 dev->open = rc32434_open;
364 dev->stop = rc32434_close;
365 dev->hard_start_xmit = rc32434_send_packet;
366 dev->get_stats = rc32434_get_stats;
367 dev->set_multicast_list = &rc32434_multicast_list;
368 dev->tx_timeout = rc32434_tx_timeout;
369 dev->watchdog_timeo = RC32434_TX_TIMEOUT;
370
371 #ifdef CONFIG_IDT_USE_NAPI
372 dev->poll = rc32434_poll;
373 dev->weight = bif->weight;
374 printk("Using NAPI with weight %d\n",dev->weight);
375 #else
376 lp->rx_tasklet = kmalloc(sizeof(struct tasklet_struct), GFP_KERNEL);
377 tasklet_init(lp->rx_tasklet, rc32434_rx_tasklet, (unsigned long)dev);
378 #endif
379 lp->tx_tasklet = kmalloc(sizeof(struct tasklet_struct), GFP_KERNEL);
380 tasklet_init(lp->tx_tasklet, rc32434_tx_tasklet, (unsigned long)dev);
381
382 if ((err = register_netdev(dev))) {
383 printk(KERN_ERR "rc32434 ethernet: cannot register net device, error %d\n", err);
384 free_netdev(dev);
bif->dev = NULL; /* keep rc32434_cleanup_module() from touching the freed device */
385 retval = -EINVAL;
386 goto probe_err_out;
387 }
388
389 INFO("Rx IRQ %d, Tx IRQ %d, ", bif->rx_dma_irq, bif->tx_dma_irq);
390 for (i = 0; i < 6; i++) {
391 printk("%2.2x", dev->dev_addr[i]);
392 if (i<5)
393 printk(":");
394 }
395 printk("\n");
396
397 return 0;
398
399 probe_err_out:
400 rc32434_cleanup_module();
401 ERR(" failed. Returns %d\n", retval);
402 return retval;
403
404 }
405
406
407 static void rc32434_cleanup_module(void)
408 {
409 int i;
410
411 for (i = 0; i < ARRAY_SIZE(rc32434_iflist); i++) {
412 struct rc32434_if_t * bif = &rc32434_iflist[i];
413 if (bif->dev != NULL) {
414 struct rc32434_local *lp = (struct rc32434_local *)bif->dev->priv;
415 if (lp != NULL) {
416 if (lp->eth_regs)
417 iounmap((void*)lp->eth_regs);
418 if (lp->rx_dma_regs)
419 iounmap((void*)lp->rx_dma_regs);
420 if (lp->tx_dma_regs)
421 iounmap((void*)lp->tx_dma_regs);
422 if (lp->td_ring)
423 kfree((void*)KSEG0ADDR(lp->td_ring));
424
425 #ifdef RC32434_PROC_DEBUG
426 if (lp->ps) {
427 remove_proc_entry(bif->name, proc_net);
428 }
429 #endif
430 kfree(lp);
431 }
432
433 unregister_netdev(bif->dev);
434 free_netdev(bif->dev); /* also frees the device memory, no further kfree() */
436 }
437 }
438 }
439
440
441
442 static int rc32434_open(struct net_device *dev)
443 {
444 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
445
446 /* Initialize */
447 if (rc32434_init(dev)) {
448 ERR("Error: cannot open the Ethernet device\n");
449 return -EAGAIN;
450 }
451
452 /* Install the interrupt handler that handles the Done Finished Ovr and Und Events */
453 if (request_irq(lp->rx_irq, &rc32434_rx_dma_interrupt,
454 SA_SHIRQ | SA_INTERRUPT,
455 "rc32434 ethernet Rx", dev)) {
456 ERR(": unable to get Rx DMA IRQ %d\n",
457 lp->rx_irq);
458 return -EAGAIN;
459 }
460 if (request_irq(lp->tx_irq, &rc32434_tx_dma_interrupt,
461 SA_SHIRQ | SA_INTERRUPT,
462 "rc32434 ethernet Tx", dev)) {
463 ERR(": unable to get Tx DMA IRQ %d\n",
464 lp->tx_irq);
465 free_irq(lp->rx_irq, dev);
466 return -EAGAIN;
467 }
468
469 #ifdef RC32434_REVISION
470 /* Install handler for overrun error. */
471 if (request_irq(lp->ovr_irq, &rc32434_ovr_interrupt,
472 SA_SHIRQ | SA_INTERRUPT,
473 "Ethernet Overflow", dev)) {
474 ERR(": unable to get OVR IRQ %d\n",
475 lp->ovr_irq);
476 free_irq(lp->rx_irq, dev);
477 free_irq(lp->tx_irq, dev);
478 return -EAGAIN;
479 }
480 #endif
481
482 /* Install handler for underflow error. */
483 if (request_irq(lp->und_irq, &rc32434_und_interrupt,
484 SA_SHIRQ | SA_INTERRUPT,
485 "Ethernet Underflow", dev)) {
486 ERR(": unable to get UND IRQ %d\n",
487 lp->und_irq);
488 free_irq(lp->rx_irq, dev);
489 free_irq(lp->tx_irq, dev);
490 #ifdef RC32434_REVISION
491 free_irq(lp->ovr_irq, dev);
492 #endif
493 return -EAGAIN;
494 }
495
496
497 return 0;
498 }
499
500
501
502
503 static int rc32434_close(struct net_device *dev)
504 {
505 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
506 u32 tmp;
507
508 /* Disable interrupts */
509 disable_irq(lp->rx_irq);
510 disable_irq(lp->tx_irq);
511 #ifdef RC32434_REVISION
512 disable_irq(lp->ovr_irq);
513 #endif
514 disable_irq(lp->und_irq);
515
516 tmp = rc32434_readl(&lp->tx_dma_regs->dmasm);
517 tmp = tmp | DMASM_f_m | DMASM_e_m;
518 rc32434_writel(tmp, &lp->tx_dma_regs->dmasm);
519
520 tmp = rc32434_readl(&lp->rx_dma_regs->dmasm);
521 tmp = tmp | DMASM_d_m | DMASM_h_m | DMASM_e_m;
522 rc32434_writel(tmp, &lp->rx_dma_regs->dmasm);
523
524 free_irq(lp->rx_irq, dev);
525 free_irq(lp->tx_irq, dev);
526 #ifdef RC32434_REVISION
527 free_irq(lp->ovr_irq, dev);
528 #endif
529 free_irq(lp->und_irq, dev);
530 return 0;
531 }
532
533
534 /* transmit packet */
535 static int rc32434_send_packet(struct sk_buff *skb, struct net_device *dev)
536 {
537 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
538 unsigned long flags;
539 u32 length;
540 DMAD_t td;
541
542
543 spin_lock_irqsave(&lp->lock, flags);
544
545 td = &lp->td_ring[lp->tx_chain_tail];
546
547 /* stop queue when full, drop pkts if queue already full */
548 if(lp->tx_count >= (RC32434_NUM_TDS - 2)) {
549 lp->tx_full = 1;
550
551 if(lp->tx_count == (RC32434_NUM_TDS - 2)) {
552 netif_stop_queue(dev);
553 }
554 else {
555 lp->stats.tx_dropped++;
556 dev_kfree_skb_any(skb);
557 spin_unlock_irqrestore(&lp->lock, flags);
558 return 1;
559 }
560 }
561
562 lp->tx_count ++;
563
564 lp->tx_skb[lp->tx_chain_tail] = skb;
565
566 length = skb->len;
567
568 /* Setup the transmit descriptor. */
569 td->ca = CPHYSADDR(skb->data);
570
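/*
 * Two cases below: if the DMA next-descriptor pointer (dmandptr) is zero
 * the engine is idle, so the new descriptor (or the whole pending chain)
 * is handed to the hardware immediately.  Otherwise the descriptor is only
 * linked behind the previous one; the Tx DMA interrupt handler restarts
 * the engine from tx_chain_head once the current transfer finishes.
 */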
571 if(rc32434_readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
572 if( lp->tx_chain_status == empty ) {
573 td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m; /* Update tail */
574 lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK; /* Move tail */
575 rc32434_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr)); /* Write to NDPTR */
576 lp->tx_chain_head = lp->tx_chain_tail; /* Move head to tail */
577 }
578 else {
579 td->control = DMA_COUNT(length) |DMAD_cof_m|DMAD_iof_m; /* Update tail */
580 lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].control &= ~(DMAD_cof_m); /* Link to prev */
581 lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].link = CPHYSADDR(td); /* Link to prev */
582 lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK; /* Move tail */
583 rc32434_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr)); /* Write to NDPTR */
584 lp->tx_chain_head = lp->tx_chain_tail; /* Move head to tail */
585 lp->tx_chain_status = empty;
586 }
587 }
588 else {
589 if( lp->tx_chain_status == empty ) {
590 td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m; /* Update tail */
591 lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK; /* Move tail */
592 lp->tx_chain_status = filled;
593 }
594 else {
595 td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m; /* Update tail */
596 lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].control &= ~(DMAD_cof_m); /* Link to prev */
597 lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].link = CPHYSADDR(td); /* Link to prev */
598 lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK; /* Move tail */
599 }
600 }
601
602 dev->trans_start = jiffies;
603
604 spin_unlock_irqrestore(&lp->lock, flags);
605
606 return 0;
607 }
608
609
610 /* Ethernet MII-PHY Handler */
611 static void rc32434_mii_handler(unsigned long data)
612 {
613 struct net_device *dev = (struct net_device *)data;
614 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
615 unsigned long flags;
616 unsigned long duplex_status;
617 int port_addr = (lp->rx_irq == 0x2c? 1:0) << 8;
618
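/*
 * Periodic (10 s) poll of the PHY over the shared MII bus: read the
 * auto-negotiation link-partner ability register (register 5) and, if the
 * negotiated duplex no longer matches lp->duplex_mode, restart the
 * interface.  The PHY address depends on which Ethernet port this is,
 * derived here from the Rx IRQ number.
 */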
619 spin_lock_irqsave(&lp->lock, flags);
620
621 /* Two ports are using the same MII, the difference is the PHY address */
622 rc32434_writel(0, &rc32434_eth0_regs->miimcfg);
623 rc32434_writel(0, &rc32434_eth0_regs->miimcmd);
624 rc32434_writel(port_addr |0x05, &rc32434_eth0_regs->miimaddr);
625 rc32434_writel(MIIMCMD_scn_m, &rc32434_eth0_regs->miimcmd);
626 while(rc32434_readl(&rc32434_eth0_regs->miimind) & MIIMIND_nv_m);
627
628 ERR("irq:%x port_addr:%x RDD:%x\n",
629 lp->rx_irq, port_addr, rc32434_readl(&rc32434_eth0_regs->miimrdd));
630 duplex_status = (rc32434_readl(&rc32434_eth0_regs->miimrdd) & 0x140)? ETHMAC2_fd_m: 0;
631 if(duplex_status != lp->duplex_mode) {
632 ERR("The MII-PHY is Auto-negotiated to %s-Duplex mode for Eth-%x\n", duplex_status? "Full":"Half", lp->rx_irq == 0x2c? 1:0);
633 lp->duplex_mode = duplex_status;
634 rc32434_restart(dev);
635 }
636
637 lp->mii_phy_timer.expires = jiffies + 10 * HZ;
638 add_timer(&lp->mii_phy_timer);
639
640 spin_unlock_irqrestore(&lp->lock, flags);
641
642 }
643
644 #ifdef RC32434_REVISION
645 /* Ethernet Rx Overflow interrupt */
646 static irqreturn_t
647 rc32434_ovr_interrupt(int irq, void *dev_id)
648 {
649 struct net_device *dev = (struct net_device *)dev_id;
650 struct rc32434_local *lp;
651 unsigned int ovr;
652 irqreturn_t retval = IRQ_NONE;
653
654 ASSERT(dev != NULL);
655
656 lp = (struct rc32434_local *)dev->priv;
657 spin_lock(&lp->lock);
658 ovr = rc32434_readl(&lp->eth_regs->ethintfc);
659
660 if(ovr & ETHINTFC_ovr_m) {
661 netif_stop_queue(dev);
662
663 /* clear OVR bit */
664 rc32434_writel((ovr & ~ETHINTFC_ovr_m), &lp->eth_regs->ethintfc);
665
666 /* Restart interface */
667 rc32434_restart(dev);
668 retval = IRQ_HANDLED;
669 }
670 spin_unlock(&lp->lock);
671
672 return retval;
673 }
674
675 #endif
676
677
678 /* Ethernet Tx Underflow interrupt */
679 static irqreturn_t
680 rc32434_und_interrupt(int irq, void *dev_id)
681 {
682 struct net_device *dev = (struct net_device *)dev_id;
683 struct rc32434_local *lp;
684 unsigned int und;
685 irqreturn_t retval = IRQ_NONE;
686
687 ASSERT(dev != NULL);
688
689 lp = (struct rc32434_local *)dev->priv;
690
691 spin_lock(&lp->lock);
692
693 und = rc32434_readl(&lp->eth_regs->ethintfc);
694
695 if(und & ETHINTFC_und_m) {
696 netif_stop_queue(dev);
697
698 rc32434_writel((und & ~ETHINTFC_und_m), &lp->eth_regs->ethintfc);
699
700 /* Restart interface */
701 rc32434_restart(dev);
702 retval = IRQ_HANDLED;
703 }
704
705 spin_unlock(&lp->lock);
706
707 return retval;
708 }
709
710
711 /* Ethernet Rx DMA interrupt */
712 static irqreturn_t
713 rc32434_rx_dma_interrupt(int irq, void *dev_id)
714 {
715 struct net_device *dev = (struct net_device *)dev_id;
716 struct rc32434_local* lp;
717 volatile u32 dmas,dmasm;
718 irqreturn_t retval;
719
720 ASSERT(dev != NULL);
721
722 lp = (struct rc32434_local *)dev->priv;
723
724 spin_lock(&lp->lock);
725 dmas = rc32434_readl(&lp->rx_dma_regs->dmas);
726 if(dmas & (DMAS_d_m|DMAS_h_m|DMAS_e_m)) {
727 /* Mask D H E bit in Rx DMA */
728 dmasm = rc32434_readl(&lp->rx_dma_regs->dmasm);
729 rc32434_writel(dmasm | (DMASM_d_m | DMASM_h_m | DMASM_e_m), &lp->rx_dma_regs->dmasm);
730 #ifdef CONFIG_IDT_USE_NAPI
731 if(netif_rx_schedule_prep(dev))
732 __netif_rx_schedule(dev);
733 #else
734 tasklet_hi_schedule(lp->rx_tasklet);
735 #endif
736
737 if (dmas & DMAS_e_m)
738 ERR(": DMA error\n");
739
740 retval = IRQ_HANDLED;
741 }
742 else
743 retval = IRQ_NONE;
744
745 spin_unlock(&lp->lock);
746 return retval;
747 }
748
749 #ifdef CONFIG_IDT_USE_NAPI
750 static int rc32434_poll(struct net_device *rx_data_dev, int *budget)
751 #else
752 static void rc32434_rx_tasklet(unsigned long rx_data_dev)
753 #endif
754 {
755 struct net_device *dev = (struct net_device *)rx_data_dev;
756 struct rc32434_local* lp = netdev_priv(dev);
757 volatile DMAD_t rd = &lp->rd_ring[lp->rx_next_done];
758 struct sk_buff *skb, *skb_new;
759 u8* pkt_buf;
760 u32 devcs, count, pkt_len, pktuncrc_len;
761 volatile u32 dmas;
762 #ifdef CONFIG_IDT_USE_NAPI
763 u32 received = 0;
764 int rx_work_limit = min(*budget,dev->quota);
765 #else
766 unsigned long flags;
767 spin_lock_irqsave(&lp->lock, flags);
768 #endif
769
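/*
 * DMA_COUNT(rd->control) holds the number of buffer bytes the DMA engine
 * has not yet consumed, so count becomes non-zero only once the descriptor
 * actually holds received data; the loop stops at the first still-empty
 * descriptor.
 */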
770 while ( (count = RC32434_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
771 #ifdef CONFIG_IDT_USE_NAPI
772 if(--rx_work_limit <0)
773 {
774 break;
775 }
776 #endif
777 /* init the var. used for the later operations within the while loop */
778 skb_new = NULL;
779 devcs = rd->devcs;
780 pkt_len = RCVPKT_LENGTH(devcs);
781 skb = lp->rx_skb[lp->rx_next_done];
782
783 if (count < 64) {
784 lp->stats.rx_errors++;
785 lp->stats.rx_dropped++;
786 }
787 else if ((devcs & ( ETHRX_ld_m)) != ETHRX_ld_m) {
788 /* check that this is a whole packet */
789 /* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
790 lp->stats.rx_errors++;
791 lp->stats.rx_dropped++;
792 }
793 else if ( (devcs & ETHRX_rok_m) ) {
794
795 {
796 /* must be the (first and) last descriptor then */
797 pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;
798
799 pktuncrc_len = pkt_len - 4;
800 /* invalidate the cache */
801 dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);
802
803 /* Malloc up new buffer. */
804 skb_new = dev_alloc_skb(RC32434_RBSIZE + 2);
805
806 if (skb_new != NULL){
807 /* Make room */
808 skb_put(skb, pktuncrc_len);
809
810 skb->protocol = eth_type_trans(skb, dev);
811
812 /* pass the packet to upper layers */
813 #ifdef CONFIG_IDT_USE_NAPI
814 netif_receive_skb(skb);
received++;
815 #else
816 netif_rx(skb);
817 #endif
818
819 dev->last_rx = jiffies;
820 lp->stats.rx_packets++;
821 lp->stats.rx_bytes += pktuncrc_len;
822
823 if (IS_RCV_MP(devcs))
824 lp->stats.multicast++;
825
826 /* 16 bit align */
827 skb_reserve(skb_new, 2);
828
829 skb_new->dev = dev;
830 lp->rx_skb[lp->rx_next_done] = skb_new;
831 }
832 else {
833 ERR("no memory, dropping rx packet.\n");
834 lp->stats.rx_errors++;
835 lp->stats.rx_dropped++;
836 }
837 }
838
839 }
840 else {
841 /* This should only happen if we enable accepting broken packets */
842 lp->stats.rx_errors++;
843 lp->stats.rx_dropped++;
844
845 /* add statistics counters */
846 if (IS_RCV_CRC_ERR(devcs)) {
847 DBG(2, "RX CRC error\n");
848 lp->stats.rx_crc_errors++;
849 }
850 else if (IS_RCV_LOR_ERR(devcs)) {
851 DBG(2, "RX LOR error\n");
852 lp->stats.rx_length_errors++;
853 }
854 else if (IS_RCV_LE_ERR(devcs)) {
855 DBG(2, "RX LE error\n");
856 lp->stats.rx_length_errors++;
857 }
858 else if (IS_RCV_OVR_ERR(devcs)) {
859 lp->stats.rx_over_errors++;
860 }
861 else if (IS_RCV_CV_ERR(devcs)) {
862 /* code violation */
863 DBG(2, "RX CV error\n");
864 lp->stats.rx_frame_errors++;
865 }
866 else if (IS_RCV_CES_ERR(devcs)) {
867 DBG(2, "RX Preamble error\n");
868 }
869 }
870
871 rd->devcs = 0;
872
873 /* restore descriptor's curr_addr */
874 if(skb_new)
875 rd->ca = CPHYSADDR(skb_new->data);
876 else
877 rd->ca = CPHYSADDR(skb->data);
878
879 rd->control = DMA_COUNT(RC32434_RBSIZE) |DMAD_cod_m |DMAD_iod_m;
880 lp->rd_ring[(lp->rx_next_done-1)& RC32434_RDS_MASK].control &= ~(DMAD_cod_m);
881
882 lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
883 rd = &lp->rd_ring[lp->rx_next_done];
884 rc32434_writel( ~DMAS_d_m, &lp->rx_dma_regs->dmas);
885 }
886 #ifdef CONFIG_IDT_USE_NAPI
887 dev->quota -= received;
888 *budget -= received;
889 if(rx_work_limit < 0)
890 goto not_done;
891 #endif
892
893 dmas = rc32434_readl(&lp->rx_dma_regs->dmas);
894
895 if(dmas & DMAS_h_m) {
896 rc32434_writel( ~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
897 #ifdef RC32434_PROC_DEBUG
898 lp->dma_halt_cnt++;
899 #endif
900 rd->devcs = 0;
901 skb = lp->rx_skb[lp->rx_next_done];
902 rd->ca = CPHYSADDR(skb->data);
903 rc32434_chain_rx(lp,rd);
904 }
905
906 #ifdef CONFIG_IDT_USE_NAPI
907 netif_rx_complete(dev);
908 #endif
909 /* Enable D H E bit in Rx DMA */
910 rc32434_writel(rc32434_readl(&lp->rx_dma_regs->dmasm) & ~(DMASM_d_m | DMASM_h_m |DMASM_e_m), &lp->rx_dma_regs->dmasm);
911 #ifdef CONFIG_IDT_USE_NAPI
912 return 0;
913 not_done:
914 return 1;
915 #else
916 spin_unlock_irqrestore(&lp->lock, flags);
917 return;
918 #endif
919
920
921 }
922
923
924
925 /* Ethernet Tx DMA interrupt */
926 static irqreturn_t
927 rc32434_tx_dma_interrupt(int irq, void *dev_id)
928 {
929 struct net_device *dev = (struct net_device *)dev_id;
930 struct rc32434_local *lp;
931 volatile u32 dmas,dmasm;
932 irqreturn_t retval;
933
934 ASSERT(dev != NULL);
935
936 lp = (struct rc32434_local *)dev->priv;
937
938 spin_lock(&lp->lock);
939
940 dmas = rc32434_readl(&lp->tx_dma_regs->dmas);
941
942 if (dmas & (DMAS_f_m | DMAS_e_m)) {
943 dmasm = rc32434_readl(&lp->tx_dma_regs->dmasm);
944 /* Mask F E bit in Tx DMA */
945 rc32434_writel(dmasm | (DMASM_f_m | DMASM_e_m), &lp->tx_dma_regs->dmasm);
946
947 tasklet_hi_schedule(lp->tx_tasklet);
948
949 if(lp->tx_chain_status == filled && (rc32434_readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
950 rc32434_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr));
951 lp->tx_chain_status = empty;
952 lp->tx_chain_head = lp->tx_chain_tail;
953 dev->trans_start = jiffies;
954 }
955
956 if (dmas & DMAS_e_m)
957 ERR(": DMA error\n");
958
959 retval = IRQ_HANDLED;
960 }
961 else
962 retval = IRQ_NONE;
963
964 spin_unlock(&lp->lock);
965
966 return retval;
967 }
968
969
970 static void rc32434_tx_tasklet(unsigned long tx_data_dev)
971 {
972 struct net_device *dev = (struct net_device *)tx_data_dev;
973 struct rc32434_local* lp = (struct rc32434_local *)dev->priv;
974 volatile DMAD_t td = &lp->td_ring[lp->tx_next_done];
975 u32 devcs;
976 unsigned long flags;
977 volatile u32 dmas;
978
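/*
 * Reclaim every descriptor the DMA engine has finished with: free the
 * transmitted skb, update the error counters from the devcs word, reset
 * the descriptor for reuse and, once done, acknowledge the DMA status and
 * unmask the Finished/Error interrupt sources again.
 */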
979 spin_lock_irqsave(&lp->lock, flags);
980
981 /* process all desc that are done */
982 while(IS_DMA_FINISHED(td->control)) {
983 if(lp->tx_full == 1) {
984 netif_wake_queue(dev);
985 lp->tx_full = 0;
986 }
987
988 devcs = lp->td_ring[lp->tx_next_done].devcs;
989 if ((devcs & (ETHTX_fd_m | ETHTX_ld_m)) != (ETHTX_fd_m | ETHTX_ld_m)) {
990 lp->stats.tx_errors++;
991 lp->stats.tx_dropped++;
992
993 /* should never happen */
994 DBG(1, "%s: split tx ignored\n", __FUNCTION__);
995 }
996 else if (IS_TX_TOK(devcs)) {
997 lp->stats.tx_packets++;
998 }
999 else {
1000 lp->stats.tx_errors++;
1001 lp->stats.tx_dropped++;
1002
1003 /* underflow */
1004 if (IS_TX_UND_ERR(devcs))
1005 lp->stats.tx_fifo_errors++;
1006
1007 /* oversized frame */
1008 if (IS_TX_OF_ERR(devcs))
1009 lp->stats.tx_aborted_errors++;
1010
1011 /* excessive deferrals */
1012 if (IS_TX_ED_ERR(devcs))
1013 lp->stats.tx_carrier_errors++;
1014
1015 /* collisions: medium busy */
1016 if (IS_TX_EC_ERR(devcs))
1017 lp->stats.collisions++;
1018
1019 /* late collision */
1020 if (IS_TX_LC_ERR(devcs))
1021 lp->stats.tx_window_errors++;
1022
1023 }
1024
1025 /* We must always free the original skb */
1026 if (lp->tx_skb[lp->tx_next_done] != NULL) {
1027 dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
1028 lp->tx_skb[lp->tx_next_done] = NULL;
1029 }
1030
1031 lp->td_ring[lp->tx_next_done].control = DMAD_iof_m;
1032 lp->td_ring[lp->tx_next_done].devcs = ETHTX_fd_m | ETHTX_ld_m;
1033 lp->td_ring[lp->tx_next_done].link = 0;
1034 lp->td_ring[lp->tx_next_done].ca = 0;
1035 lp->tx_count --;
1036
1037 /* go on to next transmission */
1038 lp->tx_next_done = (lp->tx_next_done + 1) & RC32434_TDS_MASK;
1039 td = &lp->td_ring[lp->tx_next_done];
1040
1041 }
1042
1043 dmas = rc32434_readl(&lp->tx_dma_regs->dmas);
1044 rc32434_writel( ~dmas, &lp->tx_dma_regs->dmas);
1045
1046 /* Enable F E bit in Tx DMA */
1047 rc32434_writel(rc32434_readl(&lp->tx_dma_regs->dmasm) & ~(DMASM_f_m | DMASM_e_m), &lp->tx_dma_regs->dmasm);
1048 spin_unlock_irqrestore(&lp->lock, flags);
1049
1050 }
1051
1052
1053 static struct net_device_stats * rc32434_get_stats(struct net_device *dev)
1054 {
1055 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
1056 return &lp->stats;
1057 }
1058
1059
1060 /*
1061 * Set or clear the multicast filter for this adaptor.
1062 */
1063 static void rc32434_multicast_list(struct net_device *dev)
1064 {
1065 /* always listen to broadcasts and treat the IFF bits independently */
1067 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
1068 unsigned long flags;
1069 u32 recognise = ETHARC_ab_m; /* always accept broadcasts */
1070
1071 if (dev->flags & IFF_PROMISC) /* set promiscuous mode */
1072 recognise |= ETHARC_pro_m;
1073
1074 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15))
1075 recognise |= ETHARC_am_m; /* all multicast & bcast */
1076 else if (dev->mc_count > 0) {
1077 DBG(2, "%s: mc_count %d\n", __FUNCTION__, dev->mc_count);
1078 recognise |= ETHARC_am_m; /* for the time being */
1079 }
1080
1081 spin_lock_irqsave(&lp->lock, flags);
1082 rc32434_writel(recognise, &lp->eth_regs->etharc);
1083 spin_unlock_irqrestore(&lp->lock, flags);
1084 }
1085
1086
1087 static void rc32434_tx_timeout(struct net_device *dev)
1088 {
1089 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
1090 unsigned long flags;
1091
1092 spin_lock_irqsave(&lp->lock, flags);
1093 rc32434_restart(dev);
1094 spin_unlock_irqrestore(&lp->lock, flags);
1095
1096 }
1097
1098
1099 /*
1100 * Initialize the RC32434 ethernet controller.
1101 */
1102 static int rc32434_init(struct net_device *dev)
1103 {
1104 struct rc32434_local *lp = (struct rc32434_local *)dev->priv;
1105 int i, j;
1106
1107 /* Disable DMA */
1108 rc32434_abort_tx(dev);
1109 rc32434_abort_rx(dev);
1110
1111 /* reset the ethernet logic; spin until the reset-in-progress bit clears, refreshing trans_start so the Tx watchdog does not fire in the meantime */
1112 rc32434_writel(0, &lp->eth_regs->ethintfc);
1113 while((rc32434_readl(&lp->eth_regs->ethintfc) & ETHINTFC_rip_m))
1114 dev->trans_start = jiffies;
1115
1116 /* Enable Ethernet Interface */
1117 rc32434_writel(ETHINTFC_en_m, &lp->eth_regs->ethintfc);
1118
1119 #ifndef CONFIG_IDT_USE_NAPI
1120 tasklet_disable(lp->rx_tasklet);
1121 #endif
1122 tasklet_disable(lp->tx_tasklet);
1123
1124 /* Initialize the transmit Descriptors */
1125 for (i = 0; i < RC32434_NUM_TDS; i++) {
1126 lp->td_ring[i].control = DMAD_iof_m;
1127 lp->td_ring[i].devcs = ETHTX_fd_m | ETHTX_ld_m;
1128 lp->td_ring[i].ca = 0;
1129 lp->td_ring[i].link = 0;
1130 if (lp->tx_skb[i] != NULL) {
1131 dev_kfree_skb_any(lp->tx_skb[i]);
1132 lp->tx_skb[i] = NULL;
1133 }
1134 }
1135 lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail = lp->tx_full = lp->tx_count = 0;
1136 lp->tx_chain_status = empty;
1137
1138 /*
1139 * Initialize the receive descriptors so that they
1140 * become a circular linked list, ie. let the last
1141 * descriptor point to the first again.
1142 */
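/*
 * Each receive buffer is allocated RC32434_RBSIZE + 2 bytes and offset by
 * two with skb_reserve() so the IP header lands on a 32-bit boundary
 * behind the 14-byte Ethernet header.
 */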
1143 for (i=0; i<RC32434_NUM_RDS; i++) {
1144 struct sk_buff *skb = lp->rx_skb[i];
1145
1146 if (lp->rx_skb[i] == NULL) {
1147 skb = dev_alloc_skb(RC32434_RBSIZE + 2);
1148 if (skb == NULL) {
1149 ERR("No memory in the system\n");
1150 for (j = 0; j < RC32434_NUM_RDS; j ++)
1151 if (lp->rx_skb[j] != NULL)
1152 dev_kfree_skb_any(lp->rx_skb[j]);
1153
1154 return 1;
1155 }
1156 else {
1157 skb->dev = dev;
1158 skb_reserve(skb, 2);
1159 lp->rx_skb[i] = skb;
1160 lp->rd_ring[i].ca = CPHYSADDR(skb->data);
1161
1162 }
1163 }
1164 lp->rd_ring[i].control = DMAD_iod_m | DMA_COUNT(RC32434_RBSIZE);
1165 lp->rd_ring[i].devcs = 0;
1166 lp->rd_ring[i].ca = CPHYSADDR(skb->data);
1167 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
1168
1169 }
1170 /* loop back */
1171 lp->rd_ring[RC32434_NUM_RDS-1].link = CPHYSADDR(&lp->rd_ring[0]);
1172 lp->rx_next_done = 0;
1173
1174 lp->rd_ring[RC32434_NUM_RDS-1].control |= DMAD_cod_m;
1175 lp->rx_chain_head = 0;
1176 lp->rx_chain_tail = 0;
1177 lp->rx_chain_status = empty;
1178
1179 rc32434_writel(0, &lp->rx_dma_regs->dmas);
1180 /* Start Rx DMA */
1181 rc32434_start_rx(lp, &lp->rd_ring[0]);
1182
1183 /* Enable F E bit in Tx DMA */
1184 rc32434_writel(rc32434_readl(&lp->tx_dma_regs->dmasm) & ~(DMASM_f_m | DMASM_e_m), &lp->tx_dma_regs->dmasm);
1185 /* Enable D H E bit in Rx DMA */
1186 rc32434_writel(rc32434_readl(&lp->rx_dma_regs->dmasm) & ~(DMASM_d_m | DMASM_h_m | DMASM_e_m), &lp->rx_dma_regs->dmasm);
1187
1188 /* Accept only packets destined for this Ethernet device address */
1189 rc32434_writel(ETHARC_ab_m, &lp->eth_regs->etharc);
1190
1191 /* Set all Ether station address registers to their initial values */
1192 rc32434_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
1193 rc32434_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
1194
1195 rc32434_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
1196 rc32434_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
1197
1198 rc32434_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
1199 rc32434_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
1200
1201 rc32434_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
1202 rc32434_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);
1203
1204
1205 /* Pad enable, CRC enable, full duplex (ETHMAC2_flc_m and lp->duplex_mode are not applied here) */
1206 rc32434_writel(ETHMAC2_pe_m | ETHMAC2_cen_m | ETHMAC2_fd_m, &lp->eth_regs->ethmac2);
1208
1209 /* Back to back inter-packet-gap */
1210 rc32434_writel(0x15, &lp->eth_regs->ethipgt);
1211 /* Non - Back to back inter-packet-gap */
1212 rc32434_writel(0x12, &lp->eth_regs->ethipgr);
1213
1214 /* Management Clock Prescaler Divisor */
1215 /* Clock independent setting */
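/* The divisor computed below is rounded to an even value; with MII_CLOCK
   defined above as half of the 2.5 MHz limit, this is intended to keep the
   management clock (MDC) within the MII specification. */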
1216 rc32434_writel(((idt_cpu_freq)/MII_CLOCK+1) & ~1,
1217 &lp->eth_regs->ethmcp);
1218
1219 /* don't transmit until fifo contains 48b */
1220 rc32434_writel(48, &lp->eth_regs->ethfifott);
1221
1222 rc32434_writel(ETHMAC1_re_m, &lp->eth_regs->ethmac1);
1223
1224 #ifndef CONFIG_IDT_USE_NAPI
1225 tasklet_enable(lp->rx_tasklet);
1226 #endif
1227 tasklet_enable(lp->tx_tasklet);
1228
1229 netif_start_queue(dev);
1230
1231
1232 return 0;
1233
1234 }
1235
1236
1237 #ifndef MODULE
1238
1239 static int __init rc32434_setup(char *options)
1240 {
1241 /* no options yet */
1242 return 1;
1243 }
1244
1245 static int __init rc32434_setup_ethaddr0(char *options)
1246 {
1247 memcpy(mac0, options, 17);
1248 mac0[17]= '\0';
1249 return 1;
1250 }
1251
1252 __setup("rc32434eth=", rc32434_setup);
1253 __setup("ethaddr0=", rc32434_setup_ethaddr0);
1254
1255
1256 #endif /* MODULE */
1257
1258 module_init(rc32434_init_module);
1259 module_exit(rc32434_cleanup_module);
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273