2 * ar2313.c: Linux driver for the Atheros AR231x Ethernet device.
4 * Copyright (C) 2004 by Sameer Dekate <sdekate@arubanetworks.com>
5 * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
6 * Copyright (C) 2006-2007 Felix Fietkau <nbd@openwrt.org>
8 * Thanks to Atheros for providing hardware and documentation
9 * enabling me to write this driver.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
17 * This code is taken from John Taylor's Sibyte driver and then
18 * modified for the AR2313.
21 #include <linux/autoconf.h>
22 #include <linux/module.h>
23 #include <linux/version.h>
24 #include <linux/types.h>
25 #include <linux/errno.h>
26 #include <linux/ioport.h>
27 #include <linux/pci.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/skbuff.h>
31 #include <linux/init.h>
32 #include <linux/delay.h>
34 #include <linux/highmem.h>
35 #include <linux/sockios.h>
36 #include <linux/pkt_sched.h>
37 #include <linux/compile.h>
38 #include <linux/mii.h>
39 #include <linux/ethtool.h>
40 #include <linux/ctype.h>
41 #include <linux/platform_device.h>
46 #include <asm/system.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51 #include <asm/bootinfo.h>
53 #define AR2313_MTU 1692
54 #define AR2313_PRIOS 1
55 #define AR2313_QUEUES (2*AR2313_PRIOS)
56 #define AR2313_DESCR_ENTRIES 64
67 #define min(a,b) (((a)<(b))?(a):(b))
70 #ifndef SMP_CACHE_BYTES
71 #define SMP_CACHE_BYTES L1_CACHE_BYTES
74 #define AR2313_MBOX_SET_BIT 0x8
76 #define BOARD_IDX_STATIC 0
77 #define BOARD_IDX_OVERFLOW -1
83 * New interrupt handler strategy:
85 * An old interrupt handler worked using the traditional method of
86 * replacing an skbuff with a new one when a packet arrives. However
87 * the rx rings do not need to contain a static number of buffer
88 * descriptors, thus it makes sense to move the memory allocation out
89 * of the main interrupt handler and do it in a bottom half handler
90 * and only allocate new buffers when the number of buffers in the
91 * ring is below a certain threshold. In order to avoid starving the
92 * NIC under heavy load it is however necessary to force allocation
93 * when hitting a minimum threshold. The strategy for allocation is as
96 * RX_LOW_BUF_THRES - allocate buffers in the bottom half
97 * RX_PANIC_LOW_THRES - we are very low on buffers, allocate
98 * the buffers in the interrupt handler
99 * RX_RING_THRES - maximum number of buffers in the rx ring
101 * One advantageous side effect of this allocation approach is that the
102 * entire rx processing can be done without holding any spin lock
103 * since the rx rings and registers are totally independent of the tx
104 * ring and its registers. This of course includes the kmalloc's of
105 * new skb's. Thus start_xmit can run in parallel with rx processing
106 * and the memory allocation on SMP systems.
108 * Note that running the skb reallocation in a bottom half opens up
109 * another can of races which needs to be handled properly. In
110 * particular it can happen that the interrupt handler tries to run
111 * the reallocation while the bottom half is either running on another
112 * CPU or was interrupted on the same CPU. To get around this the
113 * driver uses bitops to prevent the reallocation routines from being
116 * TX handling can also be done without holding any spin lock, wheee
117 * this is fun! since tx_csm is only written to by the interrupt
122 * Threshold values for RX buffer allocation - the low water marks for
123 * when to start refilling the rings are set to 75% of the ring
124 * sizes. It seems to make sense to refill the rings entirely from the
125 * interrupt handler once it gets below the panic threshold, that way
126 * we don't risk that the refilling is moved to another CPU when the
127 * one running the interrupt handler just got the slab code hot in its
130 #define RX_RING_SIZE AR2313_DESCR_ENTRIES
131 #define RX_PANIC_THRES (RX_RING_SIZE/4)
132 #define RX_LOW_THRES ((3*RX_RING_SIZE)/4)
136 #define AR2313_BUFSIZE (AR2313_MTU + ETH_HLEN + CRC_LEN + RX_OFFSET)
139 MODULE_AUTHOR("Sameer Dekate <sdekate@arubanetworks.com>, Imre Kaloz <kaloz@openwrt.org>, Felix Fietkau <nbd@openwrt.org>");
140 MODULE_DESCRIPTION("AR2313 Ethernet driver");
143 #define virt_to_phys(x) ((u32)(x) & 0x1fffffff)
146 static short armiiread(struct net_device
*dev
, short phy
, short reg
);
147 static void armiiwrite(struct net_device
*dev
, short phy
, short reg
, short data
);
149 static void ar2313_tx_timeout(struct net_device
*dev
);
151 static void ar2313_halt(struct net_device
*dev
);
152 static void rx_tasklet_func(unsigned long data
);
153 static void ar2313_multicast_list(struct net_device
*dev
);
156 #define ERR(fmt, args...) printk("%s: " fmt, __func__, ##args)
160 int __init
ar2313_probe(struct platform_device
*pdev
)
162 struct net_device
*dev
;
163 struct ar2313_private
*sp
;
164 struct resource
*res
;
165 unsigned long ar_eth_base
;
168 dev
= alloc_etherdev(sizeof(struct ar2313_private
));
171 printk(KERN_ERR
"ar2313: Unable to allocate net_device structure!\n");
175 SET_MODULE_OWNER(dev
);
176 platform_set_drvdata(pdev
, dev
);
180 sp
->cfg
= pdev
->dev
.platform_data
;
182 sprintf(buf
, "eth%d_membase", pdev
->id
);
183 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, buf
);
188 ar_eth_base
= res
->start
;
189 sp
->phy
= sp
->cfg
->phy
;
191 sprintf(buf
, "eth%d_irq", pdev
->id
);
192 dev
->irq
= platform_get_irq_byname(pdev
, buf
);
194 spin_lock_init(&sp
->lock
);
196 /* initialize func pointers */
197 dev
->open
= &ar2313_open
;
198 dev
->stop
= &ar2313_close
;
199 dev
->hard_start_xmit
= &ar2313_start_xmit
;
201 dev
->get_stats
= &ar2313_get_stats
;
202 dev
->set_multicast_list
= &ar2313_multicast_list
;
204 dev
->tx_timeout
= ar2313_tx_timeout
;
205 dev
->watchdog_timeo
= AR2313_TX_TIMEOUT
;
207 dev
->do_ioctl
= &ar2313_ioctl
;
209 // SAMEER: do we need this?
210 dev
->features
|= NETIF_F_SG
| NETIF_F_HIGHDMA
;
212 tasklet_init(&sp
->rx_tasklet
, rx_tasklet_func
, (unsigned long) dev
);
213 tasklet_disable(&sp
->rx_tasklet
);
215 sp
->eth_regs
= ioremap_nocache(virt_to_phys(ar_eth_base
), sizeof(*sp
->eth_regs
));
217 printk("Can't remap eth registers\n");
222 * When there's only one MAC, PHY regs are typically on ENET0,
223 * even though the MAC might be on ENET1.
224 * Need to remap PHY regs separately in this case
226 if (virt_to_phys(ar_eth_base
) == virt_to_phys(sp
->phy_regs
))
227 sp
->phy_regs
= sp
->eth_regs
;
229 sp
->phy_regs
= ioremap_nocache(virt_to_phys(sp
->cfg
->phy_base
), sizeof(*sp
->phy_regs
));
231 printk("Can't remap phy registers\n");
236 sp
->dma_regs
= ioremap_nocache(virt_to_phys(ar_eth_base
+ 0x1000), sizeof(*sp
->dma_regs
));
237 dev
->base_addr
= (unsigned int) sp
->dma_regs
;
239 printk("Can't remap DMA registers\n");
243 sp
->int_regs
= ioremap_nocache(virt_to_phys(sp
->cfg
->reset_base
), 4);
245 printk("Can't remap INTERRUPT registers\n");
249 strncpy(sp
->name
, "Atheros AR231x", sizeof (sp
->name
) - 1);
250 sp
->name
[sizeof (sp
->name
) - 1] = '\0';
251 memcpy(dev
->dev_addr
, sp
->cfg
->macaddr
, 6);
252 sp
->board_idx
= BOARD_IDX_STATIC
;
254 if (ar2313_init(dev
)) {
256 * ar2313_init() calls ar2313_init_cleanup() on error.
262 if (register_netdev(dev
)){
263 printk("%s: register_netdev failed\n", __func__
);
267 printk("%s: %s: %02x:%02x:%02x:%02x:%02x:%02x, irq %d\n",
269 dev
->dev_addr
[0], dev
->dev_addr
[1], dev
->dev_addr
[2],
270 dev
->dev_addr
[3], dev
->dev_addr
[4], dev
->dev_addr
[5],
273 /* start link poll timer */
274 ar2313_setup_timer(dev
);
280 static void ar2313_dump_regs(struct net_device
*dev
)
282 unsigned int *ptr
, i
;
283 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
285 ptr
= (unsigned int *)sp
->eth_regs
;
286 for(i
=0; i
< (sizeof(ETHERNET_STRUCT
)/ sizeof(unsigned int)); i
++, ptr
++) {
287 printk("ENET: %08x = %08x\n", (int)ptr
, *ptr
);
290 ptr
= (unsigned int *)sp
->dma_regs
;
291 for(i
=0; i
< (sizeof(DMA
)/ sizeof(unsigned int)); i
++, ptr
++) {
292 printk("DMA: %08x = %08x\n", (int)ptr
, *ptr
);
295 ptr
= (unsigned int *)sp
->int_regs
;
296 for(i
=0; i
< (sizeof(INTERRUPT
)/ sizeof(unsigned int)); i
++, ptr
++){
297 printk("INT: %08x = %08x\n", (int)ptr
, *ptr
);
300 for (i
= 0; i
< AR2313_DESCR_ENTRIES
; i
++) {
301 ar2313_descr_t
*td
= &sp
->tx_ring
[i
];
302 printk("Tx desc %2d: %08x %08x %08x %08x\n", i
,
303 td
->status
, td
->devcs
, td
->addr
, td
->descr
);
310 ar2313_tx_timeout(struct net_device
*dev
)
312 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
316 printk("Tx timeout\n");
318 spin_lock_irqsave(&sp
->lock
, flags
);
320 spin_unlock_irqrestore(&sp
->lock
, flags
);
326 printMcList(struct net_device
*dev
)
328 struct dev_mc_list
*list
= dev
->mc_list
;
331 printk("%d MC ADDR ", num
);
332 for(i
=0;i
<list
->dmi_addrlen
;i
++) {
333 printk(":%02x", list
->dmi_addr
[i
]);
342 * Set or clear the multicast filter for this adaptor.
343 * THIS IS ABSOLUTE CRAP, disabled
346 ar2313_multicast_list(struct net_device
*dev
)
349 * Always listen to broadcasts and
350 * treat IFF bits independently
352 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
353 unsigned int recognise
;
355 recognise
= sp
->eth_regs
->mac_control
;
357 if (dev
->flags
& IFF_PROMISC
) { /* set promiscuous mode */
358 recognise
|= MAC_CONTROL_PR
;
360 recognise
&= ~MAC_CONTROL_PR
;
363 if ((dev
->flags
& IFF_ALLMULTI
) || (dev
->mc_count
> 15)) {
366 printk("%s: all MULTICAST mc_count %d\n", __FUNCTION__
, dev
->mc_count
);
368 recognise
|= MAC_CONTROL_PM
;/* all multicast */
369 } else if (dev
->mc_count
> 0) {
372 printk("%s: mc_count %d\n", __FUNCTION__
, dev
->mc_count
);
374 recognise
|= MAC_CONTROL_PM
; /* for the time being */
377 printk("%s: setting %08x to %08x\n", __FUNCTION__
, (int)sp
->eth_regs
, recognise
);
380 sp
->eth_regs
->mac_control
= recognise
;
383 static void rx_tasklet_cleanup(struct net_device
*dev
)
385 struct ar2313_private
*sp
= dev
->priv
;
388 * Tasklet may be scheduled. Need to get it removed from the list
389 * since we're about to free the struct.
393 tasklet_enable(&sp
->rx_tasklet
);
394 tasklet_kill(&sp
->rx_tasklet
);
397 static int __exit
ar2313_remove(struct platform_device
*pdev
)
399 struct net_device
*dev
= platform_get_drvdata(pdev
);
400 rx_tasklet_cleanup(dev
);
401 ar2313_init_cleanup(dev
);
402 unregister_netdev(dev
);
409 * Restart the AR2313 ethernet controller.
411 static int ar2313_restart(struct net_device
*dev
)
413 /* disable interrupts */
414 disable_irq(dev
->irq
);
422 /* enable interrupts */
423 enable_irq(dev
->irq
);
428 static struct platform_driver ar2313_driver
= {
429 .driver
.name
= "ar531x-eth",
430 .probe
= ar2313_probe
,
431 .remove
= ar2313_remove
,
434 int __init
ar2313_module_init(void)
436 return platform_driver_register(&ar2313_driver
);
439 void __exit
ar2313_module_cleanup(void)
441 platform_driver_unregister(&ar2313_driver
);
444 module_init(ar2313_module_init
);
445 module_exit(ar2313_module_cleanup
);
448 static void ar2313_free_descriptors(struct net_device
*dev
)
450 struct ar2313_private
*sp
= dev
->priv
;
451 if (sp
->rx_ring
!= NULL
) {
452 kfree((void*)KSEG0ADDR(sp
->rx_ring
));
459 static int ar2313_allocate_descriptors(struct net_device
*dev
)
461 struct ar2313_private
*sp
= dev
->priv
;
464 ar2313_descr_t
*space
;
466 if(sp
->rx_ring
!= NULL
){
467 printk("%s: already done.\n", __FUNCTION__
);
471 size
= (sizeof(ar2313_descr_t
) * (AR2313_DESCR_ENTRIES
* AR2313_QUEUES
));
472 space
= kmalloc(size
, GFP_KERNEL
);
476 /* invalidate caches */
477 dma_cache_inv((unsigned int)space
, size
);
479 /* now convert pointer to KSEG1 */
480 space
= (ar2313_descr_t
*)KSEG1ADDR(space
);
482 memset((void *)space
, 0, size
);
485 space
+= AR2313_DESCR_ENTRIES
;
488 space
+= AR2313_DESCR_ENTRIES
;
490 /* Initialize the transmit Descriptors */
491 for (j
= 0; j
< AR2313_DESCR_ENTRIES
; j
++) {
492 ar2313_descr_t
*td
= &sp
->tx_ring
[j
];
494 td
->devcs
= DMA_TX1_CHAINED
;
496 td
->descr
= virt_to_phys(&sp
->tx_ring
[(j
+1) & (AR2313_DESCR_ENTRIES
-1)]);
504 * Generic cleanup handling data allocated during init. Used when the
505 * module is unloaded or if an error occurs during initialization
507 static void ar2313_init_cleanup(struct net_device
*dev
)
509 struct ar2313_private
*sp
= dev
->priv
;
513 ar2313_free_descriptors(dev
);
515 if (sp
->eth_regs
) iounmap((void*)sp
->eth_regs
);
516 if (sp
->dma_regs
) iounmap((void*)sp
->dma_regs
);
519 for (j
= 0; j
< AR2313_DESCR_ENTRIES
; j
++) {
522 sp
->rx_skb
[j
] = NULL
;
531 for (j
= 0; j
< AR2313_DESCR_ENTRIES
; j
++) {
534 sp
->tx_skb
[j
] = NULL
;
543 static int ar2313_setup_timer(struct net_device
*dev
)
545 struct ar2313_private
*sp
= dev
->priv
;
547 init_timer(&sp
->link_timer
);
549 sp
->link_timer
.function
= ar2313_link_timer_fn
;
550 sp
->link_timer
.data
= (int) dev
;
551 sp
->link_timer
.expires
= jiffies
+ HZ
;
553 add_timer(&sp
->link_timer
);
558 static void ar2313_link_timer_fn(unsigned long data
)
560 struct net_device
*dev
= (struct net_device
*) data
;
561 struct ar2313_private
*sp
= dev
->priv
;
563 // see if the link status changed
564 // This was needed to make sure we set the PHY to the
565 // autonegotiated value of half or full duplex.
566 ar2313_check_link(dev
);
568 // Loop faster when we don't have link.
569 // This was needed to speed up the AP bootstrap time.
571 mod_timer(&sp
->link_timer
, jiffies
+ HZ
/2);
573 mod_timer(&sp
->link_timer
, jiffies
+ LINK_TIMER
);
577 static void ar2313_check_link(struct net_device
*dev
)
579 struct ar2313_private
*sp
= dev
->priv
;
582 phyData
= armiiread(dev
, sp
->phy
, MII_BMSR
);
583 if (sp
->phyData
!= phyData
) {
584 if (phyData
& BMSR_LSTATUS
) {
585 /* link is present, read link partner ability to determine duplexity */
590 reg
= armiiread(dev
, sp
->phy
, MII_BMCR
);
591 if (reg
& BMCR_ANENABLE
) {
592 /* auto neg enabled */
593 reg
= armiiread(dev
, sp
->phy
, MII_LPA
);
594 duplex
= (reg
& (LPA_100FULL
|LPA_10FULL
))? 1:0;
596 /* no auto neg, just read duplex config */
597 duplex
= (reg
& BMCR_FULLDPLX
)? 1:0;
600 printk(KERN_INFO
"%s: Configuring MAC for %s duplex\n", dev
->name
,
601 (duplex
)? "full":"half");
605 sp
->eth_regs
->mac_control
= ((sp
->eth_regs
->mac_control
| MAC_CONTROL_F
) &
609 sp
->eth_regs
->mac_control
= ((sp
->eth_regs
->mac_control
| MAC_CONTROL_DRO
) &
616 sp
->phyData
= phyData
;
621 ar2313_reset_reg(struct net_device
*dev
)
623 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
624 unsigned int ethsal
, ethsah
;
627 *sp
->int_regs
|= sp
->cfg
->reset_mac
;
629 *sp
->int_regs
&= ~sp
->cfg
->reset_mac
;
631 *sp
->int_regs
|= sp
->cfg
->reset_phy
;
633 *sp
->int_regs
&= ~sp
->cfg
->reset_phy
;
636 sp
->dma_regs
->bus_mode
= (DMA_BUS_MODE_SWR
);
638 sp
->dma_regs
->bus_mode
= ((32 << DMA_BUS_MODE_PBL_SHIFT
) | DMA_BUS_MODE_BLE
);
640 /* enable interrupts */
641 sp
->dma_regs
->intr_ena
= (DMA_STATUS_AIS
|
646 sp
->dma_regs
->xmt_base
= virt_to_phys(sp
->tx_ring
);
647 sp
->dma_regs
->rcv_base
= virt_to_phys(sp
->rx_ring
);
648 sp
->dma_regs
->control
= (DMA_CONTROL_SR
| DMA_CONTROL_ST
| DMA_CONTROL_SF
);
650 sp
->eth_regs
->flow_control
= (FLOW_CONTROL_FCE
);
651 sp
->eth_regs
->vlan_tag
= (0x8100);
653 /* Enable Ethernet Interface */
654 flags
= (MAC_CONTROL_TE
| /* transmit enable */
655 MAC_CONTROL_PM
| /* pass mcast */
656 MAC_CONTROL_F
| /* full duplex */
657 MAC_CONTROL_HBD
); /* heart beat disabled */
659 if (dev
->flags
& IFF_PROMISC
) { /* set promiscuous mode */
660 flags
|= MAC_CONTROL_PR
;
662 sp
->eth_regs
->mac_control
= flags
;
664 /* Set all Ethernet station address registers to their initial values */
665 ethsah
= ((((u_int
)(dev
->dev_addr
[5]) << 8) & (u_int
)0x0000FF00) |
666 (((u_int
)(dev
->dev_addr
[4]) << 0) & (u_int
)0x000000FF));
668 ethsal
= ((((u_int
)(dev
->dev_addr
[3]) << 24) & (u_int
)0xFF000000) |
669 (((u_int
)(dev
->dev_addr
[2]) << 16) & (u_int
)0x00FF0000) |
670 (((u_int
)(dev
->dev_addr
[1]) << 8) & (u_int
)0x0000FF00) |
671 (((u_int
)(dev
->dev_addr
[0]) << 0) & (u_int
)0x000000FF) );
673 sp
->eth_regs
->mac_addr
[0] = ethsah
;
674 sp
->eth_regs
->mac_addr
[1] = ethsal
;
682 static int ar2313_init(struct net_device
*dev
)
684 struct ar2313_private
*sp
= dev
->priv
;
688 * Allocate descriptors
690 if (ar2313_allocate_descriptors(dev
)) {
691 printk("%s: %s: ar2313_allocate_descriptors failed\n",
692 dev
->name
, __FUNCTION__
);
698 * Get the memory for the skb rings.
700 if(sp
->rx_skb
== NULL
) {
701 sp
->rx_skb
= kmalloc(sizeof(struct sk_buff
*) * AR2313_DESCR_ENTRIES
, GFP_KERNEL
);
703 printk("%s: %s: rx_skb kmalloc failed\n",
704 dev
->name
, __FUNCTION__
);
709 memset(sp
->rx_skb
, 0, sizeof(struct sk_buff
*) * AR2313_DESCR_ENTRIES
);
711 if(sp
->tx_skb
== NULL
) {
712 sp
->tx_skb
= kmalloc(sizeof(struct sk_buff
*) * AR2313_DESCR_ENTRIES
, GFP_KERNEL
);
714 printk("%s: %s: tx_skb kmalloc failed\n",
715 dev
->name
, __FUNCTION__
);
720 memset(sp
->tx_skb
, 0, sizeof(struct sk_buff
*) * AR2313_DESCR_ENTRIES
);
723 * Set tx_csm before we start receiving interrupts, otherwise
724 * the interrupt handler might think it is supposed to process
725 * tx ints before we are up and running, which may cause a null
726 * pointer access in the int handler.
734 * Zero the stats before starting the interface
736 memset(&sp
->stats
, 0, sizeof(sp
->stats
));
739 * We load the ring here as there seem to be no way to tell the
740 * firmware to wipe the ring without re-initializing it.
742 ar2313_load_rx_ring(dev
, RX_RING_SIZE
);
747 ar2313_reset_reg(dev
);
752 ecode
= request_irq(dev
->irq
, &ar2313_interrupt
, IRQF_SHARED
| IRQF_DISABLED
| IRQF_SAMPLE_RANDOM
, dev
->name
, dev
);
754 printk(KERN_WARNING
"%s: %s: Requested IRQ %d is busy\n",
755 dev
->name
, __FUNCTION__
, dev
->irq
);
760 tasklet_enable(&sp
->rx_tasklet
);
765 ar2313_init_cleanup(dev
);
772 * Loading rings is safe without holding the spin lock since this is
773 * done only before the device is enabled, thus no interrupts are
774 * generated and by the interrupt handler/tasklet handler.
776 static void ar2313_load_rx_ring(struct net_device
*dev
, int nr_bufs
)
779 struct ar2313_private
*sp
= ((struct net_device
*)dev
)->priv
;
784 for (i
= 0; i
< nr_bufs
; i
++) {
788 if (sp
->rx_skb
[idx
]) {
790 printk(KERN_INFO
"ar2313 rx refill full\n");
795 // partha: create additional room for the second GRE fragment
796 skb
= alloc_skb(AR2313_BUFSIZE
+128, GFP_ATOMIC
);
798 printk("\n\n\n\n %s: No memory in system\n\n\n\n", __FUNCTION__
);
801 // partha: create additional room in the front for tx pkt capture
802 skb_reserve(skb
, 32);
805 * Make sure IP header starts on a fresh cache line.
808 skb_reserve(skb
, RX_OFFSET
);
809 sp
->rx_skb
[idx
] = skb
;
811 rd
= (ar2313_descr_t
*) &sp
->rx_ring
[idx
];
813 /* initialize dma descriptor */
814 rd
->devcs
= ((AR2313_BUFSIZE
<< DMA_RX1_BSIZE_SHIFT
) |
816 rd
->addr
= virt_to_phys(skb
->data
);
817 rd
->descr
= virt_to_phys(&sp
->rx_ring
[(idx
+1) & (AR2313_DESCR_ENTRIES
-1)]);
818 rd
->status
= DMA_RX_OWN
;
825 printk(KERN_INFO
"Out of memory when allocating standard receive buffers\n");
834 #define AR2313_MAX_PKTS_PER_CALL 64
836 static int ar2313_rx_int(struct net_device
*dev
)
838 struct ar2313_private
*sp
= dev
->priv
;
839 struct sk_buff
*skb
, *skb_new
;
840 ar2313_descr_t
*rxdesc
;
848 /* process at most the entire ring and then wait for another interrupt */
851 rxdesc
= &sp
->rx_ring
[idx
];
852 status
= rxdesc
->status
;
853 if (status
& DMA_RX_OWN
) {
854 /* SiByte owns descriptor or descr not yet filled in */
859 if (++pkts
> AR2313_MAX_PKTS_PER_CALL
) {
865 printk("index %d\n", idx
);
866 printk("RX status %08x\n", rxdesc
->status
);
867 printk("RX devcs %08x\n", rxdesc
->devcs
);
868 printk("RX addr %08x\n", rxdesc
->addr
);
869 printk("RX descr %08x\n", rxdesc
->descr
);
872 if ((status
& (DMA_RX_ERROR
|DMA_RX_ERR_LENGTH
)) &&
873 (!(status
& DMA_RX_LONG
))){
875 printk("%s: rx ERROR %08x\n", __FUNCTION__
, status
);
877 sp
->stats
.rx_errors
++;
878 sp
->stats
.rx_dropped
++;
880 /* add statistics counters */
881 if (status
& DMA_RX_ERR_CRC
) sp
->stats
.rx_crc_errors
++;
882 if (status
& DMA_RX_ERR_COL
) sp
->stats
.rx_over_errors
++;
883 if (status
& DMA_RX_ERR_LENGTH
)
884 sp
->stats
.rx_length_errors
++;
885 if (status
& DMA_RX_ERR_RUNT
) sp
->stats
.rx_over_errors
++;
886 if (status
& DMA_RX_ERR_DESC
) sp
->stats
.rx_over_errors
++;
889 /* alloc new buffer. */
890 skb_new
= dev_alloc_skb(AR2313_BUFSIZE
+ RX_OFFSET
+ 128);
891 if (skb_new
!= NULL
) {
893 skb
= sp
->rx_skb
[idx
];
895 skb_put(skb
, ((status
>> DMA_RX_LEN_SHIFT
) & 0x3fff) - CRC_LEN
);
897 sp
->stats
.rx_bytes
+= skb
->len
;
898 skb
->protocol
= eth_type_trans(skb
, dev
);
899 /* pass the packet to upper layers */
904 skb_reserve(skb_new
, RX_OFFSET
+32);
905 /* reset descriptor's curr_addr */
906 rxdesc
->addr
= virt_to_phys(skb_new
->data
);
908 sp
->stats
.rx_packets
++;
909 sp
->rx_skb
[idx
] = skb_new
;
911 sp
->stats
.rx_dropped
++;
915 rxdesc
->devcs
= ((AR2313_BUFSIZE
<< DMA_RX1_BSIZE_SHIFT
) |
917 rxdesc
->status
= DMA_RX_OWN
;
928 static void ar2313_tx_int(struct net_device
*dev
)
930 struct ar2313_private
*sp
= dev
->priv
;
933 ar2313_descr_t
*txdesc
;
934 unsigned int status
=0;
938 while (idx
!= sp
->tx_prd
) {
940 txdesc
= &sp
->tx_ring
[idx
];
943 printk("%s: TXINT: csm=%d idx=%d prd=%d status=%x devcs=%x addr=%08x descr=%x\n",
944 dev
->name
, sp
->tx_csm
, idx
, sp
->tx_prd
,
945 txdesc
->status
, txdesc
->devcs
, txdesc
->addr
, txdesc
->descr
);
948 if ((status
= txdesc
->status
) & DMA_TX_OWN
) {
949 /* ar2313 dma still owns descr */
952 /* done with this descriptor */
953 dma_unmap_single(NULL
, txdesc
->addr
, txdesc
->devcs
& DMA_TX1_BSIZE_MASK
, DMA_TO_DEVICE
);
956 if (status
& DMA_TX_ERROR
){
957 sp
->stats
.tx_errors
++;
958 sp
->stats
.tx_dropped
++;
959 if(status
& DMA_TX_ERR_UNDER
)
960 sp
->stats
.tx_fifo_errors
++;
961 if(status
& DMA_TX_ERR_HB
)
962 sp
->stats
.tx_heartbeat_errors
++;
963 if(status
& (DMA_TX_ERR_LOSS
|
965 sp
->stats
.tx_carrier_errors
++;
966 if (status
& (DMA_TX_ERR_LATE
|
970 sp
->stats
.tx_aborted_errors
++;
973 sp
->stats
.tx_packets
++;
976 skb
= sp
->tx_skb
[idx
];
977 sp
->tx_skb
[idx
] = NULL
;
979 sp
->stats
.tx_bytes
+= skb
->len
;
980 dev_kfree_skb_irq(skb
);
990 rx_tasklet_func(unsigned long data
)
992 struct net_device
*dev
= (struct net_device
*) data
;
993 struct ar2313_private
*sp
= dev
->priv
;
999 if (ar2313_rx_int(dev
)) {
1000 tasklet_hi_schedule(&sp
->rx_tasklet
);
1003 unsigned long flags
;
1004 spin_lock_irqsave(&sp
->lock
, flags
);
1005 sp
->dma_regs
->intr_ena
|= DMA_STATUS_RI
;
1006 spin_unlock_irqrestore(&sp
->lock
, flags
);
1011 rx_schedule(struct net_device
*dev
)
1013 struct ar2313_private
*sp
= dev
->priv
;
1015 sp
->dma_regs
->intr_ena
&= ~DMA_STATUS_RI
;
1017 tasklet_hi_schedule(&sp
->rx_tasklet
);
1020 static irqreturn_t
ar2313_interrupt(int irq
, void *dev_id
)
1022 struct net_device
*dev
= (struct net_device
*)dev_id
;
1023 struct ar2313_private
*sp
= dev
->priv
;
1024 unsigned int status
, enabled
;
1026 /* clear interrupt */
1028 * Don't clear RI bit if currently disabled.
1030 status
= sp
->dma_regs
->status
;
1031 enabled
= sp
->dma_regs
->intr_ena
;
1032 sp
->dma_regs
->status
= status
& enabled
;
1034 if (status
& DMA_STATUS_NIS
) {
1037 * Don't schedule rx processing if interrupt
1038 * is already disabled.
1040 if (status
& enabled
& DMA_STATUS_RI
) {
1041 /* receive interrupt */
1044 if (status
& DMA_STATUS_TI
) {
1045 /* transmit interrupt */
1050 if (status
& DMA_STATUS_AIS
) {
1052 printk("%s: AIS set %08x & %x\n", __FUNCTION__
,
1053 status
, (DMA_STATUS_FBE
| DMA_STATUS_TPS
));
1055 /* abnormal status */
1056 if (status
& (DMA_STATUS_FBE
| DMA_STATUS_TPS
)) {
1057 ar2313_restart(dev
);
1064 static int ar2313_open(struct net_device
*dev
)
1066 struct ar2313_private
*sp
;
1071 netif_start_queue(dev
);
1073 sp
->eth_regs
->mac_control
|= MAC_CONTROL_RE
;
1078 static void ar2313_halt(struct net_device
*dev
)
1080 struct ar2313_private
*sp
= dev
->priv
;
1083 tasklet_disable(&sp
->rx_tasklet
);
1086 sp
->eth_regs
->mac_control
&= ~(MAC_CONTROL_RE
| /* disable Receives */
1087 MAC_CONTROL_TE
); /* disable Transmits */
1089 sp
->dma_regs
->control
= 0;
1090 sp
->dma_regs
->bus_mode
= DMA_BUS_MODE_SWR
;
1092 /* place phy and MAC in reset */
1093 *sp
->int_regs
|= (sp
->cfg
->reset_mac
| sp
->cfg
->reset_phy
);
1095 /* free buffers on tx ring */
1096 for (j
= 0; j
< AR2313_DESCR_ENTRIES
; j
++) {
1097 struct sk_buff
*skb
;
1098 ar2313_descr_t
*txdesc
;
1100 txdesc
= &sp
->tx_ring
[j
];
1103 skb
= sp
->tx_skb
[j
];
1106 sp
->tx_skb
[j
] = NULL
;
1112 * close should do nothing. Here's why. It's called when
1113 * 'ifconfig bond0 down' is run. If it calls free_irq then
1114 * the irq is gone forever ! When bond0 is made 'up' again,
1115 * the ar2313_open () does not call request_irq (). Worse,
1116 * the call to ar2313_halt() generates a WDOG reset due to
1117 * the write to 'sp->int_regs' and the box reboots.
1118 * Commenting this out is good since it allows the
1119 * system to resume when bond0 is made up again.
/*
 * ar2313_close - netdev stop hook.
 *
 * Deliberately a no-op: see the comment block above.  Releasing the
 * irq here would make a subsequent open() fail (it never re-requests
 * it), and writing the reset registers from ar2313_halt() triggers a
 * watchdog reboot.  The original teardown sequence is kept, disabled,
 * for reference.
 */
static int ar2313_close(struct net_device *dev)
{
#if 0
        /*
         * Disable interrupts
         */
        disable_irq(dev->irq);

        /*
         * Without (or before) releasing irq and stopping hardware, this
         * is an absolute non-sense, by the way. It will be reset instantly.
         */
        netif_stop_queue(dev);

        /* stop the MAC and DMA engines */
        ar2313_halt(dev);

        /* release the interrupt */
        free_irq(dev->irq, dev);
#endif
        return 0;
}
1146 static int ar2313_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1148 struct ar2313_private
*sp
= dev
->priv
;
1153 td
= &sp
->tx_ring
[idx
];
1155 if (td
->status
& DMA_TX_OWN
) {
1157 printk("%s: No space left to Tx\n", __FUNCTION__
);
1159 /* free skbuf and lie to the caller that we sent it out */
1160 sp
->stats
.tx_dropped
++;
1163 /* restart transmitter in case locked */
1164 sp
->dma_regs
->xmt_poll
= 0;
1168 /* Setup the transmit descriptor. */
1169 td
->devcs
= ((skb
->len
<< DMA_TX1_BSIZE_SHIFT
) |
1170 (DMA_TX1_LS
|DMA_TX1_IC
|DMA_TX1_CHAINED
));
1171 td
->addr
= dma_map_single(NULL
, skb
->data
, skb
->len
, DMA_TO_DEVICE
);
1172 td
->status
= DMA_TX_OWN
;
1174 /* kick transmitter last */
1175 sp
->dma_regs
->xmt_poll
= 0;
1178 printk("index %d\n", idx
);
1179 printk("TX status %08x\n", td
->status
);
1180 printk("TX devcs %08x\n", td
->devcs
);
1181 printk("TX addr %08x\n", td
->addr
);
1182 printk("TX descr %08x\n", td
->descr
);
1185 sp
->tx_skb
[idx
] = skb
;
1186 idx
= DSC_NEXT(idx
);
1192 static int netdev_get_ecmd(struct net_device
*dev
, struct ethtool_cmd
*ecmd
)
1194 struct ar2313_private
*np
= dev
->priv
;
1198 (SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full
|
1199 SUPPORTED_100baseT_Half
| SUPPORTED_100baseT_Full
|
1200 SUPPORTED_Autoneg
| SUPPORTED_TP
| SUPPORTED_MII
);
1202 ecmd
->port
= PORT_TP
;
1203 /* only supports internal transceiver */
1204 ecmd
->transceiver
= XCVR_INTERNAL
;
1205 /* not sure what this is for */
1206 ecmd
->phy_address
= 1;
1208 ecmd
->advertising
= ADVERTISED_MII
;
1209 tmp
= armiiread(dev
, np
->phy
, MII_ADVERTISE
);
1210 if (tmp
& ADVERTISE_10HALF
)
1211 ecmd
->advertising
|= ADVERTISED_10baseT_Half
;
1212 if (tmp
& ADVERTISE_10FULL
)
1213 ecmd
->advertising
|= ADVERTISED_10baseT_Full
;
1214 if (tmp
& ADVERTISE_100HALF
)
1215 ecmd
->advertising
|= ADVERTISED_100baseT_Half
;
1216 if (tmp
& ADVERTISE_100FULL
)
1217 ecmd
->advertising
|= ADVERTISED_100baseT_Full
;
1219 tmp
= armiiread(dev
, np
->phy
, MII_BMCR
);
1220 if (tmp
& BMCR_ANENABLE
) {
1221 ecmd
->advertising
|= ADVERTISED_Autoneg
;
1222 ecmd
->autoneg
= AUTONEG_ENABLE
;
1224 ecmd
->autoneg
= AUTONEG_DISABLE
;
1227 if (ecmd
->autoneg
== AUTONEG_ENABLE
) {
1228 tmp
= armiiread(dev
, np
->phy
, MII_LPA
);
1229 if (tmp
& (LPA_100FULL
|LPA_10FULL
)) {
1230 ecmd
->duplex
= DUPLEX_FULL
;
1232 ecmd
->duplex
= DUPLEX_HALF
;
1234 if (tmp
& (LPA_100FULL
|LPA_100HALF
)) {
1235 ecmd
->speed
= SPEED_100
;
1237 ecmd
->speed
= SPEED_10
;
1240 if (tmp
& BMCR_FULLDPLX
) {
1241 ecmd
->duplex
= DUPLEX_FULL
;
1243 ecmd
->duplex
= DUPLEX_HALF
;
1245 if (tmp
& BMCR_SPEED100
) {
1246 ecmd
->speed
= SPEED_100
;
1248 ecmd
->speed
= SPEED_10
;
1252 /* ignore maxtxpkt, maxrxpkt for now */
1257 static int netdev_set_ecmd(struct net_device
*dev
, struct ethtool_cmd
*ecmd
)
1259 struct ar2313_private
*np
= dev
->priv
;
1262 if (ecmd
->speed
!= SPEED_10
&& ecmd
->speed
!= SPEED_100
)
1264 if (ecmd
->duplex
!= DUPLEX_HALF
&& ecmd
->duplex
!= DUPLEX_FULL
)
1266 if (ecmd
->port
!= PORT_TP
)
1268 if (ecmd
->transceiver
!= XCVR_INTERNAL
)
1270 if (ecmd
->autoneg
!= AUTONEG_DISABLE
&& ecmd
->autoneg
!= AUTONEG_ENABLE
)
1272 /* ignore phy_address, maxtxpkt, maxrxpkt for now */
1274 /* WHEW! now lets bang some bits */
1276 tmp
= armiiread(dev
, np
->phy
, MII_BMCR
);
1277 if (ecmd
->autoneg
== AUTONEG_ENABLE
) {
1278 /* turn on autonegotiation */
1279 tmp
|= BMCR_ANENABLE
;
1280 printk("%s: Enabling auto-neg\n", dev
->name
);
1282 /* turn off auto negotiation, set speed and duplexity */
1283 tmp
&= ~(BMCR_ANENABLE
| BMCR_SPEED100
| BMCR_FULLDPLX
);
1284 if (ecmd
->speed
== SPEED_100
)
1285 tmp
|= BMCR_SPEED100
;
1286 if (ecmd
->duplex
== DUPLEX_FULL
)
1287 tmp
|= BMCR_FULLDPLX
;
1288 printk("%s: Hard coding %d/%s\n", dev
->name
,
1289 (ecmd
->speed
== SPEED_100
)? 100:10,
1290 (ecmd
->duplex
== DUPLEX_FULL
)? "full":"half");
1292 armiiwrite(dev
, np
->phy
, MII_BMCR
, tmp
);
1297 static int netdev_ethtool_ioctl(struct net_device
*dev
, void *useraddr
)
1299 struct ar2313_private
*np
= dev
->priv
;
1302 if (get_user(cmd
, (u32
*)useraddr
))
1307 case ETHTOOL_GSET
: {
1308 struct ethtool_cmd ecmd
= { ETHTOOL_GSET
};
1309 spin_lock_irq(&np
->lock
);
1310 netdev_get_ecmd(dev
, &ecmd
);
1311 spin_unlock_irq(&np
->lock
);
1312 if (copy_to_user(useraddr
, &ecmd
, sizeof(ecmd
)))
1317 case ETHTOOL_SSET
: {
1318 struct ethtool_cmd ecmd
;
1320 if (copy_from_user(&ecmd
, useraddr
, sizeof(ecmd
)))
1322 spin_lock_irq(&np
->lock
);
1323 r
= netdev_set_ecmd(dev
, &ecmd
);
1324 spin_unlock_irq(&np
->lock
);
1327 /* restart autonegotiation */
1328 case ETHTOOL_NWAY_RST
: {
1331 /* if autoneg is off, it's an error */
1332 tmp
= armiiread(dev
, np
->phy
, MII_BMCR
);
1333 if (tmp
& BMCR_ANENABLE
) {
1334 tmp
|= (BMCR_ANRESTART
);
1335 armiiwrite(dev
, np
->phy
, MII_BMCR
, tmp
);
1340 /* get link status */
1341 case ETHTOOL_GLINK
: {
1342 struct ethtool_value edata
= {ETHTOOL_GLINK
};
1343 edata
.data
= (armiiread(dev
, np
->phy
, MII_BMSR
)&BMSR_LSTATUS
) ? 1:0;
1344 if (copy_to_user(useraddr
, &edata
, sizeof(edata
)))
1353 static int ar2313_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
1355 struct mii_ioctl_data
*data
= (struct mii_ioctl_data
*)&ifr
->ifr_data
;
1360 return netdev_ethtool_ioctl(dev
, (void *) ifr
->ifr_data
);
1362 case SIOCGMIIPHY
: /* Get address of MII PHY in use. */
1366 case SIOCGMIIREG
: /* Read MII PHY register. */
1367 data
->val_out
= armiiread(dev
, data
->phy_id
& 0x1f,
1368 data
->reg_num
& 0x1f);
1370 case SIOCSMIIREG
: /* Write MII PHY register. */
1371 if (!capable(CAP_NET_ADMIN
))
1373 armiiwrite(dev
, data
->phy_id
& 0x1f,
1374 data
->reg_num
& 0x1f, data
->val_in
);
1378 if (copy_from_user(dev
->dev_addr
, ifr
->ifr_data
, sizeof(dev
->dev_addr
)))
1383 if (copy_to_user(ifr
->ifr_data
, dev
->dev_addr
, sizeof(dev
->dev_addr
)))
1394 static struct net_device_stats
*ar2313_get_stats(struct net_device
*dev
)
1396 struct ar2313_private
*sp
= dev
->priv
;
1401 #define MII_ADDR(phy, reg) \
1402 ((reg << MII_ADDR_REG_SHIFT) | (phy << MII_ADDR_PHY_SHIFT))
1405 armiiread(struct net_device
*dev
, short phy
, short reg
)
1407 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
1408 volatile ETHERNET_STRUCT
*ethernet
= sp
->phy_regs
;
1410 ethernet
->mii_addr
= MII_ADDR(phy
, reg
);
1411 while (ethernet
->mii_addr
& MII_ADDR_BUSY
);
1412 return (ethernet
->mii_data
>> MII_DATA_SHIFT
);
1416 armiiwrite(struct net_device
*dev
, short phy
, short reg
, short data
)
1418 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
1419 volatile ETHERNET_STRUCT
*ethernet
= sp
->phy_regs
;
1421 while (ethernet
->mii_addr
& MII_ADDR_BUSY
);
1422 ethernet
->mii_data
= data
<< MII_DATA_SHIFT
;
1423 ethernet
->mii_addr
= MII_ADDR(phy
, reg
) | MII_ADDR_WRITE
;