/*
 *	ADM5120 built in ethernet switch driver
 *
 *	Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 *
 *	Inspiration for this driver came from the original ADMtek 2.4
 *	driver, Copyright ADMtek Inc.
 *
 *	NAPI extensions by Thomas Langer (Thomas.Langer@infineon.com)
 *	and Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
 *
 *	TODO: Add support for high prio queues (currently disabled)
 */
#include <linux/autoconf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <asm/mipsregs.h>

#include "adm5120sw.h"

#include <asm/mach-adm5120/adm5120_info.h>
#include <asm/mach-adm5120/adm5120_irq.h>

MODULE_AUTHOR("Jeroen Vreeken (pe1rxq@amsat.org)");
MODULE_DESCRIPTION("ADM5120 ethernet switch driver");
MODULE_LICENSE("GPL");

/* default settings - unlimited TX and RX on all ports, default shaper mode */
static unsigned char bw_matrix[SW_DEVS] = {
	0, 0, 0, 0, 0, 0
};
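
/* Each byte of bw_matrix is the shaper setting for one switch port; 0 means
 * unlimited. adm5120_set_bw() writes these bytes verbatim into the BW_CTL
 * registers, so the exact encoding of nonzero values is defined by the
 * hardware, not by this driver. */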

static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SW_DEVS];
/* Lookup table port -> device */
static struct net_device *adm5120_port[SW_DEVS];

static struct adm5120_dma
	adm5120_dma_txh_v[ADM5120_DMA_TXH] __attribute__((aligned(16))),
	adm5120_dma_txl_v[ADM5120_DMA_TXL] __attribute__((aligned(16))),
	adm5120_dma_rxh_v[ADM5120_DMA_RXH] __attribute__((aligned(16))),
	adm5120_dma_rxl_v[ADM5120_DMA_RXL] __attribute__((aligned(16))),
	*adm5120_dma_txh,
	*adm5120_dma_txl,
	*adm5120_dma_rxh,
	*adm5120_dma_rxl;
static struct sk_buff
	*adm5120_skb_rxh[ADM5120_DMA_RXH],
	*adm5120_skb_rxl[ADM5120_DMA_RXL],
	*adm5120_skb_txh[ADM5120_DMA_TXH],
	*adm5120_skb_txl[ADM5120_DMA_TXL];
static int adm5120_rxli = 0;
static int adm5120_txli = 0;
/*static int adm5120_txhi = 0;*/
static int adm5120_if_open = 0;
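
/* All switch registers are accessed as volatile loads/stores relative to
 * SW_BASE, which is expected to be an uncached (KSEG1) mapping of the
 * switch register block; no explicit memory barriers are used. */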

static inline void adm5120_set_reg(unsigned int reg, unsigned long val)
{
	*(volatile unsigned long*)(SW_BASE+reg) = val;
}

static inline unsigned long adm5120_get_reg(unsigned int reg)
{
	return *(volatile unsigned long*)(SW_BASE+reg);
}
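
/* adm5120_rx_dma_update() hands an RX descriptor (back) to the hardware:
 * ADM5120_DMA_OWN transfers ownership of the buffer to the switch, and
 * ADM5120_DMA_RINGEND on the last descriptor makes the DMA engine wrap to
 * the start of the ring. While OWN is set the CPU must not touch the skb. */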

static inline void adm5120_rx_dma_update(struct adm5120_dma *dma,
	struct sk_buff *skb, int end)
{
	dma->len = ADM5120_DMA_RXSIZE;
	dma->data = ADM5120_DMA_ADDR(skb->data) |
		ADM5120_DMA_OWN | (end ? ADM5120_DMA_RINGEND : 0);
}

static int adm5120_rx(struct net_device *dev, int *budget)
{
	struct sk_buff *skb, *skbn;
	struct adm5120_sw *priv;
	struct net_device *cdev;
	struct adm5120_dma *dma;
	int port, len, quota;

	quota = min(dev->quota, *budget);
	dma = &adm5120_dma_rxl[adm5120_rxli];
	while (!(dma->data & ADM5120_DMA_OWN) && quota) {
		port = (dma->status & ADM5120_DMA_PORTID);
		port >>= ADM5120_DMA_PORTSHIFT;
		cdev = adm5120_port[port];
		if (cdev != dev) { /* The current packet belongs to a different device */
			if ((cdev == NULL) || !netif_running(cdev)) {
				/* discard (update with old skb) */
				skbn = NULL;
			} else {
				netif_rx_schedule(cdev); /* Start polling next device */
				return 1; /* return 1 -> More packets to process */
			}
		} else {
			skb = adm5120_skb_rxl[adm5120_rxli];
			len = (dma->status & ADM5120_DMA_LEN);
			len >>= ADM5120_DMA_LENSHIFT;

			priv = netdev_priv(dev);
			if (len <= 0 || len > ADM5120_DMA_RXSIZE ||
			    dma->status & ADM5120_DMA_FCSERR) {
				priv->stats.rx_errors++;
				skbn = NULL;
			} else {
				skbn = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
				if (skbn) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					dev->last_rx = jiffies;
					priv->stats.rx_packets++;
					priv->stats.rx_bytes += len;
					skb_reserve(skbn, NET_IP_ALIGN);
					adm5120_skb_rxl[adm5120_rxli] = skbn;
				} else {
					/* out of memory: reuse the old skb for this slot */
					printk(KERN_INFO "%s recycling!\n", dev->name);
				}
			}
		}
		adm5120_rx_dma_update(&adm5120_dma_rxl[adm5120_rxli],
			adm5120_skb_rxl[adm5120_rxli],
			(ADM5120_DMA_RXL-1 == adm5120_rxli));
		if (ADM5120_DMA_RXL == ++adm5120_rxli)
			adm5120_rxli = 0;
		dma = &adm5120_dma_rxl[adm5120_rxli];
		if (skbn) {
			netif_receive_skb(skb);
			dev->quota--;
			(*budget)--;
			quota--;
		}
	}
	/* If there are still packets to process, return 1 */
	if (!quota)
		return 1;

	/* No more packets to process, so disable the polling and re-enable
	 * the interrupts */
	netif_rx_complete(dev);
	adm5120_set_reg(ADM5120_INT_MASK,
		adm5120_get_reg(ADM5120_INT_MASK) &
		~(ADM5120_INT_RXL|ADM5120_INT_LFULL));
	return 0;
}

static irqreturn_t adm5120_sw_irq(int irq, void *dev_id)
{
	unsigned long intreg, intmask;
	int port;
	struct net_device *dev;

	intmask = adm5120_get_reg(ADM5120_INT_MASK);	/* Remember interrupt mask */
	adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);	/* Disable interrupts */

	intreg = adm5120_get_reg(ADM5120_INT_ST);	/* Read interrupt status */
	adm5120_set_reg(ADM5120_INT_ST, intreg);	/* Clear interrupt status */

	/* In NAPI operation the interrupts are disabled and the polling
	 * mechanism is activated. The interrupts are finally enabled again
	 * in the polling routine. */
	if (intreg & (ADM5120_INT_RXL|ADM5120_INT_LFULL)) {
		/* check rx buffer for port number */
		port = adm5120_dma_rxl[adm5120_rxli].status & ADM5120_DMA_PORTID;
		port >>= ADM5120_DMA_PORTSHIFT;
		dev = adm5120_port[port];
		if ((dev == NULL) || !netif_running(dev)) {
			/* discard (update with old skb) */
			adm5120_rx_dma_update(&adm5120_dma_rxl[adm5120_rxli],
				adm5120_skb_rxl[adm5120_rxli],
				(ADM5120_DMA_RXL-1 == adm5120_rxli));
			if (ADM5120_DMA_RXL == ++adm5120_rxli)
				adm5120_rxli = 0;
		} else {
			netif_rx_schedule(dev);
			intmask |= (ADM5120_INT_RXL|ADM5120_INT_LFULL);	/* Disable RX interrupts */
		}
	}

	if (intreg & ~(intmask))
		printk(KERN_INFO "adm5120sw: IRQ 0x%08X unexpected!\n",
			(unsigned int)(intreg & ~(intmask)));

	adm5120_set_reg(ADM5120_INT_MASK, intmask);

	return IRQ_HANDLED;
}
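
/* The VLAN matrix is one byte per VLAN group, each byte a bitmap of member
 * ports. For example (assuming, as the 0x3f masks elsewhere in this file
 * suggest, that bits 0-5 are the physical ports and bit 6 is the CPU port),
 * matrix[0] = 0x41 would put port 0 plus the CPU port into VLAN group 0.
 * adm5120_set_vlan() packs bytes 0-3 into VLAN_GI and bytes 4-5 into
 * VLAN_GII, then rebuilds the port -> net_device lookup table. */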

static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;
	int vlan_port, port;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	adm5120_set_reg(ADM5120_VLAN_GI, val);
	val = matrix[4] + (matrix[5]<<8);
	adm5120_set_reg(ADM5120_VLAN_GII, val);
	/* Now set/update the port vs. device lookup table */
	for (port = 0; port < SW_DEVS; port++) {
		for (vlan_port = 0; vlan_port < SW_DEVS &&
			!(matrix[vlan_port] & (0x00000001 << port)); vlan_port++)
			;
		if (vlan_port < SW_DEVS)
			adm5120_port[port] = adm5120_devs[vlan_port];
		else
			adm5120_port[port] = NULL;
	}
}

static void adm5120_set_bw(char *matrix)
{
	unsigned long val;

	/* Port 0 to 3 are set using the bandwidth control 0 register */
	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	adm5120_set_reg(ADM5120_BW_CTL0, val);

	/* Port 4 and 5 are set using the bandwidth control 1 register */
	val = matrix[4] + (matrix[5]<<8);
	if (matrix[5] & 1)
		adm5120_set_reg(ADM5120_BW_CTL1, val | 0x80000000);
	else
		adm5120_set_reg(ADM5120_BW_CTL1, val & ~0x80000000);

	printk(KERN_DEBUG "D: ctl0 0x%lx, ctl1 0x%lx\n",
		adm5120_get_reg(ADM5120_BW_CTL0),
		adm5120_get_reg(ADM5120_BW_CTL1));
}
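
/* The packing above is plain little-endian byte concatenation; e.g. a
 * matrix of {0x01, 0x02, 0x03, 0x04} for ports 0-3 yields
 * val = 0x01 + (0x02<<8) + (0x03<<16) + (0x04<<24) = 0x04030201 in BW_CTL0.
 * Bit 31 of BW_CTL1 appears to select the shaper mode mentioned in the
 * comment at bw_matrix above. */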

static int adm5120_sw_open(struct net_device *dev)
{
	unsigned long val;
	int i;

	netif_start_queue(dev);
	if (!adm5120_if_open++) {
		/* enable interrupts on first open */
		adm5120_set_reg(ADM5120_INT_MASK,
			adm5120_get_reg(ADM5120_INT_MASK) &
			~(ADM5120_INT_RXL|ADM5120_INT_LFULL));
	}
	/* enable (additional) port */
	val = adm5120_get_reg(ADM5120_PORT_CONF0);
	for (i = 0; i < SW_DEVS; i++) {
		if (dev == adm5120_devs[i])
			val &= ~adm5120_eth_vlans[i];
	}
	adm5120_set_reg(ADM5120_PORT_CONF0, val);
	return 0;
}
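
/* adm5120_if_open counts open interfaces: the first open unmasks the RX
 * interrupts and the last stop (below) masks them all again, while the
 * per-port enable bits in PORT_CONF0 are maintained independently on every
 * open/stop. */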

static int adm5120_sw_stop(struct net_device *dev)
{
	unsigned long val;
	int i;

	if (!--adm5120_if_open) {
		adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
	}
	/* disable port if not assigned to other devices */
	val = adm5120_get_reg(ADM5120_PORT_CONF0) | ADM5120_PORTDISALL;
	for (i = 0; i < SW_DEVS; i++) {
		if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
			val &= ~adm5120_eth_vlans[i];
	}
	adm5120_set_reg(ADM5120_PORT_CONF0, val);
	netif_stop_queue(dev);
	return 0;
}

static int adm5120_sw_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct adm5120_dma *dma;
	struct sk_buff **skbl = adm5120_skb_txl;
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned long data;

	dev->trans_start = jiffies;
	dma = &adm5120_dma_txl[adm5120_txli];
	if (dma->data & ADM5120_DMA_OWN) {
		/* We want to write a packet but the TX queue is still
		 * occupied by the DMA. We are faster than the DMA... */
		dev_kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}

	data = ADM5120_DMA_ADDR(skb->data) | ADM5120_DMA_OWN;
	if (adm5120_txli == ADM5120_DMA_TXL-1)
		data |= ADM5120_DMA_RINGEND;
	dma->status =
		((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << ADM5120_DMA_LENSHIFT) |
		(0x1 << priv->port);
	dma->len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;

	/* free old skbs here instead of tx completion interrupt:
	 * will hold some more memory allocated but reduces interrupts */
	if (skbl[adm5120_txli]) {
		dev_kfree_skb(skbl[adm5120_txli]);
	}
	skbl[adm5120_txli] = skb;

	dma->data = data;	/* Here we enable the buffer for the TX DMA machine */
	adm5120_set_reg(ADM5120_SEND_TRIG, ADM5120_SEND_TRIG_L);
	if (++adm5120_txli == ADM5120_DMA_TXL)
		adm5120_txli = 0;

	return 0;
}
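
/* Note the write ordering in adm5120_sw_tx(): every other descriptor field
 * is filled in before dma->data is written, because dma->data carries the
 * OWN bit and its store is what makes the descriptor visible to the TX DMA
 * engine (followed by the SEND_TRIG kick). */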

static void adm5120_tx_timeout(struct net_device *dev)
{
	printk(KERN_INFO "%s: TX timeout\n", dev->name);
}

static struct net_device_stats *adm5120_sw_stats(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	int portmask;
	unsigned long adm5120_cpup_conf_reg;

	portmask = adm5120_eth_vlans[priv->port] & 0x3f;

	adm5120_cpup_conf_reg = adm5120_get_reg(ADM5120_CPUP_CONF);

	if (dev->flags & IFF_PROMISC)
		adm5120_cpup_conf_reg &= ~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL);
	else
		adm5120_cpup_conf_reg |= (portmask << ADM5120_DISUNSHIFT);

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI || dev->mc_count)
		adm5120_cpup_conf_reg &= ~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL);
	else
		adm5120_cpup_conf_reg |= (portmask << ADM5120_DISMCSHIFT);

	/* If any port is configured to be in promiscuous mode, the Bridge
	 * Test Mode has to be activated, so that packets learned in another
	 * VLAN are still forwarded to the CPU.
	 *
	 * The difficult scenario is a bridge built on the CPU: assume port 0
	 * and the CPU port are in VLAN0, and port 1 and the CPU port are in
	 * VLAN1, with a CPU bridge between the two and both VLAN ports in
	 * promisc mode. A packet with ethernet source address 99 entering
	 * port 0 is forwarded to the CPU because the address is unknown; the
	 * CPU bridge sends it to VLAN1 and it leaves through port 1. When a
	 * packet with ethernet destination address 99 later arrives at
	 * port 1 in VLAN1, the switch has learned that this address is
	 * located at port 0 in VLAN0 and would therefore drop the packet.
	 * To still get such packets to the CPU, the Bridge Test Mode has to
	 * be activated. */

	/* Check if there is any vlan in promisc mode. */
	if (~adm5120_cpup_conf_reg & ADM5120_DISUNALL)
		adm5120_cpup_conf_reg |= ADM5120_BTM;	/* Set the BTM */
	else
		adm5120_cpup_conf_reg &= ~ADM5120_BTM;	/* Disable the BTM */

	adm5120_set_reg(ADM5120_CPUP_CONF, adm5120_cpup_conf_reg);

	return &((struct adm5120_sw *)netdev_priv(dev))->stats;
}

static void adm5120_set_multicast_list(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	int portmask;

	portmask = adm5120_eth_vlans[priv->port] & 0x3f;

	if (dev->flags & IFF_PROMISC)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISUNSHIFT));

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
			dev->mc_count)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISMCSHIFT));
}

static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;

	adm5120_set_reg(ADM5120_MAC_WT1,
		mac[2] | (mac[3]<<8) | (mac[4]<<16) | (mac[5]<<24));
	adm5120_set_reg(ADM5120_MAC_WT0, (priv->port<<3) |
		(mac[0]<<16) | (mac[1]<<24) | ADM5120_MAC_WRITE | ADM5120_VLAN_EN);

	while (!(adm5120_get_reg(ADM5120_MAC_WT0) & ADM5120_MAC_WRITE_DONE));
}
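
/* Writing a MAC table entry is a two-register operation: MAC_WT1 carries
 * the low four address bytes, MAC_WT0 the remaining two bytes plus the
 * port/VLAN index and the ADM5120_MAC_WRITE command bit. The busy-wait
 * above spins until the hardware acknowledges with MAC_WRITE_DONE. */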

static int adm5120_sw_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	adm5120_write_mac(dev);
	return 0;
}

static int adm5120_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct adm5120_sw_info info;
	struct adm5120_sw *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCGADMINFO:
		info.ports = adm5120_nrdevs;
		info.vlan = priv->port;
		err = copy_to_user(rq->ifr_data, &info, sizeof(info));
		if (err)
			return -EFAULT;
		break;
	case SIOCSMATRIX:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(adm5120_eth_vlans, rq->ifr_data,
			sizeof(adm5120_eth_vlans));
		if (err)
			return -EFAULT;
		adm5120_set_vlan(adm5120_eth_vlans);
		break;
	case SIOCGMATRIX:
		err = copy_to_user(rq->ifr_data, adm5120_eth_vlans,
			sizeof(adm5120_eth_vlans));
		if (err)
			return -EFAULT;
		break;
	case SIOCGETBW:
		err = copy_to_user(rq->ifr_data, bw_matrix, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		break;
	case SIOCSETBW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(bw_matrix, rq->ifr_data, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_bw(bw_matrix);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
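
/* The SIOC* commands above are this driver's private ioctls; their numbers
 * and struct adm5120_sw_info are defined in adm5120sw.h. A configuration
 * utility would pass a userspace buffer through ifr_data, e.g. (sketch
 * with made-up example values only; the real defaults come from
 * adm5120_eth_vlans):
 *
 *	struct ifreq ifr;
 *	char matrix[SW_DEVS] = { 0x41, 0x42, 0x44, 0x48, 0x50, 0x60 };
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (void *)matrix;
 *	ioctl(fd, SIOCSMATRIX, &ifr);
 */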

static void adm5120_dma_tx_init(struct adm5120_dma *dma, struct sk_buff **skbl,
	int num)
{
	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	dma[num-1].data |= ADM5120_DMA_RINGEND;
	memset(skbl, 0, sizeof(struct sk_buff *)*num);
}

static void adm5120_dma_rx_init(struct adm5120_dma *dma, struct sk_buff **skbl,
	int num)
{
	int i;

	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	for (i = 0; i < num; i++) {
		skbl[i] = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
		if (!skbl[i])
			break;	/* cleanup in the exit path stops at the first NULL */
		skb_reserve(skbl[i], NET_IP_ALIGN);
		adm5120_rx_dma_update(&dma[i], skbl[i], (num-1 == i));
	}
}
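
/* dev_alloc_skb(ADM5120_DMA_RXSIZE+16) leaves some headroom slack, and
 * skb_reserve(skb, NET_IP_ALIGN) (NET_IP_ALIGN is 2 on most platforms)
 * shifts the buffer so that after the 14-byte ethernet header the IP
 * header ends up word-aligned. The RX refill path in adm5120_rx() uses
 * the same pattern. */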

static int __init adm5120_sw_init(void)
{
	int i, err;
	struct net_device *dev;

	err = request_irq(ADM5120_IRQ_SWITCH, adm5120_sw_irq, 0,
		"ethernet switch", NULL);
	if (err)
		goto out;

	adm5120_nrdevs = adm5120_eth_num_ports;

	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_DISCCPUPORT | ADM5120_CRC_PADDING |
		ADM5120_DISUNALL | ADM5120_DISMCALL);
	adm5120_set_reg(ADM5120_PORT_CONF0,
		ADM5120_ENMC | ADM5120_ENBP | ADM5120_PORTDISALL);

	adm5120_set_reg(ADM5120_PHY_CNTL2, adm5120_get_reg(ADM5120_PHY_CNTL2) |
		ADM5120_AUTONEG | ADM5120_NORMAL | ADM5120_AUTOMDIX);
	adm5120_set_reg(ADM5120_PHY_CNTL3, adm5120_get_reg(ADM5120_PHY_CNTL3) |
		ADM5120_PHY_NTH);

	/* Force all packets from all ports to be low priority */
	adm5120_set_reg(ADM5120_PRI_CNTL, 0);

	adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
	adm5120_set_reg(ADM5120_INT_ST, ADM5120_INTMASKALL);

	adm5120_dma_txh = (void *)KSEG1ADDR((u32)adm5120_dma_txh_v);
	adm5120_dma_txl = (void *)KSEG1ADDR((u32)adm5120_dma_txl_v);
	adm5120_dma_rxh = (void *)KSEG1ADDR((u32)adm5120_dma_rxh_v);
	adm5120_dma_rxl = (void *)KSEG1ADDR((u32)adm5120_dma_rxl_v);

	adm5120_dma_tx_init(adm5120_dma_txh, adm5120_skb_txh, ADM5120_DMA_TXH);
	adm5120_dma_tx_init(adm5120_dma_txl, adm5120_skb_txl, ADM5120_DMA_TXL);
	adm5120_dma_rx_init(adm5120_dma_rxh, adm5120_skb_rxh, ADM5120_DMA_RXH);
	adm5120_dma_rx_init(adm5120_dma_rxl, adm5120_skb_rxl, ADM5120_DMA_RXL);
	adm5120_set_reg(ADM5120_SEND_HBADDR, KSEG1ADDR(adm5120_dma_txh));
	adm5120_set_reg(ADM5120_SEND_LBADDR, KSEG1ADDR(adm5120_dma_txl));
	adm5120_set_reg(ADM5120_RECEIVE_HBADDR, KSEG1ADDR(adm5120_dma_rxh));
	adm5120_set_reg(ADM5120_RECEIVE_LBADDR, KSEG1ADDR(adm5120_dma_rxl));
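
	/* The descriptor rings live in static (cached KSEG0) arrays; the
	 * KSEG1ADDR() conversions above make the CPU access them through
	 * the uncached KSEG1 window, so CPU and switch DMA always see a
	 * consistent view of the descriptors without explicit cache
	 * flushes. The four ring base registers are programmed with the
	 * same addresses. */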

	for (i = 0; i < SW_DEVS; i++) {
		adm5120_devs[i] = alloc_etherdev(sizeof(struct adm5120_sw));
		if (!adm5120_devs[i]) {
			err = -ENOMEM;
			goto out_int;
		}

		dev = adm5120_devs[i];
		SET_MODULE_OWNER(dev);
		memset(netdev_priv(dev), 0, sizeof(struct adm5120_sw));
		((struct adm5120_sw *)netdev_priv(dev))->port = i;
		dev->base_addr = SW_BASE;
		dev->irq = ADM5120_IRQ_SWITCH;
		dev->open = adm5120_sw_open;
		dev->hard_start_xmit = adm5120_sw_tx;
		dev->stop = adm5120_sw_stop;
		dev->get_stats = adm5120_sw_stats;
		dev->set_multicast_list = adm5120_set_multicast_list;
		dev->do_ioctl = adm5120_do_ioctl;
		dev->tx_timeout = adm5120_tx_timeout;
		dev->watchdog_timeo = ETH_TX_TIMEOUT;
		dev->set_mac_address = adm5120_sw_set_mac_address;
		dev->poll = adm5120_rx;
		dev->weight = 64;

		memcpy(dev->dev_addr, adm5120_eth_macs[i], 6);
		adm5120_write_mac(dev);

		if ((err = register_netdev(dev))) {
			free_netdev(dev);
			goto out_int;
		}
		printk(KERN_INFO "%s: ADM5120 switch port%d\n", dev->name, i);
	}
	/* setup vlan/port mapping after devs are filled up */
	adm5120_set_vlan(adm5120_eth_vlans);

	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_CRC_PADDING | ADM5120_DISUNALL | ADM5120_DISMCALL);

	return 0;

out_int:
	/* Undo everything that did succeed */
	for (; i; i--) {
		unregister_netdev(adm5120_devs[i-1]);
		free_netdev(adm5120_devs[i-1]);
	}
	free_irq(ADM5120_IRQ_SWITCH, NULL);
out:
	printk(KERN_ERR "ADM5120 Ethernet switch init failed\n");
	return err;
}

static void __exit adm5120_sw_exit(void)
{
	int i;

	for (i = 0; i < SW_DEVS; i++) {
		unregister_netdev(adm5120_devs[i]);
		free_netdev(adm5120_devs[i]);
	}

	free_irq(ADM5120_IRQ_SWITCH, NULL);

	for (i = 0; i < ADM5120_DMA_RXH; i++) {
		if (!adm5120_skb_rxh[i])
			break;
		kfree_skb(adm5120_skb_rxh[i]);
	}
	for (i = 0; i < ADM5120_DMA_RXL; i++) {
		if (!adm5120_skb_rxl[i])
			break;
		kfree_skb(adm5120_skb_rxl[i]);
	}
}

module_init(adm5120_sw_init);
module_exit(adm5120_sw_exit);