/*
 *	ADM5120 built-in ethernet switch driver
 *
 *	Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 *
 *	Inspiration for this driver came from the original ADMtek 2.4
 *	driver, Copyright ADMtek Inc.
 *
 *	NAPI extensions by Thomas Langer (Thomas.Langer@infineon.com)
 *	and Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
 *
 *	TODO: Add support for the high priority queues (currently disabled)
 */
#include <linux/autoconf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>

#include <asm/mipsregs.h>

#include "adm5120sw.h"

#include <asm/mach-adm5120/adm5120_info.h>
#include <asm/mach-adm5120/adm5120_irq.h>
MODULE_AUTHOR("Jeroen Vreeken (pe1rxq@amsat.org)");
MODULE_DESCRIPTION("ADM5120 ethernet switch driver");
MODULE_LICENSE("GPL");
/*
 *	The ADM5120 uses an internal matrix to determine which ports
 *	belong to which VLAN.
 *	The default generates a VLAN (and device) for each port
 *	(including MII port) and the CPU port is part of all of them.
 *
 *	Another example, one big switch and everything mapped to eth0:
 *	0x7f, 0x00, 0x00, 0x00, 0x00, 0x00
 */
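/*
 * Each matrix byte describes one VLAN (and thus one net_device):
 * bits 0-5 select the member switch ports (port 5 being the MII port
 * in the default map below) and bit 6 adds the CPU port. The default
 * entry 0x41, for instance, is CPU port + port 0.
 */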
static unsigned char vlan_matrix[SW_DEVS] = {
	0x41, 0x42, 0x44, 0x48, 0x50, 0x60
};
/* default settings - unlimited TX and RX on all ports, default shaper mode */
static unsigned char bw_matrix[SW_DEVS] = {
	0, 0, 0, 0, 0, 0
};
static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SW_DEVS];
/* Lookup table port -> device */
static struct net_device *adm5120_port[SW_DEVS];
static struct adm5120_dma
	adm5120_dma_txh_v[ADM5120_DMA_TXH] __attribute__((aligned(16))),
	adm5120_dma_txl_v[ADM5120_DMA_TXL] __attribute__((aligned(16))),
	adm5120_dma_rxh_v[ADM5120_DMA_RXH] __attribute__((aligned(16))),
	adm5120_dma_rxl_v[ADM5120_DMA_RXL] __attribute__((aligned(16))),
	*adm5120_dma_txh,
	*adm5120_dma_txl,
	*adm5120_dma_rxh,
	*adm5120_dma_rxl;
static struct sk_buff
	*adm5120_skb_rxh[ADM5120_DMA_RXH],
	*adm5120_skb_rxl[ADM5120_DMA_RXL],
	*adm5120_skb_txh[ADM5120_DMA_TXH],
	*adm5120_skb_txl[ADM5120_DMA_TXL];
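/*
 * The *_v arrays above are the descriptor rings themselves; the
 * matching pointers are filled in by adm5120_sw_init(), which aliases
 * the rings through KSEG1 so that driver and hardware see uncached
 * memory. Each ring has a parallel sk_buff pointer array so the
 * driver can later find the skb that owns a descriptor's buffer.
 */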
static int adm5120_rxli = 0;
static int adm5120_txli = 0;
/*static int adm5120_txhi = 0;*/
static int adm5120_if_open = 0;
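/*
 * adm5120_rxli and adm5120_txli are the current positions in the low
 * priority RX and TX rings. adm5120_if_open counts open interfaces:
 * interrupts are enabled on the first open and fully masked again on
 * the last close.
 */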
static inline void adm5120_set_reg(unsigned int reg, unsigned long val)
{
	*(volatile unsigned long *)(SW_BASE+reg) = val;
}

static inline unsigned long adm5120_get_reg(unsigned int reg)
{
	return *(volatile unsigned long *)(SW_BASE+reg);
}
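/*
 * (Re)arm one RX descriptor with the given skb's buffer. Setting
 * ADM5120_DMA_OWN hands the descriptor to the switch; the last
 * descriptor additionally carries ADM5120_DMA_RINGEND so the
 * hardware wraps back to the start of the ring.
 */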
static inline void adm5120_rx_dma_update(struct adm5120_dma *dma,
	struct sk_buff *skb, int end)
{
	dma->len = ADM5120_DMA_RXSIZE;
	dma->data = ADM5120_DMA_ADDR(skb->data) |
		ADM5120_DMA_OWN | (end ? ADM5120_DMA_RINGEND : 0);
}
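/*
 * NAPI poll handler (old netdev->poll interface): *budget and
 * dev->quota limit how many packets may be consumed per call.
 * Returning 1 means more work remains; returning 0 means the ring is
 * drained, polling has been stopped and RX interrupts re-enabled.
 */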
static int adm5120_rx(struct net_device *dev, int *budget)
{
	struct sk_buff *skb, *skbn;
	struct adm5120_sw *priv;
	struct net_device *cdev;
	struct adm5120_dma *dma;
	int port, len, quota;

	quota = min(dev->quota, *budget);
	dma = &adm5120_dma_rxl[adm5120_rxli];
	while (!(dma->data & ADM5120_DMA_OWN) && quota) {
		port = (dma->status & ADM5120_DMA_PORTID);
		port >>= ADM5120_DMA_PORTSHIFT;
		cdev = adm5120_port[port];
		if (cdev != dev) { /* The current packet belongs to a different device */
			if ((cdev == NULL) || !netif_running(cdev)) {
				/* discard (update with old skb) */
				adm5120_rx_dma_update(dma,
					adm5120_skb_rxl[adm5120_rxli],
					(ADM5120_DMA_RXL-1 == adm5120_rxli));
				if (ADM5120_DMA_RXL == ++adm5120_rxli)
					adm5120_rxli = 0;
				dma = &adm5120_dma_rxl[adm5120_rxli];
				continue;
			}
			netif_rx_schedule(cdev); /* Start polling next device */
			return 1; /* return 1 -> More packets to process */
		}
		skb = adm5120_skb_rxl[adm5120_rxli];
		len = (dma->status & ADM5120_DMA_LEN);
		len >>= ADM5120_DMA_LENSHIFT;

		priv = netdev_priv(dev);
		if (len <= 0 || len > ADM5120_DMA_RXSIZE ||
			dma->status & ADM5120_DMA_FCSERR) {
			priv->stats.rx_errors++;
			skbn = NULL;
		} else {
			skbn = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
			if (skbn) {
				skb_put(skb, len);
				skb->protocol = eth_type_trans(skb, dev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				dev->last_rx = jiffies;
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += len;
				skb_reserve(skbn, NET_IP_ALIGN);
				adm5120_skb_rxl[adm5120_rxli] = skbn;
			} else {
				/* allocation failed: keep the old skb and drop the packet */
				printk(KERN_INFO "%s recycling!\n", dev->name);
			}
		}
		adm5120_rx_dma_update(&adm5120_dma_rxl[adm5120_rxli],
			adm5120_skb_rxl[adm5120_rxli],
			(ADM5120_DMA_RXL-1 == adm5120_rxli));
		if (ADM5120_DMA_RXL == ++adm5120_rxli)
			adm5120_rxli = 0;
		dma = &adm5120_dma_rxl[adm5120_rxli];
		if (skbn) {
			netif_receive_skb(skb);
			dev->quota--;
			(*budget)--;
			quota--;
		}
	}
	/* If there are still packets to process, return 1 */
	if (!quota)
		return 1;

	/* No more packets to process, so disable the polling and reenable the interrupts */
	netif_rx_complete(dev);
	adm5120_set_reg(ADM5120_INT_MASK,
		adm5120_get_reg(ADM5120_INT_MASK) &
		~(ADM5120_INT_RXL|ADM5120_INT_LFULL));
	return 0;
}
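/*
 * All switch ports share a single interrupt line (the handler is
 * registered with a NULL dev_id). On RX the handler only peeks at the
 * next low priority RX descriptor to find the port, and thus the
 * net_device, the packet belongs to; further processing is deferred
 * to NAPI via netif_rx_schedule().
 */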
static irqreturn_t adm5120_sw_irq(int irq, void *dev_id)
{
	unsigned long intreg, intmask;
	int port;
	struct net_device *dev;

	intmask = adm5120_get_reg(ADM5120_INT_MASK);	/* Remember interrupt mask */
	adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);	/* Disable interrupts */

	intreg = adm5120_get_reg(ADM5120_INT_ST);	/* Read interrupt status */
	adm5120_set_reg(ADM5120_INT_ST, intreg);	/* Clear interrupt status */

	/* In NAPI operation the interrupts are disabled and the polling mechanism
	 * is activated. The interrupts are finally enabled again in the polling routine. */
	if (intreg & (ADM5120_INT_RXL|ADM5120_INT_LFULL)) {
		/* check rx buffer for port number */
		port = adm5120_dma_rxl[adm5120_rxli].status & ADM5120_DMA_PORTID;
		port >>= ADM5120_DMA_PORTSHIFT;
		dev = adm5120_port[port];
		if ((dev == NULL) || !netif_running(dev)) {
			/* discard (update with old skb) */
			adm5120_rx_dma_update(&adm5120_dma_rxl[adm5120_rxli],
				adm5120_skb_rxl[adm5120_rxli],
				(ADM5120_DMA_RXL-1 == adm5120_rxli));
			if (ADM5120_DMA_RXL == ++adm5120_rxli)
				adm5120_rxli = 0;
		} else {
			netif_rx_schedule(dev);
			intmask |= (ADM5120_INT_RXL|ADM5120_INT_LFULL);	/* Disable RX interrupts */
		}
	}

	if (intreg & ~(intmask))
		printk(KERN_INFO "adm5120sw: IRQ 0x%08X unexpected!\n",
			(unsigned int)(intreg & ~(intmask)));

	adm5120_set_reg(ADM5120_INT_MASK, intmask);

	return IRQ_HANDLED;
}
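/*
 * Program the VLAN matrix into the switch (the port maps of VLANs 0-3
 * go into VLAN_GI, those of VLANs 4-5 into VLAN_GII) and rebuild the
 * port -> net_device lookup table. A port that is a member of several
 * VLANs is assigned to the lowest numbered matching device; a port in
 * no VLAN gets NULL.
 */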
static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;
	int vlan_port, port;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	adm5120_set_reg(ADM5120_VLAN_GI, val);
	val = matrix[4] + (matrix[5]<<8);
	adm5120_set_reg(ADM5120_VLAN_GII, val);
	/* Now set/update the port vs. device lookup table */
	for (port = 0; port < SW_DEVS; port++) {
		for (vlan_port = 0; vlan_port < SW_DEVS &&
			!(matrix[vlan_port] & (0x00000001 << port)); vlan_port++)
			;
		if (vlan_port < SW_DEVS)
			adm5120_port[port] = adm5120_devs[vlan_port];
		else
			adm5120_port[port] = NULL;
	}
}
static void adm5120_set_bw(char *matrix)
{
	unsigned long val;

	/* Ports 0 to 3 are set using the bandwidth control 0 register */
	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	adm5120_set_reg(ADM5120_BW_CTL0, val);

	/* Ports 4 and 5 are set using the bandwidth control 1 register;
	 * bit 7 of matrix[5] selects the shaper mode (bit 31 of the register) */
	val = matrix[4] + (matrix[5]<<8);
	if (matrix[5] & 0x80)
		adm5120_set_reg(ADM5120_BW_CTL1, val | 0x80000000);
	else
		adm5120_set_reg(ADM5120_BW_CTL1, val & ~0x80000000);

	printk(KERN_DEBUG "D: ctl0 0x%lx, ctl1 0x%lx\n",
		adm5120_get_reg(ADM5120_BW_CTL0),
		adm5120_get_reg(ADM5120_BW_CTL1));
}
static int adm5120_sw_open(struct net_device *dev)
{
	unsigned long val;
	int i;

	netif_start_queue(dev);
	if (!adm5120_if_open++) {
		/* enable interrupts on first open */
		adm5120_set_reg(ADM5120_INT_MASK,
			adm5120_get_reg(ADM5120_INT_MASK) &
			~(ADM5120_INT_RXL|ADM5120_INT_LFULL));
	}
	/* enable (additional) port */
	val = adm5120_get_reg(ADM5120_PORT_CONF0);
	for (i = 0; i < SW_DEVS; i++) {
		if (dev == adm5120_devs[i])
			val &= ~vlan_matrix[i];
	}
	adm5120_set_reg(ADM5120_PORT_CONF0, val);

	return 0;
}
static int adm5120_sw_stop(struct net_device *dev)
{
	unsigned long val;
	int i;

	if (!--adm5120_if_open) {
		/* mask all interrupts on last close */
		adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
	}
	/* disable port if not assigned to other devices */
	val = adm5120_get_reg(ADM5120_PORT_CONF0) | ADM5120_PORTDISALL;
	for (i = 0; i < SW_DEVS; i++) {
		if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
			val &= ~vlan_matrix[i];
	}
	adm5120_set_reg(ADM5120_PORT_CONF0, val);
	netif_stop_queue(dev);

	return 0;
}
static int adm5120_sw_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct adm5120_dma *dma;
	struct sk_buff **skbl = adm5120_skb_txl;
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned long data;

	dev->trans_start = jiffies;
	dma = &adm5120_dma_txl[adm5120_txli];
	if (dma->data & ADM5120_DMA_OWN) {
		/* We want to write a packet but the TX queue is still
		 * occupied by the DMA. We are faster than the DMA... */
		dev_kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}
	data = ADM5120_DMA_ADDR(skb->data) | ADM5120_DMA_OWN;
	if (adm5120_txli == ADM5120_DMA_TXL-1)
		data |= ADM5120_DMA_RINGEND;
	dma->status =
		((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << ADM5120_DMA_LENSHIFT) |
		(0x1 << priv->port);

	dma->len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;

	/* free old skbs here instead of in a tx completion interrupt:
	 * will hold some more memory allocated but reduces interrupts */
	if (skbl[adm5120_txli]) {
		dev_kfree_skb(skbl[adm5120_txli]);
	}
	skbl[adm5120_txli] = skb;

	dma->data = data; /* Here we enable the buffer for the TX DMA machine */
	adm5120_set_reg(ADM5120_SEND_TRIG, ADM5120_SEND_TRIG_L);
	if (++adm5120_txli == ADM5120_DMA_TXL)
		adm5120_txli = 0;

	return 0;
}

static void adm5120_tx_timeout(struct net_device *dev)
{
	printk(KERN_INFO "%s: TX timeout\n", dev->name);
}
static struct net_device_stats *adm5120_sw_stats(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	int portmask;
	unsigned long adm5120_cpup_conf_reg;

	portmask = vlan_matrix[priv->port] & 0x3f;

	adm5120_cpup_conf_reg = adm5120_get_reg(ADM5120_CPUP_CONF);

	if (dev->flags & IFF_PROMISC)
		adm5120_cpup_conf_reg &= ~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL);
	else
		adm5120_cpup_conf_reg |= (portmask << ADM5120_DISUNSHIFT);

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI || dev->mc_count)
		adm5120_cpup_conf_reg &= ~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL);
	else
		adm5120_cpup_conf_reg |= (portmask << ADM5120_DISMCSHIFT);

	/* If any port is configured to be in promiscuous mode, the Bridge
	 * Test Mode has to be activated. It causes packets learned in
	 * another VLAN to be forwarded to the CPU as well.
	 *
	 * The difficult scenario is when we want to build a bridge on the
	 * CPU. Assume we have port0 and the CPU port in VLAN0, and port1
	 * and the CPU port in VLAN1. Now we build a bridge on the CPU
	 * between VLAN0 and VLAN1. Both ports of the VLANs are set in
	 * promisc mode. Now assume a packet with ethernet source address
	 * 99 enters port 0. It will be forwarded to the CPU because it is
	 * unknown. The bridge in the CPU will then send it to VLAN1, and
	 * it goes out at port 1. When a packet with ethernet destination
	 * address 99 later comes in at port 1 in VLAN1, the switch has
	 * already learned that this address is located at port 0 in VLAN0,
	 * so it will drop the packet. To avoid this and still deliver the
	 * packet to the CPU, the Bridge Test Mode has to be activated. */

	/* Check if there is any vlan in promisc mode. */
	if (~adm5120_cpup_conf_reg & ADM5120_DISUNALL)
		adm5120_cpup_conf_reg |= ADM5120_BTM; /* Set the BTM */
	else
		adm5120_cpup_conf_reg &= ~ADM5120_BTM; /* Disable the BTM */

	adm5120_set_reg(ADM5120_CPUP_CONF, adm5120_cpup_conf_reg);

	return &((struct adm5120_sw *)netdev_priv(dev))->stats;
}
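/*
 * Note that adm5120_sw_stats() above refreshes the same CPUP_CONF
 * unknown/multicast disable bits, so the filter state is rewritten
 * both on stats reads and on multicast list changes.
 */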
static void adm5120_set_multicast_list(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	int portmask;

	portmask = vlan_matrix[priv->port] & 0x3f;

	if (dev->flags & IFF_PROMISC)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISUNSHIFT));

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
		dev->mc_count)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISMCSHIFT));
}
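/*
 * Write the device's MAC address into the switch's address table for
 * this port/VLAN. The entry is posted through the MAC_WT0/WT1
 * register pair; the function then busy-waits until the switch
 * acknowledges the write via ADM5120_MAC_WRITE_DONE.
 */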
static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;

	adm5120_set_reg(ADM5120_MAC_WT1,
		mac[2] | (mac[3]<<8) | (mac[4]<<16) | (mac[5]<<24));
	adm5120_set_reg(ADM5120_MAC_WT0, (priv->port<<3) |
		(mac[0]<<16) | (mac[1]<<24) | ADM5120_MAC_WRITE | ADM5120_VLAN_EN);

	while (!(adm5120_get_reg(ADM5120_MAC_WT0) & ADM5120_MAC_WRITE_DONE))
		;
}
static int adm5120_sw_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	adm5120_write_mac(dev);
	return 0;
}
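/*
 * The private ioctls handled below (SIOCGADMINFO, SIOCSMATRIX,
 * SIOCGMATRIX, SIOCGETBW, SIOCSETBW) are declared in adm5120sw.h.
 * A hypothetical userspace sketch for reading the VLAN matrix:
 *
 *	unsigned char matrix[6];
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)matrix;
 *	if (ioctl(fd, SIOCGMATRIX, &ifr) == 0)
 *		;	// matrix[] now holds one VLAN bitmap per device
 */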
static int adm5120_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct adm5120_sw_info info;
	struct adm5120_sw *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCGADMINFO:
		info.ports = adm5120_nrdevs;
		info.vlan = priv->port;
		err = copy_to_user(rq->ifr_data, &info, sizeof(info));
		if (err)
			return -EFAULT;
		break;
	case SIOCSMATRIX:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(vlan_matrix, rq->ifr_data,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_vlan(vlan_matrix);
		break;
	case SIOCGMATRIX:
		err = copy_to_user(rq->ifr_data, vlan_matrix,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		break;
	case SIOCGETBW:
		err = copy_to_user(rq->ifr_data, bw_matrix, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		break;
	case SIOCSETBW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(bw_matrix, rq->ifr_data, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_bw(bw_matrix);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static void adm5120_dma_tx_init(struct adm5120_dma *dma, struct sk_buff **skbl,
	int num)
{
	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	dma[num-1].data |= ADM5120_DMA_RINGEND;
	memset(skbl, 0, sizeof(struct sk_buff *)*num);
}
static void adm5120_dma_rx_init(struct adm5120_dma *dma, struct sk_buff **skbl,
	int num)
{
	int i;

	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	for (i = 0; i < num; i++) {
		skbl[i] = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
		if (!skbl[i])
			break;	/* out of memory, the rest of the ring stays empty */
		skb_reserve(skbl[i], NET_IP_ALIGN);
		adm5120_rx_dma_update(&dma[i], skbl[i], (num-1 == i));
	}
}
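/*
 * Module init: grab the shared switch IRQ, program the global switch
 * configuration, alias the descriptor rings through KSEG1 (uncached)
 * and point the switch at them, then register one net_device per
 * configured port. The VLAN/port mapping is applied last, once all
 * devices exist.
 */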
static int __init adm5120_sw_init(void)
{
	int i, err;
	struct net_device *dev;

	err = request_irq(ADM5120_IRQ_SWITCH, adm5120_sw_irq, 0, "ethernet switch", NULL);
	if (err)
		goto out;

	adm5120_nrdevs = adm5120_eth_num_ports;

	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_DISCCPUPORT | ADM5120_CRC_PADDING |
		ADM5120_DISUNALL | ADM5120_DISMCALL);
	adm5120_set_reg(ADM5120_PORT_CONF0, ADM5120_ENMC | ADM5120_ENBP | ADM5120_PORTDISALL);

	adm5120_set_reg(ADM5120_PHY_CNTL2, adm5120_get_reg(ADM5120_PHY_CNTL2) |
		ADM5120_AUTONEG | ADM5120_NORMAL | ADM5120_AUTOMDIX);
	adm5120_set_reg(ADM5120_PHY_CNTL3, adm5120_get_reg(ADM5120_PHY_CNTL3) |
		ADM5120_PHY_NTH);

	adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
	adm5120_set_reg(ADM5120_INT_ST, ADM5120_INTMASKALL);

	adm5120_dma_txh = (void *)KSEG1ADDR((u32)adm5120_dma_txh_v);
	adm5120_dma_txl = (void *)KSEG1ADDR((u32)adm5120_dma_txl_v);
	adm5120_dma_rxh = (void *)KSEG1ADDR((u32)adm5120_dma_rxh_v);
	adm5120_dma_rxl = (void *)KSEG1ADDR((u32)adm5120_dma_rxl_v);

	adm5120_dma_tx_init(adm5120_dma_txh, adm5120_skb_txh, ADM5120_DMA_TXH);
	adm5120_dma_tx_init(adm5120_dma_txl, adm5120_skb_txl, ADM5120_DMA_TXL);
	adm5120_dma_rx_init(adm5120_dma_rxh, adm5120_skb_rxh, ADM5120_DMA_RXH);
	adm5120_dma_rx_init(adm5120_dma_rxl, adm5120_skb_rxl, ADM5120_DMA_RXL);
	adm5120_set_reg(ADM5120_SEND_HBADDR, KSEG1ADDR(adm5120_dma_txh));
	adm5120_set_reg(ADM5120_SEND_LBADDR, KSEG1ADDR(adm5120_dma_txl));
	adm5120_set_reg(ADM5120_RECEIVE_HBADDR, KSEG1ADDR(adm5120_dma_rxh));
	adm5120_set_reg(ADM5120_RECEIVE_LBADDR, KSEG1ADDR(adm5120_dma_rxl));

	for (i = 0; i < adm5120_nrdevs; i++) {
		adm5120_devs[i] = alloc_etherdev(sizeof(struct adm5120_sw));
		if (!adm5120_devs[i]) {
			err = -ENOMEM;
			goto out_int;
		}

		dev = adm5120_devs[i];
		SET_MODULE_OWNER(dev);
		memset(netdev_priv(dev), 0, sizeof(struct adm5120_sw));
		((struct adm5120_sw *)netdev_priv(dev))->port = i;
		dev->base_addr = SW_BASE;
		dev->irq = ADM5120_IRQ_SWITCH;
		dev->open = adm5120_sw_open;
		dev->hard_start_xmit = adm5120_sw_tx;
		dev->stop = adm5120_sw_stop;
		dev->get_stats = adm5120_sw_stats;
		dev->set_multicast_list = adm5120_set_multicast_list;
		dev->do_ioctl = adm5120_do_ioctl;
		dev->tx_timeout = adm5120_tx_timeout;
		dev->watchdog_timeo = ETH_TX_TIMEOUT;
		dev->set_mac_address = adm5120_sw_set_mac_address;
		dev->poll = adm5120_rx;
		dev->weight = 64;

		memcpy(dev->dev_addr, adm5120_eth_macs[i], 6);
		adm5120_write_mac(dev);

		if ((err = register_netdev(dev))) {
			free_netdev(dev);
			goto out_int;
		}
		printk(KERN_INFO "%s: ADM5120 switch port%d\n", dev->name, i);
	}
	/* setup vlan/port mapping after devs are filled up */
	adm5120_set_vlan(vlan_matrix);

	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_CRC_PADDING | ADM5120_DISUNALL | ADM5120_DISMCALL);

	return 0;

out_int:
	/* Undo everything that did succeed */
	for (; i; i--) {
		unregister_netdev(adm5120_devs[i-1]);
		free_netdev(adm5120_devs[i-1]);
	}
	free_irq(ADM5120_IRQ_SWITCH, NULL);
out:
	printk(KERN_ERR "ADM5120 Ethernet switch init failed\n");
	return err;
}
static void __exit adm5120_sw_exit(void)
{
	int i;

	for (i = 0; i < adm5120_nrdevs; i++) {
		unregister_netdev(adm5120_devs[i]);
		free_netdev(adm5120_devs[i]);
	}

	free_irq(ADM5120_IRQ_SWITCH, NULL);

	for (i = 0; i < ADM5120_DMA_RXH; i++) {
		if (!adm5120_skb_rxh[i])
			break;
		kfree_skb(adm5120_skb_rxh[i]);
	}
	for (i = 0; i < ADM5120_DMA_RXL; i++) {
		if (!adm5120_skb_rxl[i])
			break;
		kfree_skb(adm5120_skb_rxl[i]);
	}
}
module_init(adm5120_sw_init);
module_exit(adm5120_sw_exit);