/*
 * ADM5120 built in ethernet switch driver
 *
 * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 *
 * Inspiration for this driver came from the original ADMtek 2.4
 * driver, Copyright ADMtek Inc.
 *
 * NAPI extensions by Thomas Langer (Thomas.Langer@infineon.com)
 * and Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
 *
 * TODO: Add support for the high prio queues (currently disabled)
 *
 */
#include <linux/autoconf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/io.h>
#include "adm5120sw.h"

#include <asm/mach-adm5120/adm5120_info.h>
#include <asm/mach-adm5120/adm5120_irq.h>

MODULE_AUTHOR("Jeroen Vreeken (pe1rxq@amsat.org)");
MODULE_DESCRIPTION("ADM5120 ethernet switch driver");
MODULE_LICENSE("GPL");

/*
 * The ADM5120 uses an internal matrix to determine which ports
 * belong to which VLAN.
 * The default generates a VLAN (and device) for each port
 * (including the MII port) and the CPU port is part of all of them.
 *
 * Another example, one big switch and everything mapped to eth0:
 * 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00
 */
static unsigned char vlan_matrix[SW_DEVS] = {
	0x41, 0x42, 0x44, 0x48, 0x50, 0x60
};
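
/*
 * Note on the encoding (as implied by the defaults above): each matrix
 * entry is a port bitmask, bits 0-5 selecting switch ports 0-5 (port 5
 * being the MII port) and bit 6 (0x40) the CPU port, which is why every
 * default entry has 0x40 set and the "one big switch" example is 0x7f.
 */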

/* default settings - unlimited TX and RX on all ports, default shaper mode */
static unsigned char bw_matrix[SW_DEVS] = {
	0, 0, 0, 0, 0, 0
};

static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SW_DEVS];
/* Lookup table port -> device */
static struct net_device *adm5120_port[SW_DEVS];

static struct adm5120_dma
	adm5120_dma_txh_v[ADM5120_DMA_TXH] __attribute__((aligned(16))),
	adm5120_dma_txl_v[ADM5120_DMA_TXL] __attribute__((aligned(16))),
	adm5120_dma_rxh_v[ADM5120_DMA_RXH] __attribute__((aligned(16))),
	adm5120_dma_rxl_v[ADM5120_DMA_RXL] __attribute__((aligned(16))),
	*adm5120_dma_txh,
	*adm5120_dma_txl,
	*adm5120_dma_rxh,
	*adm5120_dma_rxl;
static struct sk_buff
	*adm5120_skb_rxh[ADM5120_DMA_RXH],
	*adm5120_skb_rxl[ADM5120_DMA_RXL],
	*adm5120_skb_txh[ADM5120_DMA_TXH],
	*adm5120_skb_txl[ADM5120_DMA_TXL];
static int adm5120_rxli = 0;
static int adm5120_txli = 0;
/*static int adm5120_txhi = 0;*/
static int adm5120_if_open = 0;

static inline void adm5120_set_reg(unsigned int reg, unsigned long val)
{
	*(volatile unsigned long *)(SW_BASE + reg) = val;
}

static inline unsigned long adm5120_get_reg(unsigned int reg)
{
	return *(volatile unsigned long *)(SW_BASE + reg);
}

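/*
 * Hand an RX descriptor back to the switch: reset status/control, set
 * the buffer length and address, transfer ownership to the hardware
 * and, for the last descriptor, mark the end of the ring.
 */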
static inline void adm5120_rx_dma_update(struct adm5120_dma *dma,
		struct sk_buff *skb, int end)
{
	dma->status = 0;
	dma->cntl = 0;
	dma->len = ADM5120_DMA_RXSIZE;
	dma->data = ADM5120_DMA_ADDR(skb->data) |
		ADM5120_DMA_OWN | (end ? ADM5120_DMA_RINGEND : 0);
}

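/*
 * NAPI poll routine (old dev->poll API): process up to min(dev->quota,
 * *budget) packets from the low prio RX ring. Returns 1 while there may
 * be more work to do, 0 once the ring is drained and the RX interrupts
 * have been re-enabled.
 */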
static int adm5120_rx(struct net_device *dev, int *budget)
{
	struct sk_buff *skb, *skbn;
	struct adm5120_sw *priv;
	struct net_device *cdev;
	struct adm5120_dma *dma;
	int port, len, quota;

	quota = min(dev->quota, *budget);
	dma = &adm5120_dma_rxl[adm5120_rxli];
	while (!(dma->data & ADM5120_DMA_OWN) && quota) {
		port = (dma->status & ADM5120_DMA_PORTID);
		port >>= ADM5120_DMA_PORTSHIFT;
		cdev = adm5120_port[port];
		if (cdev != dev) { /* The current packet belongs to a different device */
			if ((cdev == NULL) || !netif_running(cdev)) {
				/* discard (update with old skb) */
				skb = skbn = NULL;
				goto rx_skip;
			} else {
				netif_rx_schedule(cdev); /* Start polling next device */
				return 1; /* More packets to process */
			}
		}
		skb = adm5120_skb_rxl[adm5120_rxli];
		len = (dma->status & ADM5120_DMA_LEN);
		len >>= ADM5120_DMA_LENSHIFT;
		len -= ETH_FCS;

		priv = netdev_priv(dev);
		if (len <= 0 || len > ADM5120_DMA_RXSIZE ||
		    dma->status & ADM5120_DMA_FCSERR) {
			priv->stats.rx_errors++;
			skbn = NULL;
		} else {
			skbn = dev_alloc_skb(ADM5120_DMA_RXSIZE + 16);
			if (skbn) {
				skb_put(skb, len);
				skb->dev = dev;
				skb->protocol = eth_type_trans(skb, dev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				dev->last_rx = jiffies;
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += len;
				skb_reserve(skbn, NET_IP_ALIGN);
				adm5120_skb_rxl[adm5120_rxli] = skbn;
			} else {
				printk(KERN_INFO "%s recycling!\n", dev->name);
			}
		}
rx_skip:
		adm5120_rx_dma_update(&adm5120_dma_rxl[adm5120_rxli],
			adm5120_skb_rxl[adm5120_rxli],
			(ADM5120_DMA_RXL - 1 == adm5120_rxli));
		if (ADM5120_DMA_RXL == ++adm5120_rxli)
			adm5120_rxli = 0;
		dma = &adm5120_dma_rxl[adm5120_rxli];
		if (skbn) {
			netif_receive_skb(skb);
			dev->quota--;
			(*budget)--;
			quota--;
		}
	} /* while */
	if (quota) {
		/* Quota left over means the ring is drained: stop polling
		 * and re-enable the RX interrupts */
		netif_rx_complete(dev);
		adm5120_set_reg(ADM5120_INT_MASK,
			adm5120_get_reg(ADM5120_INT_MASK) &
			~(ADM5120_INT_RXL | ADM5120_INT_LFULL));
		return 0;
	}
	/* Quota exhausted, there may be more packets to process */
	return 1;
}

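/*
 * Switch interrupt handler: mask everything, acknowledge the pending
 * sources, then kick off NAPI polling for the device that owns the
 * packet at the head of the RX ring. RX interrupts stay masked until
 * the poll routine re-enables them.
 */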
static irqreturn_t adm5120_sw_irq(int irq, void *dev_id)
{
	unsigned long intreg, intmask;
	int port;
	struct net_device *dev;

	intmask = adm5120_get_reg(ADM5120_INT_MASK);	/* Remember interrupt mask */
	adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL); /* Disable interrupts */

	intreg = adm5120_get_reg(ADM5120_INT_ST);	/* Read interrupt status */
	adm5120_set_reg(ADM5120_INT_ST, intreg);	/* Clear interrupt status */

	/* In NAPI operation the interrupts are disabled and the polling
	 * mechanism is activated. The interrupts are finally enabled again
	 * in the polling routine.
	 */
	if (intreg & (ADM5120_INT_RXL | ADM5120_INT_LFULL)) {
		/* check rx buffer for port number */
		port = adm5120_dma_rxl[adm5120_rxli].status & ADM5120_DMA_PORTID;
		port >>= ADM5120_DMA_PORTSHIFT;
		dev = adm5120_port[port];
		if ((dev == NULL) || !netif_running(dev)) {
			/* discard (update with old skb) */
			adm5120_rx_dma_update(&adm5120_dma_rxl[adm5120_rxli],
				adm5120_skb_rxl[adm5120_rxli],
				(ADM5120_DMA_RXL - 1 == adm5120_rxli));
			if (ADM5120_DMA_RXL == ++adm5120_rxli)
				adm5120_rxli = 0;
		} else {
			netif_rx_schedule(dev);
			intmask |= (ADM5120_INT_RXL | ADM5120_INT_LFULL); /* Keep RX interrupts masked */
		}
	}
#ifdef CONFIG_DEBUG
	if (intreg & ~(intmask))
		printk(KERN_INFO "adm5120sw: IRQ 0x%08X unexpected!\n",
			(unsigned int)(intreg & ~(intmask)));
#endif

	adm5120_set_reg(ADM5120_INT_MASK, intmask);

	return IRQ_HANDLED;
}

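/*
 * Write the six VLAN port masks into the VLAN group registers (entries
 * 0-3 into VLAN_GI, 4-5 into VLAN_GII) and rebuild the port -> device
 * lookup table used by the RX path.
 */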
static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;
	int vlan_port, port;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	adm5120_set_reg(ADM5120_VLAN_GI, val);
	val = matrix[4] + (matrix[5]<<8);
	adm5120_set_reg(ADM5120_VLAN_GII, val);
	/* Now set/update the port vs. device lookup table */
	for (port = 0; port < SW_DEVS; port++) {
		for (vlan_port = 0;
		     vlan_port < SW_DEVS && !(matrix[vlan_port] & (1 << port));
		     vlan_port++)
			;
		if (vlan_port < SW_DEVS)
			adm5120_port[port] = adm5120_devs[vlan_port];
		else
			adm5120_port[port] = NULL;
	}
}

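/*
 * Program the per-port bandwidth/shaper settings. As used here, ports
 * 0-3 take one byte each in BW_CTL0, port 4 uses the low byte of
 * BW_CTL1, and bit 31 of BW_CTL1 is toggled by the port 5 (MII) entry.
 */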
static void adm5120_set_bw(char *matrix)
{
	unsigned long val;

	/* Port 0 to 3 are set using the bandwidth control 0 register */
	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	adm5120_set_reg(ADM5120_BW_CTL0, val);

	/* Port 4 and 5 are set using the bandwidth control 1 register */
	val = matrix[4];
	if (matrix[5] == 1)
		adm5120_set_reg(ADM5120_BW_CTL1, val | 0x80000000);
	else
		adm5120_set_reg(ADM5120_BW_CTL1, val & ~0x80000000);

	printk(KERN_DEBUG "D: ctl0 0x%lx, ctl1 0x%lx\n",
		adm5120_get_reg(ADM5120_BW_CTL0),
		adm5120_get_reg(ADM5120_BW_CTL1));
}

static int adm5120_sw_open(struct net_device *dev)
{
	unsigned long val;
	int i;

	netif_start_queue(dev);
	if (!adm5120_if_open++) {
		/* enable interrupts on first open */
		adm5120_set_reg(ADM5120_INT_MASK,
			adm5120_get_reg(ADM5120_INT_MASK) &
			~(ADM5120_INT_RXL | ADM5120_INT_LFULL));
	}
	/* enable (additional) port */
	val = adm5120_get_reg(ADM5120_PORT_CONF0);
	for (i = 0; i < SW_DEVS; i++) {
		if (dev == adm5120_devs[i])
			val &= ~vlan_matrix[i];
	}
	adm5120_set_reg(ADM5120_PORT_CONF0, val);
	return 0;
}

static int adm5120_sw_stop(struct net_device *dev)
{
	unsigned long val;
	int i;

	if (!--adm5120_if_open) {
		/* disable interrupts on last close */
		adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
	}
	/* disable port if not assigned to other devices */
	val = adm5120_get_reg(ADM5120_PORT_CONF0) | ADM5120_PORTDISALL;
	for (i = 0; i < SW_DEVS; i++) {
		if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
			val &= ~vlan_matrix[i];
	}
	adm5120_set_reg(ADM5120_PORT_CONF0, val);
	netif_stop_queue(dev);
	return 0;
}

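/*
 * Transmit on the low prio TX ring. If the DMA engine still owns the
 * descriptor at the current index the ring is full and the packet is
 * dropped; otherwise the skb is queued, ownership is passed to the
 * hardware and the TX trigger register is written. Old skbs are freed
 * lazily when their ring slot is reused rather than in a TX completion
 * interrupt.
 */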
static int adm5120_sw_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct adm5120_dma *dma;
	struct sk_buff **skbl = adm5120_skb_txl;
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned long data;

	dev->trans_start = jiffies;
	dma = &adm5120_dma_txl[adm5120_txli];
	if (dma->data & ADM5120_DMA_OWN) {
		/* We want to write a packet but the TX queue is still
		 * occupied by the DMA. We are faster than the DMA... */
		dev_kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}
	data = ADM5120_DMA_ADDR(skb->data) | ADM5120_DMA_OWN;
	if (adm5120_txli == ADM5120_DMA_TXL - 1)
		data |= ADM5120_DMA_RINGEND;
	dma->status =
		((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << ADM5120_DMA_LENSHIFT) |
		(0x1 << priv->port);

	dma->len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;

	/* free old skbs here instead of tx completion interrupt:
	 * will hold some more memory allocated but reduces interrupts */
	if (skbl[adm5120_txli]) {
		dev_kfree_skb(skbl[adm5120_txli]);
	}
	skbl[adm5120_txli] = skb;

	dma->data = data; /* Here we enable the buffer for the TX DMA machine */
	adm5120_set_reg(ADM5120_SEND_TRIG, ADM5120_SEND_TRIG_L);
	if (++adm5120_txli == ADM5120_DMA_TXL)
		adm5120_txli = 0;
	return 0;
}

static void adm5120_tx_timeout(struct net_device *dev)
{
	printk(KERN_INFO "%s: TX timeout\n", dev->name);
}

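/*
 * get_stats also doubles as the place where the CPU port configuration
 * is kept in sync with the interface flags: forwarding of unknown
 * unicast/multicast frames to the CPU is toggled per port, and Bridge
 * Test Mode is enabled whenever any port is in promiscuous mode (see
 * the comment below).
 */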
static struct net_device_stats *adm5120_sw_stats(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	int portmask;
	unsigned long adm5120_cpup_conf_reg;

	portmask = vlan_matrix[priv->port] & 0x3f;

	adm5120_cpup_conf_reg = adm5120_get_reg(ADM5120_CPUP_CONF);

	if (dev->flags & IFF_PROMISC)
		adm5120_cpup_conf_reg &= ~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL);
	else
		adm5120_cpup_conf_reg |= (portmask << ADM5120_DISUNSHIFT);

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI || dev->mc_count)
		adm5120_cpup_conf_reg &= ~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL);
	else
		adm5120_cpup_conf_reg |= (portmask << ADM5120_DISMCSHIFT);

	/* If any port is configured to be in promiscuous mode, the Bridge
	 * Test Mode has to be activated. This also forwards packets
	 * learned in another VLAN to the CPU.
	 *
	 * The difficult scenario is when we want to build a bridge on the
	 * CPU. Assume we have port0 and the CPU port in VLAN0, and port1
	 * and the CPU port in VLAN1. Now we build a bridge on the CPU
	 * between VLAN0 and VLAN1. Both ports of the VLANs are set in
	 * promisc mode. Now assume a packet with ethernet source address
	 * 99 enters port 0. It will be forwarded to the CPU because it is
	 * unknown. Then the bridge in the CPU will send it to VLAN1 and it
	 * goes out at port 1. When a packet with ethernet destination
	 * address 99 now comes in at port 1 in VLAN1, the switch has
	 * learned that this address is located at port 0 in VLAN0.
	 * Therefore the switch will drop this packet. In order to avoid
	 * this and still deliver the packet to the CPU, the Bridge Test
	 * Mode has to be activated. */

	/* Check if there is any vlan in promisc mode. */
	if (~adm5120_cpup_conf_reg & ADM5120_DISUNALL)
		adm5120_cpup_conf_reg |= ADM5120_BTM; /* Set the BTM */
	else
		adm5120_cpup_conf_reg &= ~ADM5120_BTM; /* Disable the BTM */

	adm5120_set_reg(ADM5120_CPUP_CONF, adm5120_cpup_conf_reg);

	return &priv->stats;
}

static void adm5120_set_multicast_list(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	int portmask;

	portmask = vlan_matrix[priv->port] & 0x3f;

	if (dev->flags & IFF_PROMISC)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISUNSHIFT));

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
	    dev->mc_count)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISMCSHIFT));
}

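/*
 * Write the device MAC into the switch address table: MAC_WT1 takes
 * address bytes 2-5, MAC_WT0 bytes 0-1 together with the VLAN/port
 * index and the write command bits, then busy-wait until the hardware
 * signals completion.
 */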
static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;

	adm5120_set_reg(ADM5120_MAC_WT1,
		mac[2] | (mac[3]<<8) | (mac[4]<<16) | (mac[5]<<24));
	adm5120_set_reg(ADM5120_MAC_WT0, (priv->port<<3) |
		(mac[0]<<16) | (mac[1]<<24) | ADM5120_MAC_WRITE | ADM5120_VLAN_EN);

	while (!(adm5120_get_reg(ADM5120_MAC_WT0) & ADM5120_MAC_WRITE_DONE))
		;
}

static int adm5120_sw_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	adm5120_write_mac(dev);
	return 0;
}

static int adm5120_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct adm5120_sw_info info;
	struct adm5120_sw *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCGADMINFO:
		info.magic = 0x5120;
		info.ports = adm5120_nrdevs;
		info.vlan = priv->port;
		err = copy_to_user(rq->ifr_data, &info, sizeof(info));
		if (err)
			return -EFAULT;
		break;
	case SIOCSMATRIX:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(vlan_matrix, rq->ifr_data,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_vlan(vlan_matrix);
		break;
	case SIOCGMATRIX:
		err = copy_to_user(rq->ifr_data, vlan_matrix,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		break;
	case SIOCGETBW:
		err = copy_to_user(rq->ifr_data, bw_matrix, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		break;
	case SIOCSETBW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(bw_matrix, rq->ifr_data, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_bw(bw_matrix);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

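/*
 * Ring setup helpers: TX rings start out empty (all descriptors owned
 * by the CPU), RX rings are populated with freshly allocated skbs and
 * handed to the hardware descriptor by descriptor.
 */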
static void adm5120_dma_tx_init(struct adm5120_dma *dma, struct sk_buff **skbl,
		int num)
{
	memset(dma, 0, sizeof(struct adm5120_dma) * num);
	dma[num - 1].data |= ADM5120_DMA_RINGEND;
	memset(skbl, 0, sizeof(struct sk_buff *) * num);
}

static void adm5120_dma_rx_init(struct adm5120_dma *dma, struct sk_buff **skbl,
		int num)
{
	int i;

	memset(dma, 0, sizeof(struct adm5120_dma) * num);
	for (i = 0; i < num; i++) {
		skbl[i] = dev_alloc_skb(ADM5120_DMA_RXSIZE + 16);
		if (!skbl[i])
			break;
		skb_reserve(skbl[i], NET_IP_ALIGN);
		adm5120_rx_dma_update(&dma[i], skbl[i], (num - 1 == i));
	}
}

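/*
 * Module init: grab the switch IRQ, put the switch into a known state
 * (CPU port disabled, all ports disabled, interrupts masked), access
 * the descriptor rings through KSEG1 (uncached) addresses, register
 * one net_device per VLAN and finally enable the CPU port.
 */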
static int __init adm5120_sw_init(void)
{
	int i, err;
	struct net_device *dev;

	err = request_irq(ADM5120_IRQ_SWITCH, adm5120_sw_irq, 0,
		"ethernet switch", NULL);
	if (err)
		goto out;

	adm5120_nrdevs = adm5120_eth_num_ports;

	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_DISCCPUPORT | ADM5120_CRC_PADDING |
		ADM5120_DISUNALL | ADM5120_DISMCALL);
	adm5120_set_reg(ADM5120_PORT_CONF0,
		ADM5120_ENMC | ADM5120_ENBP | ADM5120_PORTDISALL);

	adm5120_set_reg(ADM5120_PHY_CNTL2, adm5120_get_reg(ADM5120_PHY_CNTL2) |
		ADM5120_AUTONEG | ADM5120_NORMAL | ADM5120_AUTOMDIX);
	adm5120_set_reg(ADM5120_PHY_CNTL3, adm5120_get_reg(ADM5120_PHY_CNTL3) |
		ADM5120_PHY_NTH);

	adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
	adm5120_set_reg(ADM5120_INT_ST, ADM5120_INTMASKALL);

	adm5120_dma_txh = (void *)KSEG1ADDR((u32)adm5120_dma_txh_v);
	adm5120_dma_txl = (void *)KSEG1ADDR((u32)adm5120_dma_txl_v);
	adm5120_dma_rxh = (void *)KSEG1ADDR((u32)adm5120_dma_rxh_v);
	adm5120_dma_rxl = (void *)KSEG1ADDR((u32)adm5120_dma_rxl_v);

	adm5120_dma_tx_init(adm5120_dma_txh, adm5120_skb_txh, ADM5120_DMA_TXH);
	adm5120_dma_tx_init(adm5120_dma_txl, adm5120_skb_txl, ADM5120_DMA_TXL);
	adm5120_dma_rx_init(adm5120_dma_rxh, adm5120_skb_rxh, ADM5120_DMA_RXH);
	adm5120_dma_rx_init(adm5120_dma_rxl, adm5120_skb_rxl, ADM5120_DMA_RXL);
	adm5120_set_reg(ADM5120_SEND_HBADDR, KSEG1ADDR(adm5120_dma_txh));
	adm5120_set_reg(ADM5120_SEND_LBADDR, KSEG1ADDR(adm5120_dma_txl));
	adm5120_set_reg(ADM5120_RECEIVE_HBADDR, KSEG1ADDR(adm5120_dma_rxh));
	adm5120_set_reg(ADM5120_RECEIVE_LBADDR, KSEG1ADDR(adm5120_dma_rxl));

	for (i = 0; i < adm5120_nrdevs; i++) {
		adm5120_devs[i] = alloc_etherdev(sizeof(struct adm5120_sw));
		if (!adm5120_devs[i]) {
			err = -ENOMEM;
			goto out_int;
		}

		dev = adm5120_devs[i];
		SET_MODULE_OWNER(dev);
		memset(netdev_priv(dev), 0, sizeof(struct adm5120_sw));
		((struct adm5120_sw *)netdev_priv(dev))->port = i;
		dev->base_addr = SW_BASE;
		dev->irq = ADM5120_IRQ_SWITCH;
		dev->open = adm5120_sw_open;
		dev->hard_start_xmit = adm5120_sw_tx;
		dev->stop = adm5120_sw_stop;
		dev->get_stats = adm5120_sw_stats;
		dev->set_multicast_list = adm5120_set_multicast_list;
		dev->do_ioctl = adm5120_do_ioctl;
		dev->tx_timeout = adm5120_tx_timeout;
		dev->watchdog_timeo = ETH_TX_TIMEOUT;
		dev->set_mac_address = adm5120_sw_set_mac_address;
		dev->poll = adm5120_rx;
		dev->weight = 64;

		memcpy(dev->dev_addr, adm5120_eth_macs[i], 6);
		adm5120_write_mac(dev);

		if ((err = register_netdev(dev))) {
			free_netdev(dev);
			goto out_int;
		}
		printk(KERN_INFO "%s: ADM5120 switch port%d\n", dev->name, i);
	}
	/* setup vlan/port mapping after devs are filled up */
	adm5120_set_vlan(vlan_matrix);

	/* enable the CPU port */
	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_CRC_PADDING | ADM5120_DISUNALL | ADM5120_DISMCALL);

	return 0;

out_int:
	/* Undo everything that did succeed */
	for (; i; i--) {
		unregister_netdev(adm5120_devs[i - 1]);
		free_netdev(adm5120_devs[i - 1]);
	}
	free_irq(ADM5120_IRQ_SWITCH, NULL);
out:
	printk(KERN_ERR "ADM5120 Ethernet switch init failed\n");
	return err;
}

static void __exit adm5120_sw_exit(void)
{
	int i;

	for (i = 0; i < adm5120_nrdevs; i++) {
		unregister_netdev(adm5120_devs[i]);
		free_netdev(adm5120_devs[i]);
	}

	free_irq(ADM5120_IRQ_SWITCH, NULL);

	for (i = 0; i < ADM5120_DMA_RXH; i++) {
		if (!adm5120_skb_rxh[i])
			break;
		kfree_skb(adm5120_skb_rxh[i]);
	}
	for (i = 0; i < ADM5120_DMA_RXL; i++) {
		if (!adm5120_skb_rxl[i])
			break;
		kfree_skb(adm5120_skb_rxl[i]);
	}
}

module_init(adm5120_sw_init);
module_exit(adm5120_sw_exit);