[adm5120] refactor kernel code (part 1), mark it as broken now
[openwrt.git] / target / linux / adm5120-2.6 / files / drivers / net / adm5120sw.c
1 /*
2 * ADM5120 built in ethernet switch driver
3 *
4 * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
5 *
6 * Inspiration for this driver came from the original ADMtek 2.4
7 * driver, Copyright ADMtek Inc.
8 */
9 #include <linux/autoconf.h>
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/errno.h>
16 #include <linux/interrupt.h>
17 #include <linux/ioport.h>
18 #include <asm/mipsregs.h>
19 #include <asm/irq.h>
20 #include <asm/io.h>
21 #include "adm5120sw.h"
22
23 #include <asm/mach-adm5120/adm5120_info.h>
24 #include <asm/mach-adm5120/adm5120_irq.h>
25
26 MODULE_AUTHOR("Jeroen Vreeken (pe1rxq@amsat.org)");
27 MODULE_DESCRIPTION("ADM5120 ethernet switch driver");
28 MODULE_LICENSE("GPL");
29
30 /*
31 * The ADM5120 uses an internal matrix to determine which ports
32 * belong to which VLAN.
33 * The default generates a VLAN (and device) for each port
34 * (including MII port) and the CPU port is part of all of them.
35 *
36 * Another example, one big switch and everything mapped to eth0:
37 * 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00
38 */
/* Per-device port membership bitmap: bit 6 is the CPU port, bits 0-5 the
 * switch ports; the defaults put each port in its own VLAN with the CPU. */
static unsigned char vlan_matrix[SW_DEVS] = {
	0x41, 0x42, 0x44, 0x48, 0x50, 0x60
};
42
43 /* default settings - unlimited TX and RX on all ports, default shaper mode */
/* One entry per port; 0 means no bandwidth limit (settable via SIOCSETBW) */
static unsigned char bw_matrix[SW_DEVS] = {
	0, 0, 0, 0, 0, 0
};
47
/* Number of net_devices actually registered (set from adm5120_eth_num_ports) */
static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SW_DEVS];
/* DMA descriptor rings: the *_v arrays are the cached-KSEG0 storage, the
 * pointer variants are remapped into uncached KSEG1 space at init time. */
static struct adm5120_dma
	adm5120_dma_txh_v[ADM5120_DMA_TXH] __attribute__((aligned(16))),
	adm5120_dma_txl_v[ADM5120_DMA_TXL] __attribute__((aligned(16))),
	adm5120_dma_rxh_v[ADM5120_DMA_RXH] __attribute__((aligned(16))),
	adm5120_dma_rxl_v[ADM5120_DMA_RXL] __attribute__((aligned(16))),
	*adm5120_dma_txh,
	*adm5120_dma_txl,
	*adm5120_dma_rxh,
	*adm5120_dma_rxl;
/* One sk_buff slot per descriptor, same index as the ring entry */
static struct sk_buff
	*adm5120_skb_rxh[ADM5120_DMA_RXH],
	*adm5120_skb_rxl[ADM5120_DMA_RXL],
	*adm5120_skb_txh[ADM5120_DMA_TXH],
	*adm5120_skb_txl[ADM5120_DMA_TXL];
/* Ring cursors: rx = next descriptor to reap, tx = next free slot,
 * txhit/txlit = next transmitted descriptor to reclaim in the irq handler */
static int adm5120_rxhi = 0;
static int adm5120_rxli = 0;
/* We don't use high priority tx for now */
/*static int adm5120_txhi = 0;*/
static int adm5120_txli = 0;
static int adm5120_txhit = 0;
static int adm5120_txlit = 0;
/* Count of open interfaces; interrupts are unmasked while it is non-zero */
static int adm5120_if_open = 0;
74 static inline void adm5120_set_reg(unsigned int reg, unsigned long val)
75 {
76 *(volatile unsigned long*)(SW_BASE+reg) = val;
77 }
78
79 static inline unsigned long adm5120_get_reg(unsigned int reg)
80 {
81 return *(volatile unsigned long*)(SW_BASE+reg);
82 }
83
84 static inline void adm5120_rxfixup(struct adm5120_dma *dma,
85 struct sk_buff **skbl, int num)
86 {
87 int i;
88
89 /* Resubmit the entire ring */
90 for (i=0; i<num; i++) {
91 dma[i].status = 0;
92 dma[i].cntl = 0;
93 dma[i].len = ADM5120_DMA_RXSIZE;
94 dma[i].data = ADM5120_DMA_ADDR(skbl[i]->data) |
95 ADM5120_DMA_OWN | (i==num-1 ? ADM5120_DMA_RINGEND : 0);
96 }
97 }
98
/*
 * Reap completed RX descriptors starting at *index, deliver the packets to
 * the right net_device (by mapping source port -> VLAN via vlan_matrix),
 * and re-arm each descriptor with a fresh (or recycled) skb.
 * Called from the interrupt handler.
 */
static inline void adm5120_rx(struct adm5120_dma *dma, struct sk_buff **skbl,
	int *index, int num)
{
	struct sk_buff *skb, *skbn;
	struct adm5120_sw *priv;
	struct net_device *dev;
	int port, vlan, len;

	/* OWN bit cleared means the hardware has finished with the entry */
	while (!(dma[*index].data & ADM5120_DMA_OWN)) {
		port = (dma[*index].status & ADM5120_DMA_PORTID);
		port >>= ADM5120_DMA_PORTSHIFT;
		/* find the first device whose VLAN contains the source port */
		for (vlan = 0; vlan < adm5120_nrdevs; vlan++) {
			if ((1<<port) & vlan_matrix[vlan])
				break;
		}
		if (vlan == adm5120_nrdevs)
			vlan = 0;	/* no match: fall back to device 0 */
		dev = adm5120_devs[vlan];
		skb = skbl[*index];
		len = (dma[*index].status & ADM5120_DMA_LEN);
		len >>= ADM5120_DMA_LENSHIFT;
		len -= ETH_FCS;		/* hardware length includes the FCS */

		priv = netdev_priv(dev);
		if (len <= 0 || len > ADM5120_DMA_RXSIZE ||
			dma[*index].status & ADM5120_DMA_FCSERR) {
			priv->stats.rx_errors++;
			skbn = NULL;	/* bad frame: recycle skb, don't deliver */
		} else {
			skbn = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
			if (skbn) {
				skb_put(skb, len);
				skb->dev = dev;
				skb->protocol = eth_type_trans(skb, dev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				dev->last_rx = jiffies;
				priv->stats.rx_packets++;
				priv->stats.rx_bytes+=len;
				/* align IP header on the replacement skb */
				skb_reserve(skbn, 2);
				skbl[*index] = skbn;
			} else {
				/* allocation failed: reuse the old skb and
				 * drop this packet */
				printk(KERN_INFO "%s recycling!\n", dev->name);
			}
		}

		/* re-arm the descriptor and give it back to the hardware */
		dma[*index].status = 0;
		dma[*index].cntl = 0;
		dma[*index].len = ADM5120_DMA_RXSIZE;
		dma[*index].data = ADM5120_DMA_ADDR(skbl[*index]->data) |
			ADM5120_DMA_OWN |
			(num-1==*index ? ADM5120_DMA_RINGEND : 0);
		if (num == ++*index)
			*index = 0;
		/* only deliver when a replacement skb was installed */
		if (skbn)
			netif_rx(skb);
	}
}
156
157 static inline void adm5120_tx(struct adm5120_dma *dma, struct sk_buff **skbl,
158 int *index, int num)
159 {
160 while((dma[*index].data & ADM5120_DMA_OWN) == 0 && skbl[*index]) {
161 dev_kfree_skb_irq(skbl[*index]);
162 skbl[*index] = NULL;
163 if (++*index == num)
164 *index = 0;
165 }
166 }
167
/*
 * Switch interrupt handler: masks the handled interrupt sources, acks the
 * pending status bits, dispatches RX/TX ring processing, then unmasks.
 */
static irqreturn_t adm5120_sw_irq(int irq, void *dev_id)
{
	unsigned long intreg;

	/* mask the sources we handle while we process them */
	adm5120_set_reg(ADM5120_INT_MASK,
		adm5120_get_reg(ADM5120_INT_MASK) | ADM5120_INTHANDLE);

	/* read and acknowledge all pending interrupts */
	intreg = adm5120_get_reg(ADM5120_INT_ST);
	adm5120_set_reg(ADM5120_INT_ST, intreg);

	if (intreg & ADM5120_INT_RXH)
		adm5120_rx(adm5120_dma_rxh, adm5120_skb_rxh, &adm5120_rxhi,
			ADM5120_DMA_RXH);
	if (intreg & ADM5120_INT_HFULL)
		adm5120_rxfixup(adm5120_dma_rxh, adm5120_skb_rxh,
			ADM5120_DMA_RXH);
	if (intreg & ADM5120_INT_RXL)
		adm5120_rx(adm5120_dma_rxl, adm5120_skb_rxl, &adm5120_rxli,
			ADM5120_DMA_RXL);
	if (intreg & ADM5120_INT_LFULL)
		adm5120_rxfixup(adm5120_dma_rxl, adm5120_skb_rxl,
			ADM5120_DMA_RXL);
	if (intreg & ADM5120_INT_TXH)
		adm5120_tx(adm5120_dma_txh, adm5120_skb_txh, &adm5120_txhit,
			ADM5120_DMA_TXH);
	if (intreg & ADM5120_INT_TXL)
		adm5120_tx(adm5120_dma_txl, adm5120_skb_txl, &adm5120_txlit,
			ADM5120_DMA_TXL);

	/* re-enable the handled interrupt sources */
	adm5120_set_reg(ADM5120_INT_MASK,
		adm5120_get_reg(ADM5120_INT_MASK) & ~ADM5120_INTHANDLE);

	return IRQ_HANDLED;
}
202
203 static void adm5120_set_vlan(char *matrix)
204 {
205 unsigned long val;
206
207 val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
208 adm5120_set_reg(ADM5120_VLAN_GI, val);
209 val = matrix[4] + (matrix[5]<<8);
210 adm5120_set_reg(ADM5120_VLAN_GII, val);
211 }
212
213 static void adm5120_set_bw(char *matrix)
214 {
215 unsigned long val;
216
217 /* Port 0 to 3 are set using the bandwidth control 0 register */
218 val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
219 adm5120_set_reg(ADM5120_BW_CTL0, val);
220
221 /* Port 4 and 5 are set using the bandwidth control 1 register */
222 val = matrix[4];
223 if (matrix[5] == 1)
224 adm5120_set_reg(ADM5120_BW_CTL1, val | 0x80000000);
225 else
226 adm5120_set_reg(ADM5120_BW_CTL1, val & ~0x8000000);
227
228 printk(KERN_DEBUG "D: ctl0 0x%x, ctl1 0x%x\n",
229 adm5120_get_reg(ADM5120_BW_CTL0),
230 adm5120_get_reg(ADM5120_BW_CTL1));
231 }
232
233 static int adm5120_sw_open(struct net_device *dev)
234 {
235 if (!adm5120_if_open++)
236 adm5120_set_reg(ADM5120_INT_MASK,
237 adm5120_get_reg(ADM5120_INT_MASK) & ~ADM5120_INTHANDLE);
238 netif_start_queue(dev);
239 return 0;
240 }
241
242 static int adm5120_sw_stop(struct net_device *dev)
243 {
244 netif_stop_queue(dev);
245 if (!--adm5120_if_open)
246 adm5120_set_reg(ADM5120_INT_MASK,
247 adm5120_get_reg(ADM5120_INT_MASK) | ADM5120_INTMASKALL);
248 return 0;
249 }
250
/*
 * Transmit a packet on the low-priority TX ring. If the next descriptor is
 * still owned by the hardware the ring is full and the packet is dropped
 * (no queue stop). The port bit in the descriptor status selects which
 * switch port(s) the frame leaves on.
 */
static int adm5120_sw_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct adm5120_dma *dma = adm5120_dma_txl;
	struct sk_buff **skbl = adm5120_skb_txl;
	struct adm5120_sw *priv = netdev_priv(dev);
	int *index = &adm5120_txli;
	int num = ADM5120_DMA_TXL;
	int trigger = ADM5120_SEND_TRIG_L;

	dev->trans_start = jiffies;
	/* ring full: drop the packet rather than stopping the queue */
	if (dma[*index].data & ADM5120_DMA_OWN) {
		dev_kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}

	dma[*index].data = ADM5120_DMA_ADDR(skb->data) | ADM5120_DMA_OWN;
	if (*index == num-1)
		dma[*index].data |= ADM5120_DMA_RINGEND;
	/* pad short frames to the minimum ethernet length */
	dma[*index].status =
		((skb->len<ETH_ZLEN?ETH_ZLEN:skb->len) << ADM5120_DMA_LENSHIFT) |
		(0x1 << priv->port);
	dma[*index].len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	skbl[*index]=skb;	/* remember the skb so the irq handler can free it */

	if (++*index == num)
		*index = 0;
	/* kick the hardware to start sending */
	adm5120_set_reg(ADM5120_SEND_TRIG, trigger);

	return 0;
}
284
/* TX watchdog: no hardware reset is done, just restart the queue */
static void adm5120_tx_timeout(struct net_device *dev)
{
	netif_wake_queue(dev);
}
289
290 static struct net_device_stats *adm5120_sw_stats(struct net_device *dev)
291 {
292 return &((struct adm5120_sw *)netdev_priv(dev))->stats;
293 }
294
/*
 * Apply promiscuous/multicast policy for this device's ports by toggling
 * the "disable unknown unicast" and "disable multicast" bits in the CPU
 * port configuration register.
 */
static void adm5120_set_multicast_list(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	int portmask;

	/* ports belonging to this device (CPU port bit masked off) */
	portmask = vlan_matrix[priv->port] & 0x3f;

	if (dev->flags & IFF_PROMISC)
		/* promiscuous: stop dropping unknown unicast on these ports */
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISUNSHIFT));

	/* accept multicast when promiscuous, allmulti, or any address joined */
	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
		dev->mc_count)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISMCSHIFT));
}
321
/*
 * Write the device's MAC address into the switch's address table for its
 * VLAN, then busy-wait for the hardware to signal completion.
 * NOTE(review): the wait loop has no timeout — it spins forever if the
 * hardware never sets ADM5120_MAC_WRITE_DONE.
 */
static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;

	/* low four MAC bytes go into write-table register 1 */
	adm5120_set_reg(ADM5120_MAC_WT1,
		mac[2] | (mac[3]<<8) | (mac[4]<<16) | (mac[5]<<24));
	/* VLAN index, high two MAC bytes, and the write command in register 0 */
	adm5120_set_reg(ADM5120_MAC_WT0, (priv->port<<3) |
		(mac[0]<<16) | (mac[1]<<24) | ADM5120_MAC_WRITE | ADM5120_VLAN_EN);

	/* spin until the hardware acknowledges the table write */
	while (!(adm5120_get_reg(ADM5120_MAC_WT0) & ADM5120_MAC_WRITE_DONE));
}
334
335 static int adm5120_sw_set_mac_address(struct net_device *dev, void *p)
336 {
337 struct sockaddr *addr = p;
338
339 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
340 adm5120_write_mac(dev);
341 return 0;
342 }
343
/*
 * Private ioctls: query switch info, and get/set the VLAN matrix and the
 * per-port bandwidth matrix. The set operations require CAP_NET_ADMIN and
 * immediately reprogram the hardware.
 */
static int adm5120_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct adm5120_sw_info info;
	struct adm5120_sw *priv = netdev_priv(dev);

	switch(cmd) {
	case SIOCGADMINFO:
		info.magic = 0x5120;
		info.ports = adm5120_nrdevs;
		info.vlan = priv->port;
		/* copy_to_user returns the number of bytes NOT copied */
		err = copy_to_user(rq->ifr_data, &info, sizeof(info));
		if (err)
			return -EFAULT;
		break;
	case SIOCSMATRIX:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(vlan_matrix, rq->ifr_data,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_vlan(vlan_matrix);
		break;
	case SIOCGMATRIX:
		err = copy_to_user(rq->ifr_data, vlan_matrix,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		break;
	case SIOCGETBW:
		err = copy_to_user(rq->ifr_data, bw_matrix, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		break;
	case SIOCSETBW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(bw_matrix, rq->ifr_data, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_bw(bw_matrix);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
392
393 static void adm5120_dma_tx_init(struct adm5120_dma *dma, struct sk_buff **skb,
394 int num)
395 {
396 memset(dma, 0, sizeof(struct adm5120_dma)*num);
397 dma[num-1].data |= ADM5120_DMA_RINGEND;
398 memset(skb, 0, sizeof(struct skb*)*num);
399 }
400
/*
 * Initialize an RX descriptor ring: allocate an skb for every descriptor
 * and hand each entry to the hardware. If an allocation fails the loop
 * stops with i forced to num so the ring-end mark still lands on the last
 * initialized descriptor.
 * NOTE(review): on allocation failure the remaining skb slots stay NULL
 * and those descriptors stay zeroed — callers do not detect this.
 */
static void adm5120_dma_rx_init(struct adm5120_dma *dma, struct sk_buff **skb,
	int num)
{
	int i;

	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	for (i=0; i<num; i++) {
		skb[i] = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
		if (!skb[i]) {
			/* force i to num so dma[i-1] below is the last entry */
			i=num;
			break;
		}
		skb_reserve(skb[i], 2);	/* align IP header */
		dma[i].data = ADM5120_DMA_ADDR(skb[i]->data) | ADM5120_DMA_OWN;
		dma[i].cntl = 0;
		dma[i].len = ADM5120_DMA_RXSIZE;
		dma[i].status = 0;
	}
	dma[i-1].data |= ADM5120_DMA_RINGEND;
}
421
/*
 * Module init: claim the switch interrupt, program the switch registers,
 * set up all four DMA rings (uncached via KSEG1), then allocate and
 * register one net_device per configured port. On failure, everything
 * registered so far is unwound.
 */
static int __init adm5120_sw_init(void)
{
	int i, err;
	struct net_device *dev;

	err = request_irq(ADM5120_IRQ_SWITCH, adm5120_sw_irq, 0, "ethernet switch", NULL);
	if (err)
		goto out;

	adm5120_nrdevs = adm5120_eth_num_ports;

	/* initial CPU port config: CPU port disabled until devices exist */
	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_DISCCPUPORT | ADM5120_CRC_PADDING |
		ADM5120_DISUNALL | ADM5120_DISMCALL);
	adm5120_set_reg(ADM5120_PORT_CONF0, ADM5120_ENMC | ADM5120_ENBP);

	adm5120_set_reg(ADM5120_PHY_CNTL2, adm5120_get_reg(ADM5120_PHY_CNTL2) |
		ADM5120_AUTONEG | ADM5120_NORMAL | ADM5120_AUTOMDIX);
	adm5120_set_reg(ADM5120_PHY_CNTL3, adm5120_get_reg(ADM5120_PHY_CNTL3) |
		ADM5120_PHY_NTH);

	/* mask and acknowledge all interrupts before the rings go live */
	adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
	adm5120_set_reg(ADM5120_INT_ST, ADM5120_INTMASKALL);

	/* access the descriptor rings through uncached KSEG1 addresses */
	adm5120_dma_txh = (void *)KSEG1ADDR((u32)adm5120_dma_txh_v);
	adm5120_dma_txl = (void *)KSEG1ADDR((u32)adm5120_dma_txl_v);
	adm5120_dma_rxh = (void *)KSEG1ADDR((u32)adm5120_dma_rxh_v);
	adm5120_dma_rxl = (void *)KSEG1ADDR((u32)adm5120_dma_rxl_v);

	adm5120_dma_tx_init(adm5120_dma_txh, adm5120_skb_txh, ADM5120_DMA_TXH);
	adm5120_dma_tx_init(adm5120_dma_txl, adm5120_skb_txl, ADM5120_DMA_TXL);
	adm5120_dma_rx_init(adm5120_dma_rxh, adm5120_skb_rxh, ADM5120_DMA_RXH);
	adm5120_dma_rx_init(adm5120_dma_rxl, adm5120_skb_rxl, ADM5120_DMA_RXL);
	adm5120_set_reg(ADM5120_SEND_HBADDR, KSEG1ADDR(adm5120_dma_txh));
	adm5120_set_reg(ADM5120_SEND_LBADDR, KSEG1ADDR(adm5120_dma_txl));
	adm5120_set_reg(ADM5120_RECEIVE_HBADDR, KSEG1ADDR(adm5120_dma_rxh));
	adm5120_set_reg(ADM5120_RECEIVE_LBADDR, KSEG1ADDR(adm5120_dma_rxl));

	adm5120_set_vlan(vlan_matrix);

	for (i=0; i<adm5120_nrdevs; i++) {
		adm5120_devs[i] = alloc_etherdev(sizeof(struct adm5120_sw));
		if (!adm5120_devs[i]) {
			err = -ENOMEM;
			goto out_int;
		}

		dev = adm5120_devs[i];
		SET_MODULE_OWNER(dev);
		memset(netdev_priv(dev), 0, sizeof(struct adm5120_sw));
		((struct adm5120_sw*)netdev_priv(dev))->port = i;
		dev->base_addr = SW_BASE;
		dev->irq = ADM5120_IRQ_SWITCH;
		dev->open = adm5120_sw_open;
		dev->hard_start_xmit = adm5120_sw_tx;
		dev->stop = adm5120_sw_stop;
		dev->get_stats = adm5120_sw_stats;
		dev->set_multicast_list = adm5120_set_multicast_list;
		dev->do_ioctl = adm5120_do_ioctl;
		dev->tx_timeout = adm5120_tx_timeout;
		dev->watchdog_timeo = ETH_TX_TIMEOUT;
		dev->set_mac_address = adm5120_sw_set_mac_address;

		memcpy(dev->dev_addr, adm5120_eth_macs[i], 6);
		adm5120_write_mac(dev);

		if ((err = register_netdev(dev))) {
			/* current dev isn't registered; free it here, then
			 * unwind the already-registered ones below */
			free_netdev(dev);
			goto out_int;
		}
		printk(KERN_INFO "%s: ADM5120 switch port%d\n", dev->name, i);
	}
	/* enable the CPU port now that all devices are up */
	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_CRC_PADDING | ADM5120_DISUNALL | ADM5120_DISMCALL);

	return 0;

out_int:
	/* Undo everything that did succeed */
	for (; i; i--) {
		unregister_netdev(adm5120_devs[i-1]);
		free_netdev(adm5120_devs[i-1]);
	}
	free_irq(ADM5120_IRQ_SWITCH, NULL);
out:
	printk(KERN_ERR "ADM5120 Ethernet switch init failed\n");
	return err;
}
510
511 static void __exit adm5120_sw_exit(void)
512 {
513 int i;
514
515 for (i = 0; i < adm5120_nrdevs; i++) {
516 unregister_netdev(adm5120_devs[i]);
517 free_netdev(adm5120_devs[i-1]);
518 }
519
520 free_irq(ADM5120_IRQ_SWITCH, NULL);
521
522 for (i = 0; i < ADM5120_DMA_RXH; i++) {
523 if (!adm5120_skb_rxh[i])
524 break;
525 kfree_skb(adm5120_skb_rxh[i]);
526 }
527 for (i = 0; i < ADM5120_DMA_RXL; i++) {
528 if (!adm5120_skb_rxl[i])
529 break;
530 kfree_skb(adm5120_skb_rxl[i]);
531 }
532 }
533
534 module_init(adm5120_sw_init);
535 module_exit(adm5120_sw_exit);
This page took 0.085772 seconds and 5 git commands to generate.