Preliminary ADM5120 support, marked as broken
target/linux/adm5120-2.6/files/drivers/net/adm5120sw.c
/*
 * ADM5120 built-in ethernet switch driver
 *
 * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 *
 * Inspiration for this driver came from the original ADMtek 2.4
 * driver, Copyright ADMtek Inc.
 */
#include <linux/autoconf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/io.h>
#include "adm5120sw.h"

MODULE_AUTHOR("Jeroen Vreeken (pe1rxq@amsat.org)");
MODULE_DESCRIPTION("ADM5120 ethernet switch driver");
MODULE_LICENSE("GPL");

/*
 * The ADM5120 uses an internal matrix to determine which ports
 * belong to which VLAN.
 * The default generates a VLAN (and device) for each port
 * (including the MII port), and the CPU port is part of all of them.
 *
 * Another example, one big switch with everything mapped to eth0:
 * 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00
 */
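/*
 * Each byte below is the port bitmap for one VLAN/device: bits 0-5
 * select the six switch ports and bit 6 adds the CPU port.
 */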
static unsigned char vlan_matrix[SW_DEVS] = {
	0x41, 0x42, 0x44, 0x48, 0x50, 0x60
};

static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SW_DEVS];
static struct adm5120_dma
	adm5120_dma_txh_v[ADM5120_DMA_TXH] __attribute__((aligned(16))),
	adm5120_dma_txl_v[ADM5120_DMA_TXL] __attribute__((aligned(16))),
	adm5120_dma_rxh_v[ADM5120_DMA_RXH] __attribute__((aligned(16))),
	adm5120_dma_rxl_v[ADM5120_DMA_RXL] __attribute__((aligned(16))),
	*adm5120_dma_txh,
	*adm5120_dma_txl,
	*adm5120_dma_rxh,
	*adm5120_dma_rxl;
static struct sk_buff
	*adm5120_skb_rxh[ADM5120_DMA_RXH],
	*adm5120_skb_rxl[ADM5120_DMA_RXL],
	*adm5120_skb_txh[ADM5120_DMA_TXH],
	*adm5120_skb_txl[ADM5120_DMA_TXL];
static int adm5120_rxhi = 0;
static int adm5120_rxli = 0;
/* We don't use high priority tx for now */
/*static int adm5120_txhi = 0;*/
static int adm5120_txli = 0;
static int adm5120_txhit = 0;
static int adm5120_txlit = 0;
static int adm5120_if_open = 0;

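/* Memory-mapped access to the switch registers at SW_BASE */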
static inline void adm5120_set_reg(unsigned int reg, unsigned long val)
{
	*(volatile unsigned long*)(SW_BASE+reg) = val;
}

static inline unsigned long adm5120_get_reg(unsigned int reg)
{
	return *(volatile unsigned long*)(SW_BASE+reg);
}

static inline void adm5120_rxfixup(struct adm5120_dma *dma,
	struct sk_buff **skbl, int num)
{
	int i;

	/* Resubmit the entire ring */
	for (i = 0; i < num; i++) {
		dma[i].status = 0;
		dma[i].cntl = 0;
		dma[i].len = ADM5120_DMA_RXSIZE;
		dma[i].data = ADM5120_DMA_ADDR(skbl[i]->data) |
			ADM5120_DMA_OWN | (i == num-1 ? ADM5120_DMA_RINGEND : 0);
	}
}

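/*
 * Receive handler: walk the ring while the hardware has released
 * descriptors, map the source port to its VLAN device and hand the skb
 * to the stack, then refill the slot with a fresh buffer. If no
 * replacement buffer can be allocated the old one is recycled.
 */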
static inline void adm5120_rx(struct adm5120_dma *dma, struct sk_buff **skbl,
	int *index, int num)
{
	struct sk_buff *skb, *skbn;
	struct adm5120_sw *priv;
	struct net_device *dev;
	int port, vlan, len;

	while (!(dma[*index].data & ADM5120_DMA_OWN)) {
		port = (dma[*index].status & ADM5120_DMA_PORTID);
		port >>= ADM5120_DMA_PORTSHIFT;
		for (vlan = 0; vlan < adm5120_nrdevs; vlan++) {
			if ((1<<port) & vlan_matrix[vlan])
				break;
		}
		if (vlan == adm5120_nrdevs)
			vlan = 0;
		dev = adm5120_devs[vlan];
		skb = skbl[*index];
		len = (dma[*index].status & ADM5120_DMA_LEN);
		len >>= ADM5120_DMA_LENSHIFT;
		len -= ETH_FCS;

		priv = netdev_priv(dev);
		if (len <= 0 || len > ADM5120_DMA_RXSIZE ||
		    dma[*index].status & ADM5120_DMA_FCSERR) {
			priv->stats.rx_errors++;
			skbn = NULL;
		} else {
			skbn = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
			if (skbn) {
				skb_put(skb, len);
				skb->dev = dev;
				skb->protocol = eth_type_trans(skb, dev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				dev->last_rx = jiffies;
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += len;
				skb_reserve(skbn, 2);
				skbl[*index] = skbn;
			} else {
				printk(KERN_INFO "%s recycling!\n", dev->name);
			}
		}

		dma[*index].status = 0;
		dma[*index].cntl = 0;
		dma[*index].len = ADM5120_DMA_RXSIZE;
		dma[*index].data = ADM5120_DMA_ADDR(skbl[*index]->data) |
			ADM5120_DMA_OWN |
			(num-1 == *index ? ADM5120_DMA_RINGEND : 0);
		if (num == ++*index)
			*index = 0;
		if (skbn)
			netif_rx(skb);
	}
}

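/*
 * Reclaim transmitted buffers: free every skb whose descriptor has been
 * handed back by the hardware.
 */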
static inline void adm5120_tx(struct adm5120_dma *dma, struct sk_buff **skbl,
	int *index, int num)
{
	while ((dma[*index].data & ADM5120_DMA_OWN) == 0 && skbl[*index]) {
		dev_kfree_skb_irq(skbl[*index]);
		skbl[*index] = NULL;
		if (++*index == num)
			*index = 0;
	}
}

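/*
 * Switch interrupt handler. The handled interrupt sources are masked on
 * entry and unmasked again on exit; pending status bits are acknowledged
 * by writing them back to the status register.
 */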
irqreturn_t adm5120_sw_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long intreg;

	adm5120_set_reg(ADM5120_INT_MASK,
		adm5120_get_reg(ADM5120_INT_MASK) | ADM5120_INTHANDLE);

	intreg = adm5120_get_reg(ADM5120_INT_ST);
	adm5120_set_reg(ADM5120_INT_ST, intreg);

	if (intreg & ADM5120_INT_RXH)
		adm5120_rx(adm5120_dma_rxh, adm5120_skb_rxh, &adm5120_rxhi,
			ADM5120_DMA_RXH);
	if (intreg & ADM5120_INT_HFULL)
		adm5120_rxfixup(adm5120_dma_rxh, adm5120_skb_rxh,
			ADM5120_DMA_RXH);
	if (intreg & ADM5120_INT_RXL)
		adm5120_rx(adm5120_dma_rxl, adm5120_skb_rxl, &adm5120_rxli,
			ADM5120_DMA_RXL);
	if (intreg & ADM5120_INT_LFULL)
		adm5120_rxfixup(adm5120_dma_rxl, adm5120_skb_rxl,
			ADM5120_DMA_RXL);
	if (intreg & ADM5120_INT_TXH)
		adm5120_tx(adm5120_dma_txh, adm5120_skb_txh, &adm5120_txhit,
			ADM5120_DMA_TXH);
	if (intreg & ADM5120_INT_TXL)
		adm5120_tx(adm5120_dma_txl, adm5120_skb_txl, &adm5120_txlit,
			ADM5120_DMA_TXL);

	adm5120_set_reg(ADM5120_INT_MASK,
		adm5120_get_reg(ADM5120_INT_MASK) & ~ADM5120_INTHANDLE);

	return IRQ_HANDLED;
}

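/*
 * Load the 6-byte port/VLAN matrix into the hardware: the first four
 * entries go into ADM5120_VLAN_GI, the last two into ADM5120_VLAN_GII.
 */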
static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	adm5120_set_reg(ADM5120_VLAN_GI, val);
	val = matrix[4] + (matrix[5]<<8);
	adm5120_set_reg(ADM5120_VLAN_GII, val);
}

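/*
 * All ports share one interrupt: unmask it when the first interface is
 * opened and mask everything again when the last one is stopped.
 */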
static int adm5120_sw_open(struct net_device *dev)
{
	if (!adm5120_if_open++)
		adm5120_set_reg(ADM5120_INT_MASK,
			adm5120_get_reg(ADM5120_INT_MASK) & ~ADM5120_INTHANDLE);
	netif_start_queue(dev);
	return 0;
}

static int adm5120_sw_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	if (!--adm5120_if_open)
		adm5120_set_reg(ADM5120_INT_MASK,
			adm5120_get_reg(ADM5120_INT_MASK) | ADM5120_INTMASKALL);
	return 0;
}

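/*
 * Queue a frame on the low priority tx ring. The descriptor carries the
 * frame length (padded to ETH_ZLEN) and the bit for the sending device's
 * port/VLAN; if the descriptor is still owned by the hardware the frame
 * is dropped and counted as tx_dropped.
 */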
static int adm5120_sw_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct adm5120_dma *dma = adm5120_dma_txl;
	struct sk_buff **skbl = adm5120_skb_txl;
	struct adm5120_sw *priv = netdev_priv(dev);
	int *index = &adm5120_txli;
	int num = ADM5120_DMA_TXL;
	int trigger = ADM5120_SEND_TRIG_L;

	dev->trans_start = jiffies;
	if (dma[*index].data & ADM5120_DMA_OWN) {
		dev_kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}

	dma[*index].data = ADM5120_DMA_ADDR(skb->data) | ADM5120_DMA_OWN;
	if (*index == num-1)
		dma[*index].data |= ADM5120_DMA_RINGEND;
	dma[*index].status =
		((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << ADM5120_DMA_LENSHIFT) |
		(0x1 << priv->port);
	dma[*index].len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	skbl[*index] = skb;

	if (++*index == num)
		*index = 0;
	adm5120_set_reg(ADM5120_SEND_TRIG, trigger);

	return 0;
}

static void adm5120_tx_timeout(struct net_device *dev)
{
	netif_wake_queue(dev);
}

static struct net_device_stats *adm5120_sw_stats(struct net_device *dev)
{
	return &((struct adm5120_sw *)netdev_priv(dev))->stats;
}

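/*
 * Promiscuous mode clears the "disable unknown unicast" bits for this
 * device's ports in CPUP_CONF; multicast forwarding to the CPU is
 * enabled the same way when the interface is promiscuous, allmulti or
 * has multicast addresses configured.
 */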
static void adm5120_set_multicast_list(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	int portmask;

	portmask = vlan_matrix[priv->port] & 0x3f;

	if (dev->flags & IFF_PROMISC)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISUNSHIFT));

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
	    dev->mc_count)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISMCSHIFT));
}

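/*
 * Write the device's MAC address into the switch address table and
 * busy-wait until the hardware reports the write as done.
 */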
static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;

	adm5120_set_reg(ADM5120_MAC_WT1,
		mac[2] | (mac[3]<<8) | (mac[4]<<16) | (mac[5]<<24));
	adm5120_set_reg(ADM5120_MAC_WT0, (priv->port<<3) |
		(mac[0]<<16) | (mac[1]<<24) | ADM5120_MAC_WRITE | ADM5120_VLAN_EN);

	while (!(adm5120_get_reg(ADM5120_MAC_WT0) & ADM5120_MAC_WRITE_DONE))
		;
}

static int adm5120_sw_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	adm5120_write_mac(dev);
	return 0;
}

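/*
 * Private ioctls: SIOCGADMINFO reports the number of ports and this
 * device's VLAN, SIOCSMATRIX (CAP_NET_ADMIN only) loads a new port/VLAN
 * matrix and SIOCGMATRIX reads the current one back.
 */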
static int adm5120_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct adm5120_info info;
	struct adm5120_sw *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCGADMINFO:
		info.magic = 0x5120;
		info.ports = adm5120_nrdevs;
		info.vlan = priv->port;
		err = copy_to_user(rq->ifr_data, &info, sizeof(info));
		if (err)
			return -EFAULT;
		break;
	case SIOCSMATRIX:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(vlan_matrix, rq->ifr_data,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_vlan(vlan_matrix);
		break;
	case SIOCGMATRIX:
		err = copy_to_user(rq->ifr_data, vlan_matrix,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

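/* Clear a tx descriptor ring and mark its last descriptor as ring end */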
static void adm5120_dma_tx_init(struct adm5120_dma *dma, struct sk_buff **skb,
	int num)
{
	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	dma[num-1].data |= ADM5120_DMA_RINGEND;
	memset(skb, 0, sizeof(struct sk_buff *)*num);
}

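/*
 * Fill an rx descriptor ring with freshly allocated receive buffers and
 * hand ownership of every descriptor to the hardware.
 */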
static void adm5120_dma_rx_init(struct adm5120_dma *dma, struct sk_buff **skb,
	int num)
{
	int i;

	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	for (i = 0; i < num; i++) {
		skb[i] = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
		if (!skb[i]) {
			i = num;
			break;
		}
		skb_reserve(skb[i], 2);
		dma[i].data = ADM5120_DMA_ADDR(skb[i]->data) | ADM5120_DMA_OWN;
		dma[i].cntl = 0;
		dma[i].len = ADM5120_DMA_RXSIZE;
		dma[i].status = 0;
	}
	dma[i-1].data |= ADM5120_DMA_RINGEND;
}

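/*
 * Driver init: claim the switch interrupt, detect the number of ports,
 * set up the descriptor rings through KSEG1 (uncached) addresses and
 * register one net_device per VLAN.
 */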
static int __init adm5120_sw_init(void)
{
	int i, err;
	struct net_device *dev;

	err = request_irq(SW_IRQ, adm5120_sw_irq, 0, "ethernet switch", NULL);
	if (err)
		goto out;

	/* MII port? */
	if (adm5120_get_reg(ADM5120_CODE) & ADM5120_CODE_PQFP)
		adm5120_nrdevs = 5;
	else
		adm5120_nrdevs = 6;

	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_DISCCPUPORT | ADM5120_CRC_PADDING |
		ADM5120_DISUNALL | ADM5120_DISMCALL);
	adm5120_set_reg(ADM5120_PORT_CONF0, ADM5120_ENMC | ADM5120_ENBP);

	adm5120_set_reg(ADM5120_PHY_CNTL2, adm5120_get_reg(ADM5120_PHY_CNTL2) |
		ADM5120_AUTONEG | ADM5120_NORMAL | ADM5120_AUTOMDIX);
	adm5120_set_reg(ADM5120_PHY_CNTL3, adm5120_get_reg(ADM5120_PHY_CNTL3) |
		ADM5120_PHY_NTH);

	adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
	adm5120_set_reg(ADM5120_INT_ST, ADM5120_INTMASKALL);

	adm5120_dma_txh = (void *)KSEG1ADDR((u32)adm5120_dma_txh_v);
	adm5120_dma_txl = (void *)KSEG1ADDR((u32)adm5120_dma_txl_v);
	adm5120_dma_rxh = (void *)KSEG1ADDR((u32)adm5120_dma_rxh_v);
	adm5120_dma_rxl = (void *)KSEG1ADDR((u32)adm5120_dma_rxl_v);

	adm5120_dma_tx_init(adm5120_dma_txh, adm5120_skb_txh, ADM5120_DMA_TXH);
	adm5120_dma_tx_init(adm5120_dma_txl, adm5120_skb_txl, ADM5120_DMA_TXL);
	adm5120_dma_rx_init(adm5120_dma_rxh, adm5120_skb_rxh, ADM5120_DMA_RXH);
	adm5120_dma_rx_init(adm5120_dma_rxl, adm5120_skb_rxl, ADM5120_DMA_RXL);
	adm5120_set_reg(ADM5120_SEND_HBADDR, KSEG1ADDR(adm5120_dma_txh));
	adm5120_set_reg(ADM5120_SEND_LBADDR, KSEG1ADDR(adm5120_dma_txl));
	adm5120_set_reg(ADM5120_RECEIVE_HBADDR, KSEG1ADDR(adm5120_dma_rxh));
	adm5120_set_reg(ADM5120_RECEIVE_LBADDR, KSEG1ADDR(adm5120_dma_rxl));

	adm5120_set_vlan(vlan_matrix);

	for (i = 0; i < adm5120_nrdevs; i++) {
		adm5120_devs[i] = alloc_etherdev(sizeof(struct adm5120_sw));
		if (!adm5120_devs[i]) {
			err = -ENOMEM;
			goto out_int;
		}

		dev = adm5120_devs[i];
		SET_MODULE_OWNER(dev);
		memset(netdev_priv(dev), 0, sizeof(struct adm5120_sw));
		((struct adm5120_sw *)netdev_priv(dev))->port = i;
		dev->base_addr = SW_BASE;
		dev->irq = SW_IRQ;
		dev->open = adm5120_sw_open;
		dev->hard_start_xmit = adm5120_sw_tx;
		dev->stop = adm5120_sw_stop;
		dev->get_stats = adm5120_sw_stats;
		dev->set_multicast_list = adm5120_set_multicast_list;
		dev->do_ioctl = adm5120_do_ioctl;
		dev->tx_timeout = adm5120_tx_timeout;
		dev->watchdog_timeo = ETH_TX_TIMEOUT;
		dev->set_mac_address = adm5120_sw_set_mac_address;
		/* HACK alert!!! The original ADMtek driver assumes that the
		   MAC addresses can be read from flash, but Edimax decided
		   to leave that space intentionally blank... */
		memcpy(dev->dev_addr, "\x00\x50\xfc\x11\x22\x01", 6);
		dev->dev_addr[5] += i;
		adm5120_write_mac(dev);

		if ((err = register_netdev(dev))) {
			free_netdev(dev);
			goto out_int;
		}
		printk(KERN_INFO "%s: ADM5120 switch port%d\n", dev->name, i);
	}
	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_CRC_PADDING | ADM5120_DISUNALL | ADM5120_DISMCALL);

	return 0;

out_int:
	/* Undo everything that did succeed */
	for (; i; i--) {
		unregister_netdev(adm5120_devs[i-1]);
		free_netdev(adm5120_devs[i-1]);
	}
	free_irq(SW_IRQ, NULL);
out:
	printk(KERN_ERR "ADM5120 Ethernet switch init failed\n");
	return err;
}

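/*
 * Module unload: unregister and free all devices, release the interrupt
 * and free the receive buffers still held in the rx rings.
 */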
static void __exit adm5120_sw_exit(void)
{
	int i;

	for (i = 0; i < adm5120_nrdevs; i++) {
		unregister_netdev(adm5120_devs[i]);
		free_netdev(adm5120_devs[i]);
	}

	free_irq(SW_IRQ, NULL);

	for (i = 0; i < ADM5120_DMA_RXH; i++) {
		if (!adm5120_skb_rxh[i])
			break;
		kfree_skb(adm5120_skb_rxh[i]);
	}
	for (i = 0; i < ADM5120_DMA_RXL; i++) {
		if (!adm5120_skb_rxl[i])
			break;
		kfree_skb(adm5120_skb_rxl[i]);
	}
}

module_init(adm5120_sw_init);
module_exit(adm5120_sw_exit);