target/linux/generic-2.6/files/drivers/net/phy/mvswitch.c
/*
 * Marvell 88E6060 switch driver
 * Copyright (c) 2008 Felix Fietkau <nbd@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License v2 as published by the
 * Free Software Foundation
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include "mvswitch.h"

/* Undefine this to use trailer mode instead.
 * I don't know if header mode works with all chips */
#define HEADER_MODE 1
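
/*
 * Rough sketch of the two tagging schemes, for orientation only (the
 * authoritative field definitions are in mvswitch.h):
 *
 *   header mode:   [2 byte Marvell header][dst MAC][src MAC][type][payload]
 *   trailer mode:  [dst MAC][src MAC][type][payload][4 byte Marvell trailer]
 *
 * On tx the header carries the VLAN id plus the allowed destination port
 * mask, while the trailer carries an override flag plus the port mask; on
 * rx the same header/trailer is used by the switch to report the source
 * port of the frame.
 */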

MODULE_DESCRIPTION("Marvell 88E6060 Switch driver");
MODULE_AUTHOR("Felix Fietkau");
MODULE_LICENSE("GPL");

#define MVSWITCH_MAGIC 0x88E6060

struct mvswitch_priv {
	/* the driver's tx function */
	int (*hardstart)(struct sk_buff *skb, struct net_device *dev);
	struct vlan_group *grp;
	u8 vlans[16];
};
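
/*
 * priv->vlans[] is indexed by VLAN id (0..15); each entry holds a bitmask
 * of the switch ports that belong to that VLAN.  mvswitch_config_init()
 * below fills in the default map.
 */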

#define to_mvsw(_phy) ((struct mvswitch_priv *) (_phy)->priv)

static inline u16
r16(struct phy_device *phydev, int addr, int reg)
{
	return phydev->bus->read(phydev->bus, addr, reg);
}

static inline void
w16(struct phy_device *phydev, int addr, int reg, u16 val)
{
	phydev->bus->write(phydev->bus, addr, reg, val);
}
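
/*
 * r16()/w16() go through the raw MDIO bus accessors, bypassing the PHY
 * abstraction, because the switch registers sit at their own SMI
 * addresses.  The MV_PORTREG()/MV_SWITCHREG() macros used throughout this
 * file come from mvswitch.h and are assumed to expand to an
 * "address, register" argument pair, roughly along these lines
 * (illustrative names only, not the real header):
 *
 *	#define MV_PORTREG(_reg, _port)	MV_PORTBASE(_port), MV_PORT_##_reg
 *	#define MV_SWITCHREG(_reg)	MV_SWITCHBASE, MV_SWITCH_##_reg
 */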

static int
mvswitch_mangle_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvswitch_priv *priv;
	char *buf = NULL;
	u16 vid;

	priv = dev->phy_ptr;
	if (unlikely(!priv))
		goto error;

	if (unlikely(skb->len < 16))
		goto error;

#ifdef HEADER_MODE
	if (__vlan_hwaccel_get_tag(skb, &vid))
		goto error;

	if (skb_cloned(skb) || (skb->len <= 62) || (skb_headroom(skb) < MV_HEADER_SIZE)) {
		if (pskb_expand_head(skb, MV_HEADER_SIZE, (skb->len < 62 ? 62 - skb->len : 0), GFP_ATOMIC))
			goto error_expand;
		if (skb->len < 62)
			skb->len = 62;
	}
	buf = skb_push(skb, MV_HEADER_SIZE);
#else
	if (__vlan_get_tag(skb, &vid))
		goto error;

	if (unlikely((vid > 15 || !priv->vlans[vid])))
		goto error;

	if (skb->len <= 64) {
		if (pskb_expand_head(skb, 0, 64 + MV_TRAILER_SIZE - skb->len, GFP_ATOMIC))
			goto error_expand;

		buf = skb->data + 64;
		skb->len = 64 + MV_TRAILER_SIZE;
	} else {
		if (skb_cloned(skb) || unlikely(skb_tailroom(skb) < 4)) {
			if (pskb_expand_head(skb, 0, 4, GFP_ATOMIC))
				goto error_expand;
		}
		buf = skb_put(skb, 4);
	}

	/* move the ethernet header 4 bytes forward, overwriting the vlan tag */
	memmove(skb->data + 4, skb->data, 12);
	skb->data += 4;
	skb->len -= 4;
	skb->mac_header += 4;
#endif

	if (!buf)
		goto error;

#ifdef HEADER_MODE
	/* prepend the tag */
	*((__be16 *) buf) = cpu_to_be16(
		((vid << MV_HEADER_VLAN_S) & MV_HEADER_VLAN_M) |
		((priv->vlans[vid] << MV_HEADER_PORTS_S) & MV_HEADER_PORTS_M)
	);
#else
	/* append the tag */
	*((__be32 *) buf) = cpu_to_be32((
		(MV_TRAILER_OVERRIDE << MV_TRAILER_FLAGS_S) |
		((priv->vlans[vid] & MV_TRAILER_PORTS_M) << MV_TRAILER_PORTS_S)
	));
#endif

	return priv->hardstart(skb, dev);

error_expand:
	if (net_ratelimit())
		printk("%s: failed to expand/update skb for the switch\n", dev->name);

error:
	/* any errors? drop the packet! */
	dev_kfree_skb_any(skb);
	return 0;
}
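
#if 0
/*
 * Illustrative sketch only (kept out of the build): how the header-mode
 * tag above is computed for a given VLAN.  mv_build_header() is a
 * hypothetical helper, not part of the driver; the shift/mask constants
 * are the ones from mvswitch.h.
 */
static inline __be16
mv_build_header(struct mvswitch_priv *priv, u16 vid)
{
	return cpu_to_be16(((vid << MV_HEADER_VLAN_S) & MV_HEADER_VLAN_M) |
			   ((priv->vlans[vid] << MV_HEADER_PORTS_S) &
			    MV_HEADER_PORTS_M));
}
#endif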

static int
mvswitch_mangle_rx(struct sk_buff *skb, int napi)
{
	struct mvswitch_priv *priv;
	struct net_device *dev;
	int vlan = -1;
	unsigned char *buf;
	int i;

	dev = skb->dev;
	if (!dev)
		goto error;

	priv = dev->phy_ptr;
	if (!priv)
		goto error;

	if (!priv->grp)
		goto error;

#ifdef HEADER_MODE
	buf = skb->data;
	skb_pull(skb, MV_HEADER_SIZE);
#else
	buf = skb->data + skb->len - MV_TRAILER_SIZE;
	if (buf[0] != 0x80)
		goto error;
#endif

	/* look for the vlan matching the incoming port */
	for (i = 0; i < ARRAY_SIZE(priv->vlans); i++) {
		if ((1 << buf[1]) & priv->vlans[i])
			vlan = i;
	}

	if (vlan == -1)
		goto error;

	skb->protocol = eth_type_trans(skb, skb->dev);

	if (napi)
		return vlan_hwaccel_receive_skb(skb, priv->grp, vlan);
	else
		return vlan_hwaccel_rx(skb, priv->grp, vlan);

error:
	/* no vlan? eat the packet! */
	dev_kfree_skb_any(skb);
	return 0;
}
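
/*
 * On receive, buf[1] is the second byte of the Marvell header (or trailer),
 * which carries the number of the port the frame came in on.  The loop
 * above maps that source port back to a VLAN id by scanning priv->vlans[],
 * so the frame can be handed to the VLAN code with the right id.
 */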

static int
mvswitch_netif_rx(struct sk_buff *skb)
{
	return mvswitch_mangle_rx(skb, 0);
}

static int
mvswitch_netif_receive_skb(struct sk_buff *skb)
{
	return mvswitch_mangle_rx(skb, 1);
}


static void
mvswitch_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct mvswitch_priv *priv = dev->phy_ptr;
	priv->grp = grp;
}


static int
mvswitch_wait_mask(struct phy_device *pdev, int addr, int reg, u16 mask, u16 val)
{
	int i = 100;
	u16 r;

	do {
		r = r16(pdev, addr, reg) & mask;
		if (r == val)
			return 0;
	} while (--i > 0);
	return -ETIMEDOUT;
}
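
/*
 * Note that mvswitch_wait_mask() simply re-reads the register up to 100
 * times with no explicit delay; the latency of the MDIO transactions
 * themselves is what gives the switch time to finish the operation.
 */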

static int
mvswitch_config_init(struct phy_device *pdev)
{
	struct mvswitch_priv *priv = to_mvsw(pdev);
	struct net_device *dev = pdev->attached_dev;
	u8 vlmap = 0;
	int i;

	if (!dev)
		return -EINVAL;

	printk("%s: Marvell 88E6060 PHY driver attached.\n", dev->name);
	pdev->supported = ADVERTISED_100baseT_Full;
	pdev->advertising = ADVERTISED_100baseT_Full;
	dev->phy_ptr = priv;
	dev->irq = PHY_POLL;

	/* initialize default vlans */
	for (i = 0; i < MV_PORTS; i++)
		priv->vlans[(i == MV_WANPORT ? 2 : 1)] |= (1 << i);

	/* before entering reset, disable all ports */
	for (i = 0; i < MV_PORTS; i++)
		w16(pdev, MV_PORTREG(CONTROL, i), 0x00);

	msleep(2); /* wait for the status change to settle in */

	/* put the ATU in reset */
	w16(pdev, MV_SWITCHREG(ATU_CTRL), MV_ATUCTL_RESET);

	i = mvswitch_wait_mask(pdev, MV_SWITCHREG(ATU_CTRL), MV_ATUCTL_RESET, 0);
	if (i < 0) {
		printk("%s: Timeout waiting for the switch to reset.\n", dev->name);
		return i;
	}

	/* set the ATU flags */
	w16(pdev, MV_SWITCHREG(ATU_CTRL),
		MV_ATUCTL_NO_LEARN |
		MV_ATUCTL_ATU_1K |
		MV_ATUCTL_AGETIME(MV_ATUCTL_AGETIME_MIN) /* minimum without disabling ageing */
	);

	/* initialize the cpu port */
	w16(pdev, MV_PORTREG(CONTROL, MV_CPUPORT),
#ifdef HEADER_MODE
		MV_PORTCTRL_HEADER |
#else
		MV_PORTCTRL_RXTR |
		MV_PORTCTRL_TXTR |
#endif
		MV_PORTCTRL_ENABLED
	);
	/* wait for the phy change to settle in */
	msleep(2);
	for (i = 0; i < MV_PORTS; i++) {
		u8 pvid = 0;
		int j;

		vlmap = 0;

		/* look for the matching vlan */
		for (j = 0; j < ARRAY_SIZE(priv->vlans); j++) {
			if (priv->vlans[j] & (1 << i)) {
				vlmap = priv->vlans[j];
				pvid = j;
			}
		}
		/* leave port unconfigured if it's not part of a vlan */
		if (!vlmap)
			continue;

		/* add the cpu port to the allowed destinations list */
		vlmap |= (1 << MV_CPUPORT);

		/* take port out of its own vlan destination map */
		vlmap &= ~(1 << i);

		/* apply vlan settings */
		w16(pdev, MV_PORTREG(VLANMAP, i),
			MV_PORTVLAN_PORTS(vlmap) |
			MV_PORTVLAN_ID(i)
		);

		/* re-enable port */
		w16(pdev, MV_PORTREG(CONTROL, i),
			MV_PORTCTRL_ENABLED
		);
	}

	w16(pdev, MV_PORTREG(VLANMAP, MV_CPUPORT),
		MV_PORTVLAN_ID(MV_CPUPORT)
	);

	/* set the port association vector */
	for (i = 0; i <= MV_PORTS; i++) {
		w16(pdev, MV_PORTREG(ASSOC, i),
			MV_PORTASSOC_PORTS(1 << i)
		);
	}

	/* init switch control */
	w16(pdev, MV_SWITCHREG(CTRL),
		MV_SWITCHCTL_MSIZE |
		MV_SWITCHCTL_DROP
	);

	/* hook into the tx function */
	pdev->pkt_align = 2;
	priv->hardstart = dev->hard_start_xmit;
	pdev->netif_receive_skb = mvswitch_netif_receive_skb;
	pdev->netif_rx = mvswitch_netif_rx;
	dev->hard_start_xmit = mvswitch_mangle_tx;
	dev->vlan_rx_register = mvswitch_vlan_rx_register;
#ifdef HEADER_MODE
	dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
#else
	dev->features |= NETIF_F_HW_VLAN_RX;
#endif

	return 0;
}
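
/*
 * Example of the resulting default setup, assuming the usual mvswitch.h
 * values of MV_PORTS = 5 and MV_WANPORT = 4 (illustrative, the real values
 * live in the header): LAN ports 0-3 land in VLAN 1 (priv->vlans[1] = 0x0f)
 * and the WAN port in VLAN 2 (priv->vlans[2] = 0x10).  Each port's VLANMAP
 * then contains the other members of its VLAN plus the CPU port, with the
 * port itself masked out.
 */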

static int
mvswitch_read_status(struct phy_device *pdev)
{
	pdev->speed = SPEED_100;
	pdev->duplex = DUPLEX_FULL;
	pdev->state = PHY_UP;

	/* XXX ugly workaround: we can't force the switch
	 * to gracefully handle hosts moving from one port to another,
	 * so we have to regularly clear the ATU database */

	/* wait for the ATU to become available */
	mvswitch_wait_mask(pdev, MV_SWITCHREG(ATU_OP), MV_ATUOP_INPROGRESS, 0);

	/* flush the ATU */
	w16(pdev, MV_SWITCHREG(ATU_OP),
		MV_ATUOP_INPROGRESS |
		MV_ATUOP_FLUSH_ALL
	);

	/* wait for operation to complete */
	mvswitch_wait_mask(pdev, MV_SWITCHREG(ATU_OP), MV_ATUOP_INPROGRESS, 0);

	return 0;
}
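
/*
 * Since the device is registered with dev->irq = PHY_POLL, the PHY state
 * machine calls read_status() periodically, so the ATU flush above is
 * repeated on every poll interval.
 */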

static int
mvswitch_config_aneg(struct phy_device *phydev)
{
	return 0;
}

static void
mvswitch_remove(struct phy_device *pdev)
{
	struct mvswitch_priv *priv = to_mvsw(pdev);
	struct net_device *dev = pdev->attached_dev;

	if (dev) {
		/* restore old xmit handler */
		if (priv->hardstart)
			dev->hard_start_xmit = priv->hardstart;
		dev->vlan_rx_register = NULL;
		dev->vlan_rx_kill_vid = NULL;
		dev->phy_ptr = NULL;
		dev->features &= ~NETIF_F_HW_VLAN_RX;
	}
	kfree(priv);
}

static int
mvswitch_probe(struct phy_device *pdev)
{
	struct mvswitch_priv *priv;

	priv = kzalloc(sizeof(struct mvswitch_priv), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	pdev->priv = priv;

	return 0;
}

static int
mvswitch_fixup(struct phy_device *dev)
{
	u16 reg;

	if (dev->addr != 0x10)
		return 0;

	reg = dev->bus->read(dev->bus, MV_PORTREG(IDENT, 0)) & MV_IDENT_MASK;
	if (reg != MV_IDENT_VALUE)
		return 0;

	dev->phy_id = MVSWITCH_MAGIC;
	return 0;
}
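
/*
 * The fixup runs for every PHY that phylib discovers: if the device sits
 * at SMI address 0x10 and its port 0 identification register matches
 * MV_IDENT_VALUE, the phy_id is overwritten with the fake MVSWITCH_MAGIC
 * so that the phy_driver below binds to the switch instead of a generic
 * PHY driver.
 */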

static struct phy_driver mvswitch_driver = {
	.name		= "Marvell 88E6060",
	.phy_id		= MVSWITCH_MAGIC,
	.phy_id_mask	= 0xffffffff,
	.features	= PHY_BASIC_FEATURES,
	.probe		= &mvswitch_probe,
	.remove		= &mvswitch_remove,
	.config_init	= &mvswitch_config_init,
	.config_aneg	= &mvswitch_config_aneg,
	.read_status	= &mvswitch_read_status,
	.driver		= { .owner = THIS_MODULE },
};
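
/*
 * phy_id_mask 0xffffffff means this driver only matches the exact
 * MVSWITCH_MAGIC value planted by mvswitch_fixup() above.
 */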

static int __init
mvswitch_init(void)
{
	phy_register_fixup_for_id(PHY_ANY_ID, mvswitch_fixup);
	return phy_driver_register(&mvswitch_driver);
}

static void __exit
mvswitch_exit(void)
{
	phy_driver_unregister(&mvswitch_driver);
}

module_init(mvswitch_init);
module_exit(mvswitch_exit);