openwrt.git: target/linux/lantiq/patches-2.6.39/0011-MIPS-Lantiq-Add-ethernet-driver.patch
From 435de86088af82496bcba69165cd7422bb4622ec Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Fri, 6 May 2011 00:10:01 +0200
Subject: [PATCH 11/13] MIPS: Lantiq: Add ethernet driver

This patch adds the driver for the ETOP Packet Processing Engine (PPE32)
found inside the XWAY family of Lantiq MIPS SoCs. The driver provides working
100MBit Ethernet. Support for all 8 DMA channels, gigabit speeds and the
embedded switch found on the AR9/VR9 still needs to be implemented.

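A board hooks the driver up by filling in the struct ltq_eth_data added by
this patch and handing it to ltq_register_etop(). A minimal sketch of such
board code follows; the MAC address and the RMII mode are placeholders for
illustration, not taken from any real board file:

  /* hypothetical board setup code, not part of this patch */
  static struct ltq_eth_data board_eth_data = {
          /* placeholder MAC, normally read from the board's flash */
          .mac.sa_data = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
          .mii_mode    = PHY_INTERFACE_MODE_RMII,
  };

  static void __init board_eth_init(void)
  {
          ltq_register_etop(&board_eth_data);
  }
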
Signed-off-by: John Crispin <blogic@openwrt.org>
Signed-off-by: Ralph Hempel <ralph.hempel@lantiq.com>
Cc: linux-mips@linux-mips.org
Cc: netdev@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/2357/
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
 .../mips/include/asm/mach-lantiq/lantiq_platform.h |    7 +
 .../mips/include/asm/mach-lantiq/xway/lantiq_soc.h |    4 +-
 arch/mips/lantiq/xway/devices.c                    |   23 +
 arch/mips/lantiq/xway/devices.h                    |    1 +
 drivers/net/Kconfig                                |    7 +
 drivers/net/Makefile                               |    1 +
 drivers/net/lantiq_etop.c                          |  805 ++++++++++++++++++++
 7 files changed, 846 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/lantiq_etop.c

--- a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
@@ -10,6 +10,7 @@
 #define _LANTIQ_PLATFORM_H__
 
 #include <linux/mtd/partitions.h>
+#include <linux/socket.h>
 
 /* struct used to pass info to the pci core */
 enum {
@@ -43,4 +44,10 @@ struct ltq_pci_data {
 	int irq[16];
 };
 
+/* struct used to pass info to network drivers */
+struct ltq_eth_data {
+	struct sockaddr mac;
+	int mii_mode;
+};
+
 #endif
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -82,8 +82,8 @@
 #define PMU_SWITCH	0x10000000
 
 /* ETOP - ethernet */
-#define LTQ_PPE32_BASE_ADDR	0xBE180000
-#define LTQ_PPE32_SIZE		0x40000
+#define LTQ_ETOP_BASE_ADDR	0x1E180000
+#define LTQ_ETOP_SIZE		0x40000
 
 /* DMA */
 #define LTQ_DMA_BASE_ADDR	0x1E104100
--- a/arch/mips/lantiq/xway/devices.c
+++ b/arch/mips/lantiq/xway/devices.c
@@ -96,3 +96,26 @@ void __init ltq_register_ase_asc(void)
 	platform_device_register_simple("ltq_asc", 0,
 		ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources));
 }
+
+/* ethernet */
+static struct resource ltq_etop_resources = {
+	.name	= "etop",
+	.start	= LTQ_ETOP_BASE_ADDR,
+	.end	= LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1,
+	.flags	= IORESOURCE_MEM,
+};
+
+static struct platform_device ltq_etop = {
+	.name		= "ltq_etop",
+	.resource	= &ltq_etop_resources,
+	.num_resources	= 1,
+};
+
+void __init
+ltq_register_etop(struct ltq_eth_data *eth)
+{
+	if (eth) {
+		ltq_etop.dev.platform_data = eth;
+		platform_device_register(&ltq_etop);
+	}
+}
--- a/arch/mips/lantiq/xway/devices.h
+++ b/arch/mips/lantiq/xway/devices.h
@@ -15,5 +15,6 @@
 extern void ltq_register_gpio(void);
 extern void ltq_register_gpio_stp(void);
 extern void ltq_register_ase_asc(void);
+extern void ltq_register_etop(struct ltq_eth_data *eth);
 
 #endif
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2017,6 +2017,13 @@ config FTMAC100
 	  from Faraday. It is used on Faraday A320, Andes AG101 and some
 	  other ARM/NDS32 SoC's.
 
+config LANTIQ_ETOP
+	tristate "Lantiq SoC ETOP driver"
+	depends on SOC_TYPE_XWAY
+	help
+	  Support for the MII0 inside the Lantiq SoC
+
+
 source "drivers/net/fs_enet/Kconfig"
 
 source "drivers/net/octeon/Kconfig"
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -259,6 +259,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_ENC28J60) += enc28j60.o
 obj-$(CONFIG_ETHOC) += ethoc.o
 obj-$(CONFIG_GRETH) += greth.o
+obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
 
 obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
 
--- /dev/null
+++ b/drivers/net/lantiq_etop.c
@@ -0,0 +1,813 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/checksum.h>
+
+#include <lantiq_soc.h>
+#include <xway_dma.h>
+#include <lantiq_platform.h>
+
+#define LTQ_ETOP_MDIO		0x11804
+#define MDIO_REQUEST		0x80000000
+#define MDIO_READ		0x40000000
+#define MDIO_ADDR_MASK		0x1f
+#define MDIO_ADDR_OFFSET	0x15
+#define MDIO_REG_MASK		0x1f
+#define MDIO_REG_OFFSET		0x10
+#define MDIO_VAL_MASK		0xffff
+
+#define PPE32_CGEN		0x800
+#define LTQ_PPE32_ENET_MAC_CFG	0x1840
+
+#define LTQ_ETOP_ENETS0		0x11850
+#define LTQ_ETOP_MAC_DA0	0x1186C
+#define LTQ_ETOP_MAC_DA1	0x11870
+#define LTQ_ETOP_CFG		0x16020
+#define LTQ_ETOP_IGPLEN		0x16080
+
+#define MAX_DMA_CHAN		0x8
+#define MAX_DMA_CRC_LEN		0x4
+#define MAX_DMA_DATA_LEN	0x600
+
+#define ETOP_FTCU		BIT(28)
+#define ETOP_MII_MASK		0xf
+#define ETOP_MII_NORMAL		0xd
+#define ETOP_MII_REVERSE	0xe
+#define ETOP_PLEN_UNDER		0x40
+#define ETOP_CGEN		0x800
+
+/* use 2 static channels for TX/RX */
+#define LTQ_ETOP_TX_CHANNEL	1
+#define LTQ_ETOP_RX_CHANNEL	6
+#define IS_TX(x)		(x == LTQ_ETOP_TX_CHANNEL)
+#define IS_RX(x)		(x == LTQ_ETOP_RX_CHANNEL)
+
+#define ltq_etop_r32(x)		ltq_r32(ltq_etop_membase + (x))
+#define ltq_etop_w32(x, y)	ltq_w32(x, ltq_etop_membase + (y))
+#define ltq_etop_w32_mask(x, y, z)	\
+	ltq_w32_mask(x, y, ltq_etop_membase + (z))
+
+#define DRV_VERSION	"1.0"
+
+#ifndef netdev_err
+#define netdev_err(a, b, ...)	printk(b, ##__VA_ARGS__)
+#endif
+
+#ifndef pr_warn
+#define pr_warn pr_warning
+#endif
+
+static void __iomem *ltq_etop_membase;
+
+struct ltq_etop_chan {
+	int idx;
+	int tx_free;
+	struct net_device *netdev;
+	struct napi_struct napi;
+	struct ltq_dma_channel dma;
+	struct sk_buff *skb[LTQ_DESC_NUM];
+};
+
+struct ltq_etop_priv {
+	struct net_device *netdev;
+	struct ltq_eth_data *pldata;
+	struct resource *res;
+
+	struct mii_bus *mii_bus;
+	struct phy_device *phydev;
+
+	struct ltq_etop_chan ch[MAX_DMA_CHAN];
+	int tx_free[MAX_DMA_CHAN >> 1];
+
+	spinlock_t lock;
+};
+
+static int
+ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
+{
+	ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
+	if (!ch->skb[ch->dma.desc])
+		return -ENOMEM;
+	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
+		ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
+		DMA_FROM_DEVICE);
+	ch->dma.desc_base[ch->dma.desc].addr =
+		CPHYSADDR(ch->skb[ch->dma.desc]->data);
+	ch->dma.desc_base[ch->dma.desc].ctl =
+		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+		MAX_DMA_DATA_LEN;
+	skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
+	return 0;
+}
+
+static void
+ltq_etop_hw_receive(struct ltq_etop_chan *ch)
+{
+	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+	struct sk_buff *skb = ch->skb[ch->dma.desc];
+	int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (ltq_etop_alloc_skb(ch)) {
+		netdev_err(ch->netdev,
+			"failed to allocate new rx buffer, stopping DMA\n");
+		ltq_dma_close(&ch->dma);
+	}
+	ch->dma.desc++;
+	ch->dma.desc %= LTQ_DESC_NUM;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	skb_put(skb, len);
+	skb->dev = ch->netdev;
+	skb->protocol = eth_type_trans(skb, ch->netdev);
+	netif_receive_skb(skb);
+}
+
+static int
+ltq_etop_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct ltq_etop_chan *ch = container_of(napi,
+				struct ltq_etop_chan, napi);
+	int rx = 0;
+	int complete = 0;
+
+	while ((rx < budget) && !complete) {
+		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+
+		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+			ltq_etop_hw_receive(ch);
+			rx++;
+		} else {
+			complete = 1;
+		}
+	}
+	if (complete || !rx) {
+		napi_complete(&ch->napi);
+		ltq_dma_ack_irq(&ch->dma);
+	}
+	return rx;
+}
+
+static int
+ltq_etop_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct ltq_etop_chan *ch =
+		container_of(napi, struct ltq_etop_chan, napi);
+	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+	struct netdev_queue *txq =
+		netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	while ((ch->dma.desc_base[ch->tx_free].ctl &
+			(LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+		dev_kfree_skb_any(ch->skb[ch->tx_free]);
+		ch->skb[ch->tx_free] = NULL;
+		memset(&ch->dma.desc_base[ch->tx_free], 0,
+			sizeof(struct ltq_dma_desc));
+		ch->tx_free++;
+		ch->tx_free %= LTQ_DESC_NUM;
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (netif_tx_queue_stopped(txq))
+		netif_tx_start_queue(txq);
+	napi_complete(&ch->napi);
+	ltq_dma_ack_irq(&ch->dma);
+	return 1;
+}
+
+static irqreturn_t
+ltq_etop_dma_irq(int irq, void *_priv)
+{
+	struct ltq_etop_priv *priv = _priv;
+	int ch = irq - LTQ_DMA_CH0_INT;
+
+	napi_schedule(&priv->ch[ch].napi);
+	return IRQ_HANDLED;
+}
+
+static void
+ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+
+	ltq_dma_free(&ch->dma);
+	if (ch->dma.irq)
+		free_irq(ch->dma.irq, priv);
+	if (IS_RX(ch->idx)) {
+		int desc;
+		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
+			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+	}
+}
+
+static void
+ltq_etop_hw_exit(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	int i;
+
+	ltq_pmu_disable(PMU_PPE);
+	for (i = 0; i < MAX_DMA_CHAN; i++)
+		if (IS_TX(i) || IS_RX(i))
+			ltq_etop_free_channel(dev, &priv->ch[i]);
+}
+
+static int
+ltq_etop_hw_init(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	int i;
+
+	ltq_pmu_enable(PMU_PPE);
+
+	switch (priv->pldata->mii_mode) {
+	case PHY_INTERFACE_MODE_RMII:
+		ltq_etop_w32_mask(ETOP_MII_MASK,
+			ETOP_MII_REVERSE, LTQ_ETOP_CFG);
+		break;
+
+	case PHY_INTERFACE_MODE_MII:
+		ltq_etop_w32_mask(ETOP_MII_MASK,
+			ETOP_MII_NORMAL, LTQ_ETOP_CFG);
+		break;
+
+	default:
+		netdev_err(dev, "unknown mii mode %d\n",
+			priv->pldata->mii_mode);
+		return -ENOTSUPP;
+	}
+
+	/* enable crc generation */
+	ltq_etop_w32(PPE32_CGEN, LTQ_PPE32_ENET_MAC_CFG);
+
+	ltq_dma_init_port(DMA_PORT_ETOP);
+
+	for (i = 0; i < MAX_DMA_CHAN; i++) {
+		int irq = LTQ_DMA_CH0_INT + i;
+		struct ltq_etop_chan *ch = &priv->ch[i];
+
+		ch->idx = ch->dma.nr = i;
+
+		if (IS_TX(i)) {
+			ltq_dma_alloc_tx(&ch->dma);
+			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
+				"etop_tx", priv);
+		} else if (IS_RX(i)) {
+			ltq_dma_alloc_rx(&ch->dma);
+			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
+					ch->dma.desc++)
+				if (ltq_etop_alloc_skb(ch))
+					return -ENOMEM;
+			ch->dma.desc = 0;
+			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
+				"etop_rx", priv);
+		}
+		ch->dma.irq = irq;
+	}
+	return 0;
+}
+
+static void
+ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver, "Lantiq ETOP");
+	strcpy(info->bus_info, "internal");
+	strcpy(info->version, DRV_VERSION);
+}
+
+static int
+ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+
+	return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int
+ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+
+	return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int
+ltq_etop_nway_reset(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+
+	return phy_start_aneg(priv->phydev);
+}
+
+static const struct ethtool_ops ltq_etop_ethtool_ops = {
+	.get_drvinfo = ltq_etop_get_drvinfo,
+	.get_settings = ltq_etop_get_settings,
+	.set_settings = ltq_etop_set_settings,
+	.nway_reset = ltq_etop_nway_reset,
+};
+
+static int
+ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
+{
+	u32 val = MDIO_REQUEST |
+		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
+		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
+		phy_data;
+
+	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
+		;
+	ltq_etop_w32(val, LTQ_ETOP_MDIO);
+	return 0;
+}
+
+static int
+ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+	u32 val = MDIO_REQUEST | MDIO_READ |
+		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
+		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);
+
+	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
+		;
+	ltq_etop_w32(val, LTQ_ETOP_MDIO);
+	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
+		;
+	val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
+	return val;
+}
+
+static void
+ltq_etop_mdio_link(struct net_device *dev)
+{
+	/* nothing to do */
+}
+
+static int
+ltq_etop_mdio_probe(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = NULL;
+	int phy_addr;
+
+	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+		if (priv->mii_bus->phy_map[phy_addr]) {
+			phydev = priv->mii_bus->phy_map[phy_addr];
+			break;
+		}
+	}
+
+	if (!phydev) {
+		netdev_err(dev, "no PHY found\n");
+		return -ENODEV;
+	}
+
+	phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
+			0, priv->pldata->mii_mode);
+
+	if (IS_ERR(phydev)) {
+		netdev_err(dev, "Could not attach to PHY\n");
+		return PTR_ERR(phydev);
+	}
+
+	phydev->supported &= (SUPPORTED_10baseT_Half
+			      | SUPPORTED_10baseT_Full
+			      | SUPPORTED_100baseT_Half
+			      | SUPPORTED_100baseT_Full
+			      | SUPPORTED_Autoneg
+			      | SUPPORTED_MII
+			      | SUPPORTED_TP);
+
+	phydev->advertising = phydev->supported;
+	priv->phydev = phydev;
+	pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
+		dev->name, phydev->drv->name,
+		dev_name(&phydev->dev), phydev->irq);
+
+	return 0;
+}
+
+static int
+ltq_etop_mdio_init(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	int i;
+	int err;
+
+	priv->mii_bus = mdiobus_alloc();
+	if (!priv->mii_bus) {
+		netdev_err(dev, "failed to allocate mii bus\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	priv->mii_bus->priv = dev;
+	priv->mii_bus->read = ltq_etop_mdio_rd;
+	priv->mii_bus->write = ltq_etop_mdio_wr;
+	priv->mii_bus->name = "ltq_mii";
+	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+	priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!priv->mii_bus->irq) {
+		err = -ENOMEM;
+		goto err_out_free_mdiobus;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; ++i)
+		priv->mii_bus->irq[i] = PHY_POLL;
+
+	if (mdiobus_register(priv->mii_bus)) {
+		err = -ENXIO;
+		goto err_out_free_mdio_irq;
+	}
+
+	if (ltq_etop_mdio_probe(dev)) {
+		err = -ENXIO;
+		goto err_out_unregister_bus;
+	}
+	return 0;
+
+err_out_unregister_bus:
+	mdiobus_unregister(priv->mii_bus);
+err_out_free_mdio_irq:
+	kfree(priv->mii_bus->irq);
+err_out_free_mdiobus:
+	mdiobus_free(priv->mii_bus);
+err_out:
+	return err;
+}
+
+static void
+ltq_etop_mdio_cleanup(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+
+	phy_disconnect(priv->phydev);
+	mdiobus_unregister(priv->mii_bus);
+	kfree(priv->mii_bus->irq);
+	mdiobus_free(priv->mii_bus);
+}
+
+static int
+ltq_etop_open(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < MAX_DMA_CHAN; i++) {
+		struct ltq_etop_chan *ch = &priv->ch[i];
+
+		if (!IS_TX(i) && (!IS_RX(i)))
+			continue;
+		ltq_dma_open(&ch->dma);
+		napi_enable(&ch->napi);
+	}
+	phy_start(priv->phydev);
+	netif_tx_start_all_queues(dev);
+	return 0;
+}
+
+static int
+ltq_etop_stop(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	int i;
+
+	netif_tx_stop_all_queues(dev);
+	phy_stop(priv->phydev);
+	for (i = 0; i < MAX_DMA_CHAN; i++) {
+		struct ltq_etop_chan *ch = &priv->ch[i];
+
+		if (!IS_RX(i) && !IS_TX(i))
+			continue;
+		napi_disable(&ch->napi);
+		ltq_dma_close(&ch->dma);
+	}
+	return 0;
+}
+
+static int
+ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	int queue = skb_get_queue_mapping(skb);
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
+	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+	int len;
+	unsigned long flags;
+	u32 byte_offset;
+
+	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+
+	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
+		dev_kfree_skb_any(skb);
+		netdev_err(dev, "tx ring full\n");
+		netif_tx_stop_queue(txq);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* dma needs to start on a 16 byte aligned address */
+	byte_offset = CPHYSADDR(skb->data) % 16;
+	ch->skb[ch->dma.desc] = skb;
+
+	dev->trans_start = jiffies;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
+						DMA_TO_DEVICE)) - byte_offset;
+	wmb();
+	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
+		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
+	ch->dma.desc++;
+	ch->dma.desc %= LTQ_DESC_NUM;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
+		netif_tx_stop_queue(txq);
+
+	return NETDEV_TX_OK;
+}
+
+static int
+ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret = eth_change_mtu(dev, new_mtu);
+
+	if (!ret) {
+		struct ltq_etop_priv *priv = netdev_priv(dev);
+		unsigned long flags;
+
+		spin_lock_irqsave(&priv->lock, flags);
+		ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu,
+			LTQ_ETOP_IGPLEN);
+		spin_unlock_irqrestore(&priv->lock, flags);
+	}
+	return ret;
+}
+
+static int
+ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+
+	/* TODO: mii-tool reports "No MII transceiver present!." ?! */
+	return phy_mii_ioctl(priv->phydev, rq, cmd);
+}
+
+static int
+ltq_etop_set_mac_address(struct net_device *dev, void *p)
+{
+	int ret = eth_mac_addr(dev, p);
+
+	if (!ret) {
+		struct ltq_etop_priv *priv = netdev_priv(dev);
+		unsigned long flags;
+
+		/* store the mac for the unicast filter */
+		spin_lock_irqsave(&priv->lock, flags);
+		ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
+		ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
+			LTQ_ETOP_MAC_DA1);
+		spin_unlock_irqrestore(&priv->lock, flags);
+	}
+	return ret;
+}
+
+static void
+ltq_etop_set_multicast_list(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	/* ensure that the unicast filter is not enabled in promiscuous mode */
+	spin_lock_irqsave(&priv->lock, flags);
+	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
+		ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0);
+	else
+		ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static u16
+ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	/* we are currently only using the first queue */
+	return 0;
+}
+
+static int
+ltq_etop_init(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	struct sockaddr mac;
+	int err;
+
+	ether_setup(dev);
+	dev->watchdog_timeo = 10 * HZ;
+	err = ltq_etop_hw_init(dev);
+	if (err)
+		goto err_hw;
+	ltq_etop_change_mtu(dev, 1500);
+
+	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
+	if (!is_valid_ether_addr(mac.sa_data)) {
+		pr_warn("etop: invalid MAC, using random\n");
+		random_ether_addr(mac.sa_data);
+	}
+
+	err = ltq_etop_set_mac_address(dev, &mac);
+	if (err)
+		goto err_netdev;
+	ltq_etop_set_multicast_list(dev);
+	err = ltq_etop_mdio_init(dev);
+	if (err)
+		goto err_netdev;
+	return 0;
+
+err_netdev:
+	unregister_netdev(dev);
+	free_netdev(dev);
+err_hw:
+	ltq_etop_hw_exit(dev);
+	return err;
+}
+
+static void
+ltq_etop_tx_timeout(struct net_device *dev)
+{
+	int err;
+
+	ltq_etop_hw_exit(dev);
+	err = ltq_etop_hw_init(dev);
+	if (err)
+		goto err_hw;
+	dev->trans_start = jiffies;
+	netif_wake_queue(dev);
+	return;
+
+err_hw:
+	ltq_etop_hw_exit(dev);
+	netdev_err(dev, "failed to restart etop after TX timeout\n");
+}
+
+static const struct net_device_ops ltq_eth_netdev_ops = {
+	.ndo_open = ltq_etop_open,
+	.ndo_stop = ltq_etop_stop,
+	.ndo_start_xmit = ltq_etop_tx,
+	.ndo_change_mtu = ltq_etop_change_mtu,
+	.ndo_do_ioctl = ltq_etop_ioctl,
+	.ndo_set_mac_address = ltq_etop_set_mac_address,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_multicast_list = ltq_etop_set_multicast_list,
+	.ndo_select_queue = ltq_etop_select_queue,
+	.ndo_init = ltq_etop_init,
+	.ndo_tx_timeout = ltq_etop_tx_timeout,
+};
+
+static int __init
+ltq_etop_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct ltq_etop_priv *priv;
+	struct resource *res;
+	int err;
+	int i;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to get etop resource\n");
+		err = -ENOENT;
+		goto err_out;
+	}
+
+	res = devm_request_mem_region(&pdev->dev, res->start,
+		resource_size(res), dev_name(&pdev->dev));
+	if (!res) {
+		dev_err(&pdev->dev, "failed to request etop resource\n");
+		err = -EBUSY;
+		goto err_out;
+	}
+
+	ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
+		res->start, resource_size(res));
+	if (!ltq_etop_membase) {
+		dev_err(&pdev->dev, "failed to remap etop engine %d\n",
+			pdev->id);
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
+	strcpy(dev->name, "eth%d");
+	dev->netdev_ops = &ltq_eth_netdev_ops;
+	dev->ethtool_ops = &ltq_etop_ethtool_ops;
+	priv = netdev_priv(dev);
+	priv->res = res;
+	priv->pldata = dev_get_platdata(&pdev->dev);
+	priv->netdev = dev;
+	spin_lock_init(&priv->lock);
+
+	for (i = 0; i < MAX_DMA_CHAN; i++) {
+		if (IS_TX(i))
+			netif_napi_add(dev, &priv->ch[i].napi,
+				ltq_etop_poll_tx, 8);
+		else if (IS_RX(i))
+			netif_napi_add(dev, &priv->ch[i].napi,
+				ltq_etop_poll_rx, 32);
+		priv->ch[i].netdev = dev;
+	}
+
+	err = register_netdev(dev);
+	if (err)
+		goto err_free;
+
+	platform_set_drvdata(pdev, dev);
+	return 0;
+
+err_free:
+	kfree(dev);
+err_out:
+	return err;
+}
+
+static int __devexit
+ltq_etop_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+
+	if (dev) {
+		netif_tx_stop_all_queues(dev);
+		ltq_etop_hw_exit(dev);
+		ltq_etop_mdio_cleanup(dev);
+		unregister_netdev(dev);
+	}
+	return 0;
+}
+
+static struct platform_driver ltq_mii_driver = {
+	.remove = __devexit_p(ltq_etop_remove),
+	.driver = {
+		.name = "ltq_etop",
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init
+init_ltq_etop(void)
+{
+	int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
+
+	if (ret)
+		pr_err("ltq_etop: Error registering platform driver!");
+	return ret;
+}
+
+static void __exit
+exit_ltq_etop(void)
+{
+	platform_driver_unregister(&ltq_mii_driver);
+}
+
+module_init(init_ltq_etop);
+module_exit(exit_ltq_etop);
+
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Lantiq SoC ETOP");
+MODULE_LICENSE("GPL");