target/linux/lantiq/patches/230-xway_etop.patch
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -235,6 +235,12 @@
 
 source "drivers/net/arm/Kconfig"
 
+config LANTIQ_ETOP
+	tristate "Lantiq SoC ETOP driver"
+	depends on SOC_LANTIQ_XWAY
+	help
+	  Support for the MII0 inside the Lantiq SoC
+
 config AX88796
 	tristate "ASIX AX88796 NE2000 clone support"
 	depends on ARM || MIPS || SUPERH
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -212,6 +212,7 @@
 obj-$(CONFIG_MVME16x_NET) += 82596.o
 obj-$(CONFIG_BVME6000_NET) += 82596.o
 obj-$(CONFIG_SC92031) += sc92031.o
+obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
 
 # This is also a 82596 and should probably be merged
 obj-$(CONFIG_LP486E) += lp486e.o
--- /dev/null
+++ b/drivers/net/lantiq_etop.c
@@ -0,0 +1,557 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2005 Wu Qi Ming <Qi-Ming.Wu@infineon.com>
+ * Copyright (C) 2008 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/checksum.h>
+
+#include <xway.h>
+#include <xway_dma.h>
+#include <lantiq_platform.h>
+
+#define ETHERNET_PACKET_DMA_BUFFER_SIZE 0x600
+#define LQ_PPE32_MEM_MAP ((u32 *)(LQ_PPE32_BASE_ADDR + 0x10000))
+#define LQ_PPE32_SRST ((u32 *)(LQ_PPE32_BASE_ADDR + 0x10080))
+
+/* mdio access */
+#define LQ_PPE32_MDIO_CFG ((u32 *)(LQ_PPE32_BASE_ADDR + 0x11800))
+#define LQ_PPE32_MDIO_ACC ((u32 *)(LQ_PPE32_BASE_ADDR + 0x11804))
+
+#define MDIO_ACC_REQUEST 0x80000000
+#define MDIO_ACC_READ 0x40000000
+#define MDIO_ACC_ADDR_MASK 0x1f
+#define MDIO_ACC_ADDR_OFFSET 0x15
+#define MDIO_ACC_REG_MASK 0x1f
+#define MDIO_ACC_REG_OFFSET 0x10
+#define MDIO_ACC_VAL_MASK 0xffff
+
+/* configuration */
+#define LQ_PPE32_CFG ((u32 *)(LQ_PPE32_MEM_MAP + 0x1808))
+
+#define PPE32_MII_MASK 0xfffffffc
+#define PPE32_MII_NORMAL 0x8
+#define PPE32_MII_REVERSE 0xe
+
+/* packet length */
+#define LQ_PPE32_IG_PLEN_CTRL ((u32 *)(LQ_PPE32_MEM_MAP + 0x1820))
+
+#define PPE32_PLEN_OVER 0x5ee
+#define PPE32_PLEN_UNDER 0x400000
+
+/* enet */
+#define LQ_PPE32_ENET_MAC_CFG ((u32 *)(LQ_PPE32_MEM_MAP + 0x1840))
+
+#define PPE32_CGEN 0x800
+
+struct lq_mii_priv {
+	struct net_device_stats stats;
+	struct dma_device_info *dma_device;
+	struct sk_buff *skb;
+
+	struct mii_bus *mii_bus;
+	struct phy_device *phydev;
+	int oldlink, oldspeed, oldduplex;
+};
+
+static struct net_device *lq_etop_dev;
+static unsigned char mac_addr[MAX_ADDR_LEN];
+
+static int lq_mdiobus_write(struct mii_bus *bus, int phy_addr,
+				int phy_reg, u16 phy_data)
+{
+	u32 val = MDIO_ACC_REQUEST |
+		((phy_addr & MDIO_ACC_ADDR_MASK) << MDIO_ACC_ADDR_OFFSET) |
+		((phy_reg & MDIO_ACC_REG_MASK) << MDIO_ACC_REG_OFFSET) |
+		phy_data;
+
+	while (lq_r32(LQ_PPE32_MDIO_ACC) & MDIO_ACC_REQUEST)
+		;
+	lq_w32(val, LQ_PPE32_MDIO_ACC);
+
+	return 0;
+}
+
+static int lq_mdiobus_read(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+	u32 val = MDIO_ACC_REQUEST | MDIO_ACC_READ |
+		((phy_addr & MDIO_ACC_ADDR_MASK) << MDIO_ACC_ADDR_OFFSET) |
+		((phy_reg & MDIO_ACC_REG_MASK) << MDIO_ACC_REG_OFFSET);
+
+	while (lq_r32(LQ_PPE32_MDIO_ACC) & MDIO_ACC_REQUEST)
+		;
+	lq_w32(val, LQ_PPE32_MDIO_ACC);
+	while (lq_r32(LQ_PPE32_MDIO_ACC) & MDIO_ACC_REQUEST)
+		;
+	val = lq_r32(LQ_PPE32_MDIO_ACC) & MDIO_ACC_VAL_MASK;
+	return val;
+}
+
+int lq_mii_open(struct net_device *dev)
+{
+	struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(dev);
+	struct dma_device_info *dma_dev = priv->dma_device;
+	int i;
+
+	for (i = 0; i < dma_dev->max_rx_chan_num; i++) {
+		if ((dma_dev->rx_chan[i])->control == LQ_DMA_CH_ON)
+			(dma_dev->rx_chan[i])->open(dma_dev->rx_chan[i]);
+	}
+	netif_start_queue(dev);
+	return 0;
+}
+
+int lq_mii_release(struct net_device *dev)
+{
+	struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(dev);
+	struct dma_device_info *dma_dev = priv->dma_device;
+	int i;
+
+	for (i = 0; i < dma_dev->max_rx_chan_num; i++)
+		dma_dev->rx_chan[i]->close(dma_dev->rx_chan[i]);
+	netif_stop_queue(dev);
+	return 0;
+}
+
+int lq_mii_hw_receive(struct net_device *dev, struct dma_device_info *dma_dev)
+{
+	struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(dev);
+	unsigned char *buf = NULL;
+	struct sk_buff *skb = NULL;
+	int len = 0;
+
+	len = dma_device_read(dma_dev, &buf, (void **)&skb);
+
+	if (len >= ETHERNET_PACKET_DMA_BUFFER_SIZE) {
+		printk(KERN_INFO "lq_etop: packet too large %d\n", len);
+		goto lq_mii_hw_receive_err_exit;
+	}
+
+	/* remove CRC */
+	len -= 4;
+	if (skb == NULL) {
+		printk(KERN_INFO "lq_etop: cannot restore pointer\n");
+		goto lq_mii_hw_receive_err_exit;
+	}
+
+	if (len > (skb->end - skb->tail)) {
+		printk(KERN_INFO "lq_etop: BUG, len:%d end:%p tail:%p\n",
+			(len+4), skb->end, skb->tail);
+		goto lq_mii_hw_receive_err_exit;
+	}
+
+	skb_put(skb, len);
+	skb->dev = dev;
+
+	if (priv->phydev && priv->phydev->netif_rx) {
+		priv->phydev->netif_rx(skb);
+	} else {
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_rx(skb);
+	}
+
+	priv->stats.rx_packets++;
+	priv->stats.rx_bytes += len;
+	return 0;
+
+lq_mii_hw_receive_err_exit:
+	if (len == 0) {
+		if (skb)
+			dev_kfree_skb_any(skb);
+		priv->stats.rx_errors++;
+		priv->stats.rx_dropped++;
+		return -EIO;
+	} else {
+		return len;
+	}
+}
+
+int lq_mii_hw_tx(char *buf, int len, struct net_device *dev)
+{
+	int ret = 0;
+	struct lq_mii_priv *priv = netdev_priv(dev);
+	struct dma_device_info *dma_dev = priv->dma_device;
+	ret = dma_device_write(dma_dev, buf, len, priv->skb);
+	return ret;
+}
+
+int lq_mii_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	int len;
+	char *data;
+	struct lq_mii_priv *priv = netdev_priv(dev);
+	struct dma_device_info *dma_dev = priv->dma_device;
+
+	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+	data = skb->data;
+	priv->skb = skb;
+	dev->trans_start = jiffies;
+	/* TODO: we got more than 1 dma channel,
+	   so we should do something intelligent here to select one */
+	dma_dev->current_tx_chan = 0;
+
+	wmb();
+
+	if (lq_mii_hw_tx(data, len, dev) != len) {
+		dev_kfree_skb_any(skb);
+		priv->stats.tx_errors++;
+		priv->stats.tx_dropped++;
+	} else {
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += len;
+	}
+
+	return 0;
+}
+
+void lq_mii_tx_timeout(struct net_device *dev)
+{
+	int i;
+	struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(dev);
+
+	priv->stats.tx_errors++;
+	for (i = 0; i < priv->dma_device->max_tx_chan_num; i++)
+		priv->dma_device->tx_chan[i]->disable_irq(priv->dma_device->tx_chan[i]);
+	netif_wake_queue(dev);
+	return;
+}
+
+int dma_intr_handler(struct dma_device_info *dma_dev, int status)
+{
+	int i;
+
+	switch (status) {
+	case RCV_INT:
+		lq_mii_hw_receive(lq_etop_dev, dma_dev);
+		break;
+
+	case TX_BUF_FULL_INT:
+		printk(KERN_INFO "lq_etop: tx buffer full\n");
+		netif_stop_queue(lq_etop_dev);
+		for (i = 0; i < dma_dev->max_tx_chan_num; i++) {
+			if ((dma_dev->tx_chan[i])->control == LQ_DMA_CH_ON)
+				dma_dev->tx_chan[i]->enable_irq(dma_dev->tx_chan[i]);
+		}
+		break;
+
+	case TRANSMIT_CPT_INT:
+		for (i = 0; i < dma_dev->max_tx_chan_num; i++)
+			dma_dev->tx_chan[i]->disable_irq(dma_dev->tx_chan[i]);
+
+		netif_wake_queue(lq_etop_dev);
+		break;
+	}
+
+	return 0;
+}
+
+unsigned char *lq_etop_dma_buffer_alloc(int len, int *byte_offset, void **opt)
+{
+	unsigned char *buffer = NULL;
+	struct sk_buff *skb = NULL;
+
+	skb = dev_alloc_skb(ETHERNET_PACKET_DMA_BUFFER_SIZE);
+	if (skb == NULL)
+		return NULL;
+
+	buffer = (unsigned char *)(skb->data);
+	skb_reserve(skb, 2);
+	*(int *)opt = (int)skb;
+	*byte_offset = 2;
+
+	return buffer;
+}
+
+void lq_etop_dma_buffer_free(unsigned char *dataptr, void *opt)
+{
+	struct sk_buff *skb = NULL;
+
+	if (opt == NULL) {
+		kfree(dataptr);
+	} else {
+		skb = (struct sk_buff *)opt;
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static void
+lq_adjust_link(struct net_device *dev)
+{
+	struct lq_mii_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = priv->phydev;
+	int new_state = 0;
+
+	/* Did anything change? */
+	if (priv->oldlink != phydev->link ||
+			priv->oldduplex != phydev->duplex ||
+			priv->oldspeed != phydev->speed) {
+		/* Yes, so update status and mark as changed */
+		new_state = 1;
+		priv->oldduplex = phydev->duplex;
+		priv->oldspeed = phydev->speed;
+		priv->oldlink = phydev->link;
+	}
+
+	/* If link status changed, show new status */
+	if (new_state)
+		phy_print_status(phydev);
+}
+
+static int mii_probe(struct net_device *dev)
+{
+	struct lq_mii_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = NULL;
+	int phy_addr;
+
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->oldduplex = -1;
+
+	/* find the first (lowest address) PHY on the current MAC's MII bus */
+	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+		if (priv->mii_bus->phy_map[phy_addr]) {
+			phydev = priv->mii_bus->phy_map[phy_addr];
+			break; /* break out with first one found */
+		}
+	}
+
+	if (!phydev) {
+		printk(KERN_ERR "%s: no PHY found\n", dev->name);
+		return -ENODEV;
+	}
+
+	/* now we are supposed to have a proper phydev, to attach to... */
+	BUG_ON(!phydev);
+	BUG_ON(phydev->attached_dev);
+
+	phydev = phy_connect(dev, dev_name(&phydev->dev), &lq_adjust_link,
+			0, PHY_INTERFACE_MODE_MII);
+
+	if (IS_ERR(phydev)) {
+		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(phydev);
+	}
+
+	/* mask with MAC supported features */
+	phydev->supported &= (SUPPORTED_10baseT_Half
+			| SUPPORTED_10baseT_Full
+			| SUPPORTED_100baseT_Half
+			| SUPPORTED_100baseT_Full
+			| SUPPORTED_Autoneg
+			/* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
+			| SUPPORTED_MII
+			| SUPPORTED_TP);
+
+	phydev->advertising = phydev->supported;
+
+	priv->phydev = phydev;
+
+	printk(KERN_INFO "%s: attached PHY driver [%s] "
+		"(mii_bus:phy_addr=%s, irq=%d)\n",
+		dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+
+	return 0;
+}
+
+
+static int lq_mii_dev_init(struct net_device *dev)
+{
+	int i;
+	struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(dev);
+	ether_setup(dev);
+	dev->watchdog_timeo = 10 * HZ;
+	dev->mtu = 1500;
+	memset(priv, 0, sizeof(struct lq_mii_priv));
+	priv->dma_device = dma_device_reserve("PPE");
+	if (!priv->dma_device) {
+		BUG();
+		return -ENODEV;
+	}
+	priv->dma_device->buffer_alloc = &lq_etop_dma_buffer_alloc;
+	priv->dma_device->buffer_free = &lq_etop_dma_buffer_free;
+	priv->dma_device->intr_handler = &dma_intr_handler;
+	priv->dma_device->max_rx_chan_num = 4;
+
+	for (i = 0; i < priv->dma_device->max_rx_chan_num; i++) {
+		priv->dma_device->rx_chan[i]->packet_size = ETHERNET_PACKET_DMA_BUFFER_SIZE;
+		priv->dma_device->rx_chan[i]->control = LQ_DMA_CH_ON;
+	}
+
+	for (i = 0; i < priv->dma_device->max_tx_chan_num; i++)
+		if (i == 0)
+			priv->dma_device->tx_chan[i]->control = LQ_DMA_CH_ON;
+		else
+			priv->dma_device->tx_chan[i]->control = LQ_DMA_CH_OFF;
+
+	dma_device_register(priv->dma_device);
+
+	printk(KERN_INFO "%s: using mac=", dev->name);
+	for (i = 0; i < 6; i++) {
+		dev->dev_addr[i] = mac_addr[i];
+		printk("%02X%c", dev->dev_addr[i], (i == 5) ? ('\n') : (':'));
+	}
+
+	priv->mii_bus = mdiobus_alloc();
+	if (priv->mii_bus == NULL)
+		return -ENOMEM;
+
+	priv->mii_bus->priv = dev;
+	priv->mii_bus->read = lq_mdiobus_read;
+	priv->mii_bus->write = lq_mdiobus_write;
+	priv->mii_bus->name = "lq_mii";
+	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+	priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	for (i = 0; i < PHY_MAX_ADDR; ++i)
+		priv->mii_bus->irq[i] = PHY_POLL;
+
+	mdiobus_register(priv->mii_bus);
+
+	return mii_probe(dev);
+}
+
+static void lq_mii_chip_init(int mode)
+{
+	lq_pmu_enable(PMU_DMA);
+	lq_pmu_enable(PMU_PPE);
+
+	if (mode == REV_MII_MODE)
+		lq_w32_mask(PPE32_MII_MASK, PPE32_MII_REVERSE, LQ_PPE32_CFG);
+	else if (mode == MII_MODE)
+		lq_w32_mask(PPE32_MII_MASK, PPE32_MII_NORMAL, LQ_PPE32_CFG);
+	lq_w32(PPE32_PLEN_UNDER | PPE32_PLEN_OVER, LQ_PPE32_IG_PLEN_CTRL);
+	lq_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
+	wmb();
+}
+
+static int lq_mii_eth_mac_addr(struct net_device *dev, void *p)
+{
+	int retcode;
+
+	retcode = eth_mac_addr(dev, p);
+
+	if (retcode)
+		return retcode;
+
+	/* set rx_addr for unicast filter */
+	lq_w32(((dev->dev_addr[0]<<24)|(dev->dev_addr[1]<<16)|(dev->dev_addr[2]<< 8)|dev->dev_addr[3]), (u32*)(LQ_PPE32_BASE_ADDR|(0x461b<<2)));
+	lq_w32(((dev->dev_addr[4]<<24)|(dev->dev_addr[5]<<16)), (u32*)(LQ_PPE32_BASE_ADDR|(0x461c<<2)));
+
+	return 0;
+}
+
+static void lq_mii_set_rx_mode(struct net_device *dev)
+{
+	/* rx_mode promisc: unset unicast filter */
+	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
+		lq_w32(lq_r32((u32*)(LQ_PPE32_BASE_ADDR|(0x4614<<2))) & ~(1<<28), (u32*)(LQ_PPE32_BASE_ADDR|(0x4614<<2)));
+	/* enable unicast filter */
+	else
+		lq_w32(lq_r32((u32*)(LQ_PPE32_BASE_ADDR|(0x4614<<2))) | (1<<28), (u32*)(LQ_PPE32_BASE_ADDR|(0x4614<<2)));
+}
+
+static const struct net_device_ops lq_eth_netdev_ops = {
+	.ndo_init = lq_mii_dev_init,
+	.ndo_open = lq_mii_open,
+	.ndo_stop = lq_mii_release,
+	.ndo_start_xmit = lq_mii_tx,
+	.ndo_tx_timeout = lq_mii_tx_timeout,
+	.ndo_change_mtu = eth_change_mtu,
+	.ndo_set_mac_address = lq_mii_eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_multicast_list = lq_mii_set_rx_mode,
+};
+
+static int
+lq_mii_probe(struct platform_device *dev)
+{
+	int result = 0;
+	struct lq_eth_data *eth = (struct lq_eth_data*)dev->dev.platform_data;
+	lq_etop_dev = alloc_etherdev(sizeof(struct lq_mii_priv));
+	lq_etop_dev->netdev_ops = &lq_eth_netdev_ops;
+	memcpy(mac_addr, eth->mac, 6);
+	strcpy(lq_etop_dev->name, "eth%d");
+	lq_mii_chip_init(eth->mii_mode);
+	result = register_netdev(lq_etop_dev);
+	if (result) {
+		printk(KERN_INFO "lq_etop: error %i registering device \"%s\"\n", result, lq_etop_dev->name);
+		goto out;
+	}
+
+	printk(KERN_INFO "lq_etop: driver loaded!\n");
+
+out:
+	return result;
+}
+
+static int lq_mii_remove(struct platform_device *dev)
+{
+	struct lq_mii_priv *priv = (struct lq_mii_priv *)netdev_priv(lq_etop_dev);
+
+	printk(KERN_INFO "lq_etop: lq_etop cleanup\n");
+
+	dma_device_unregister(priv->dma_device);
+	dma_device_release(priv->dma_device);
+	kfree(priv->dma_device);
+	unregister_netdev(lq_etop_dev);
+	return 0;
+}
+
+static struct platform_driver lq_mii_driver = {
+	.probe = lq_mii_probe,
+	.remove = lq_mii_remove,
+	.driver = {
+		.name = "lq_etop",
+		.owner = THIS_MODULE,
+	},
+};
+
+int __init lq_mii_init(void)
+{
+	int ret = platform_driver_register(&lq_mii_driver);
+	if (ret)
+		printk(KERN_INFO "lq_etop: Error registering platform driver!\n");
+	return ret;
+}
+
+static void __exit lq_mii_cleanup(void)
+{
+	platform_driver_unregister(&lq_mii_driver);
+}
+
+module_init(lq_mii_init);
+module_exit(lq_mii_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("ethernet driver for IFXMIPS boards");