/*
 * drivers/net/danube_mii0.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2005 Infineon
 *
 * Rewrite of Infineon Danube code, thanks to infineon for the support,
 * software and hardware
 *
 * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
 */
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/interrupt.h>
32 #include <asm/uaccess.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
37 #include <linux/tcp.h>
38 #include <linux/skbuff.h>
40 #include <linux/ethtool.h>
41 #include <asm/checksum.h>
42 #include <linux/init.h>
43 #include <asm/delay.h>
44 #include <asm/danube/danube.h>
45 #include <asm/danube/danube_mii0.h>
46 #include <asm/danube/danube_dma.h>
48 static struct net_device danube_mii0_dev
;
49 static unsigned char u_boot_ethaddr
[MAX_ADDR_LEN
];
52 danube_write_mdio (u32 phy_addr
, u32 phy_reg
, u16 phy_data
)
54 u32 val
= MDIO_ACC_REQUEST
|
55 ((phy_addr
& MDIO_ACC_ADDR_MASK
) << MDIO_ACC_ADDR_OFFSET
) |
56 ((phy_reg
& MDIO_ACC_REG_MASK
) << MDIO_ACC_REG_OFFSET
) |
59 while (readl(DANUBE_PPE32_MDIO_ACC
) & MDIO_ACC_REQUEST
);
60 writel(val
, DANUBE_PPE32_MDIO_ACC
);
64 danube_read_mdio (u32 phy_addr
, u32 phy_reg
)
66 u32 val
= MDIO_ACC_REQUEST
| MDIO_ACC_READ
|
67 ((phy_addr
& MDIO_ACC_ADDR_MASK
) << MDIO_ACC_ADDR_OFFSET
) |
68 ((phy_reg
& MDIO_ACC_REG_MASK
) << MDIO_ACC_REG_OFFSET
);
70 writel(val
, DANUBE_PPE32_MDIO_ACC
);
71 while (readl(DANUBE_PPE32_MDIO_ACC
) & MDIO_ACC_REQUEST
){};
72 val
= readl(DANUBE_PPE32_MDIO_ACC
) & MDIO_ACC_VAL_MASK
;
78 danube_switch_open (struct net_device
*dev
)
80 struct switch_priv
* priv
= (struct switch_priv
*)dev
->priv
;
81 struct dma_device_info
* dma_dev
= priv
->dma_device
;
84 for (i
= 0; i
< dma_dev
->max_rx_chan_num
; i
++)
86 if ((dma_dev
->rx_chan
[i
])->control
== DANUBE_DMA_CH_ON
)
87 (dma_dev
->rx_chan
[i
])->open(dma_dev
->rx_chan
[i
]);
90 netif_start_queue(dev
);
96 switch_release (struct net_device
*dev
){
97 struct switch_priv
* priv
= (struct switch_priv
*)dev
->priv
;
98 struct dma_device_info
* dma_dev
= priv
->dma_device
;
101 for (i
= 0; i
< dma_dev
->max_rx_chan_num
; i
++)
102 dma_dev
->rx_chan
[i
]->close(dma_dev
->rx_chan
[i
]);
104 netif_stop_queue(dev
);
110 switch_hw_receive (struct net_device
* dev
,struct dma_device_info
* dma_dev
)
112 struct switch_priv
*priv
= (struct switch_priv
*)dev
->priv
;
113 unsigned char* buf
= NULL
;
114 struct sk_buff
*skb
= NULL
;
117 len
= dma_device_read(dma_dev
, &buf
, (void**)&skb
);
119 if (len
>= ETHERNET_PACKET_DMA_BUFFER_SIZE
)
121 printk("packet too large %d\n",len
);
122 goto switch_hw_receive_err_exit
;
129 printk("cannot restore pointer\n");
130 goto switch_hw_receive_err_exit
;
133 if (len
> (skb
->end
- skb
->tail
))
135 printk("BUG, len:%d end:%p tail:%p\n", (len
+4), skb
->end
, skb
->tail
);
136 goto switch_hw_receive_err_exit
;
141 skb
->protocol
= eth_type_trans(skb
, dev
);
144 priv
->stats
.rx_packets
++;
145 priv
->stats
.rx_bytes
+= len
;
149 switch_hw_receive_err_exit
:
153 dev_kfree_skb_any(skb
);
154 priv
->stats
.rx_errors
++;
155 priv
->stats
.rx_dropped
++;
164 switch_hw_tx (char *buf
, int len
, struct net_device
*dev
)
167 struct switch_priv
*priv
= dev
->priv
;
168 struct dma_device_info
* dma_dev
= priv
->dma_device
;
170 ret
= dma_device_write(dma_dev
, buf
, len
, priv
->skb
);
176 switch_tx (struct sk_buff
*skb
, struct net_device
*dev
)
180 struct switch_priv
*priv
= dev
->priv
;
181 struct dma_device_info
* dma_dev
= priv
->dma_device
;
183 len
= skb
->len
< ETH_ZLEN
? ETH_ZLEN
: skb
->len
;
186 dev
->trans_start
= jiffies
;
187 // TODO we got more than 1 dma channel, so we should do something intelligent
188 // here to select one
189 dma_dev
->current_tx_chan
= 0;
193 if (switch_hw_tx(data
, len
, dev
) != len
)
195 dev_kfree_skb_any(skb
);
196 priv
->stats
.tx_errors
++;
197 priv
->stats
.tx_dropped
++;
199 priv
->stats
.tx_packets
++;
200 priv
->stats
.tx_bytes
+=len
;
207 switch_tx_timeout (struct net_device
*dev
)
210 struct switch_priv
* priv
= (struct switch_priv
*)dev
->priv
;
212 priv
->stats
.tx_errors
++;
214 for (i
= 0; i
< priv
->dma_device
->max_tx_chan_num
; i
++)
216 priv
->dma_device
->tx_chan
[i
]->disable_irq(priv
->dma_device
->tx_chan
[i
]);
219 netif_wake_queue(dev
);
225 dma_intr_handler (struct dma_device_info
* dma_dev
, int status
)
232 switch_hw_receive(&danube_mii0_dev
, dma_dev
);
235 case TX_BUF_FULL_INT
:
236 printk("tx buffer full\n");
237 netif_stop_queue(&danube_mii0_dev
);
238 for (i
= 0; i
< dma_dev
->max_tx_chan_num
; i
++)
240 if ((dma_dev
->tx_chan
[i
])->control
==DANUBE_DMA_CH_ON
)
241 dma_dev
->tx_chan
[i
]->enable_irq(dma_dev
->tx_chan
[i
]);
245 case TRANSMIT_CPT_INT
:
246 for (i
= 0; i
< dma_dev
->max_tx_chan_num
; i
++)
247 dma_dev
->tx_chan
[i
]->disable_irq(dma_dev
->tx_chan
[i
]);
249 netif_wake_queue(&danube_mii0_dev
);
257 danube_etop_dma_buffer_alloc (int len
, int *byte_offset
, void **opt
)
259 unsigned char *buffer
= NULL
;
260 struct sk_buff
*skb
= NULL
;
262 skb
= dev_alloc_skb(ETHERNET_PACKET_DMA_BUFFER_SIZE
);
266 buffer
= (unsigned char*)(skb
->data
);
268 *(int*)opt
= (int)skb
;
275 danube_etop_dma_buffer_free (unsigned char *dataptr
, void *opt
)
277 struct sk_buff
*skb
= NULL
;
283 skb
= (struct sk_buff
*)opt
;
284 dev_kfree_skb_any(skb
);
288 static struct net_device_stats
*
289 danube_get_stats (struct net_device
*dev
)
291 return (struct net_device_stats
*)dev
->priv
;
295 switch_init (struct net_device
*dev
)
299 struct switch_priv
*priv
;
303 printk("%s up\n", dev
->name
);
305 dev
->open
= danube_switch_open
;
306 dev
->stop
= switch_release
;
307 dev
->hard_start_xmit
= switch_tx
;
308 dev
->get_stats
= danube_get_stats
;
309 dev
->tx_timeout
= switch_tx_timeout
;
310 dev
->watchdog_timeo
= 10 * HZ
;
311 dev
->priv
= kmalloc(sizeof(struct switch_priv
), GFP_KERNEL
);
313 if (dev
->priv
== NULL
)
316 memset(dev
->priv
, 0, sizeof(struct switch_priv
));
319 priv
->dma_device
= dma_device_reserve("PPE");
321 if (!priv
->dma_device
){
326 priv
->dma_device
->buffer_alloc
= &danube_etop_dma_buffer_alloc
;
327 priv
->dma_device
->buffer_free
= &danube_etop_dma_buffer_free
;
328 priv
->dma_device
->intr_handler
= &dma_intr_handler
;
329 priv
->dma_device
->max_rx_chan_num
= 4;
331 for (i
= 0; i
< priv
->dma_device
->max_rx_chan_num
; i
++)
333 priv
->dma_device
->rx_chan
[i
]->packet_size
= ETHERNET_PACKET_DMA_BUFFER_SIZE
;
334 priv
->dma_device
->rx_chan
[i
]->control
= DANUBE_DMA_CH_ON
;
337 for (i
= 0; i
< priv
->dma_device
->max_tx_chan_num
; i
++)
340 priv
->dma_device
->tx_chan
[i
]->control
= DANUBE_DMA_CH_ON
;
342 priv
->dma_device
->tx_chan
[i
]->control
= DANUBE_DMA_CH_OFF
;
345 dma_device_register(priv
->dma_device
);
347 /*read the mac address from the mac table and put them into the mac table.*/
348 for (i
= 0; i
< 6; i
++)
350 retval
+= u_boot_ethaddr
[i
];
354 /* ethaddr not set in u-boot ? */
357 printk("use default MAC address\n");
358 dev
->dev_addr
[0] = 0x00;
359 dev
->dev_addr
[1] = 0x11;
360 dev
->dev_addr
[2] = 0x22;
361 dev
->dev_addr
[3] = 0x33;
362 dev
->dev_addr
[4] = 0x44;
363 dev
->dev_addr
[5] = 0x55;
365 for (i
= 0; i
< 6; i
++)
366 dev
->dev_addr
[i
] = u_boot_ethaddr
[i
];
373 danube_sw_chip_init (int mode
)
375 writel(readl(DANUBE_PMU_PWDCR
) & ~DANUBE_PMU_PWDCR_DMA
, DANUBE_PMU_PWDCR
);
376 writel(readl(DANUBE_PMU_PWDCR
) & ~DANUBE_PMU_PWDCR_PPE
, DANUBE_PMU_PWDCR
);
379 if(mode
== REV_MII_MODE
)
380 writel((readl(DANUBE_PPE32_CFG
) & PPE32_MII_MASK
) | PPE32_MII_REVERSE
, DANUBE_PPE32_CFG
);
381 else if(mode
== MII_MODE
)
382 writel((readl(DANUBE_PPE32_CFG
) & PPE32_MII_MASK
) | PPE32_MII_NORMAL
, DANUBE_PPE32_CFG
);
384 writel(PPE32_PLEN_UNDER
| PPE32_PLEN_OVER
, DANUBE_PPE32_IG_PLEN_CTRL
);
386 writel(PPE32_CGEN
, DANUBE_PPE32_ENET_MAC_CFG
);
392 switch_init_module(void)
396 danube_mii0_dev
.init
= switch_init
;
398 strcpy(danube_mii0_dev
.name
, "eth%d");
399 SET_MODULE_OWNER(dev
);
401 result
= register_netdev(&danube_mii0_dev
);
404 printk("error %i registering device \"%s\"\n", result
, danube_mii0_dev
.name
);
408 /* danube eval kit connects the phy/switch in REV mode */
409 danube_sw_chip_init(REV_MII_MODE
);
410 printk("danube MAC driver loaded!\n");
419 struct switch_priv
*priv
= (struct switch_priv
*)danube_mii0_dev
.priv
;
421 printk("danube_mii0 cleanup\n");
423 dma_device_unregister(priv
->dma_device
);
424 dma_device_release(priv
->dma_device
);
425 kfree(priv
->dma_device
);
426 kfree(danube_mii0_dev
.priv
);
427 unregister_netdev(&danube_mii0_dev
);
module_init(switch_init_module);
module_exit(switch_cleanup);