/*
 *   drivers/net/danube_mii0.c
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 *
 *   Copyright (C) 2005 Infineon
 *
 *   Rewrite of Infineon Danube code, thanks to Infineon for the support,
 *   software and hardware
 *
 *   Copyright (C) 2007 John Crispin <blogic@openwrt.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <asm/checksum.h>
#include <linux/init.h>
#include <asm/delay.h>
#include <asm/danube/danube.h>
#include <asm/danube/danube_mii0.h>
#include <asm/danube/danube_dma.h>

static struct net_device danube_mii0_dev;
static unsigned char u_boot_ethaddr[MAX_ADDR_LEN];

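/*
 * MDIO access helpers for the PPE32 built-in MAC. Both poll the
 * MDIO_ACC_REQUEST bit to wait until the controller is idle.
 *
 * Usage sketch (PHY address 0 is an assumption; MII_BMCR, BMCR_RESET
 * and MII_PHYSID1 come from <linux/mii.h>):
 *
 *	danube_write_mdio(0, MII_BMCR, BMCR_RESET);
 *	id = danube_read_mdio(0, MII_PHYSID1);
 */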
void
danube_write_mdio (u32 phy_addr, u32 phy_reg, u16 phy_data)
{
	u32 val = MDIO_ACC_REQUEST |
		((phy_addr & MDIO_ACC_ADDR_MASK) << MDIO_ACC_ADDR_OFFSET) |
		((phy_reg & MDIO_ACC_REG_MASK) << MDIO_ACC_REG_OFFSET) |
		phy_data;

	/* wait until the MDIO controller is idle before issuing the write */
	while (readl(DANUBE_PPE32_MDIO_ACC) & MDIO_ACC_REQUEST)
		;
	writel(val, DANUBE_PPE32_MDIO_ACC);
}

unsigned short
danube_read_mdio (u32 phy_addr, u32 phy_reg)
{
	u32 val = MDIO_ACC_REQUEST | MDIO_ACC_READ |
		((phy_addr & MDIO_ACC_ADDR_MASK) << MDIO_ACC_ADDR_OFFSET) |
		((phy_reg & MDIO_ACC_REG_MASK) << MDIO_ACC_REG_OFFSET);

	writel(val, DANUBE_PPE32_MDIO_ACC);
	/* the request bit clears once the read has completed */
	while (readl(DANUBE_PPE32_MDIO_ACC) & MDIO_ACC_REQUEST)
		;
	val = readl(DANUBE_PPE32_MDIO_ACC) & MDIO_ACC_VAL_MASK;

	return val;
}

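/*
 * net_device open/stop callbacks: bring the active DMA RX channels up
 * or down and start/stop the TX queue.
 */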
int
danube_switch_open (struct net_device *dev)
{
	struct switch_priv *priv = (struct switch_priv*)dev->priv;
	struct dma_device_info *dma_dev = priv->dma_device;
	int i;

	for (i = 0; i < dma_dev->max_rx_chan_num; i++)
	{
		if ((dma_dev->rx_chan[i])->control == DANUBE_DMA_CH_ON)
			(dma_dev->rx_chan[i])->open(dma_dev->rx_chan[i]);
	}

	netif_start_queue(dev);

	return 0;
}

int
switch_release (struct net_device *dev)
{
	struct switch_priv *priv = (struct switch_priv*)dev->priv;
	struct dma_device_info *dma_dev = priv->dma_device;
	int i;

	for (i = 0; i < dma_dev->max_rx_chan_num; i++)
		dma_dev->rx_chan[i]->close(dma_dev->rx_chan[i]);

	netif_stop_queue(dev);

	return 0;
}

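/*
 * RX path, called from the DMA interrupt handler. dma_device_read()
 * hands back the filled buffer together with the skb cookie that was
 * stored by danube_etop_dma_buffer_alloc().
 */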
int
switch_hw_receive (struct net_device *dev, struct dma_device_info *dma_dev)
{
	struct switch_priv *priv = (struct switch_priv*)dev->priv;
	unsigned char *buf = NULL;
	struct sk_buff *skb = NULL;
	int len = 0;

	len = dma_device_read(dma_dev, &buf, (void**)&skb);

	if (len >= ETHERNET_PACKET_DMA_BUFFER_SIZE)
	{
		printk("packet too large %d\n", len);
		goto switch_hw_receive_err_exit;
	}

	/* remove CRC */
	len -= 4;
	if (skb == NULL)
	{
		printk("cannot restore pointer\n");
		goto switch_hw_receive_err_exit;
	}

	if (len > (skb->end - skb->tail))
	{
		printk("BUG, len:%d end:%p tail:%p\n", len + 4, skb->end, skb->tail);
		goto switch_hw_receive_err_exit;
	}

	skb_put(skb, len);
	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);

	priv->stats.rx_packets++;
	priv->stats.rx_bytes += len;

	return 0;

switch_hw_receive_err_exit:
	/* free the buffer on every error path so the skb is not leaked */
	if (skb)
		dev_kfree_skb_any(skb);
	priv->stats.rx_errors++;
	priv->stats.rx_dropped++;

	return -EIO;
}

162
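/*
 * TX path: switch_hw_tx() pushes one frame into the DMA engine,
 * switch_tx() is the hard_start_xmit handler that wraps it.
 */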
int
switch_hw_tx (char *buf, int len, struct net_device *dev)
{
	int ret = 0;
	struct switch_priv *priv = dev->priv;
	struct dma_device_info *dma_dev = priv->dma_device;

	ret = dma_device_write(dma_dev, buf, len, priv->skb);

	return ret;
}

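/*
 * Frames shorter than ETH_ZLEN (60 bytes) are padded up to the minimum
 * Ethernet frame size by simply extending the DMA length; the CRC is
 * appended in hardware (PPE32_CGEN is set in danube_sw_chip_init()).
 */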
int
switch_tx (struct sk_buff *skb, struct net_device *dev)
{
	int len;
	char *data;
	struct switch_priv *priv = dev->priv;
	struct dma_device_info *dma_dev = priv->dma_device;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
	data = skb->data;
	priv->skb = skb;
	dev->trans_start = jiffies;
	/* TODO: we have more than one DMA channel, so we should do
	   something intelligent here to select one */
	dma_dev->current_tx_chan = 0;

	wmb();

	if (switch_hw_tx(data, len, dev) != len)
	{
		dev_kfree_skb_any(skb);
		priv->stats.tx_errors++;
		priv->stats.tx_dropped++;
	} else {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += len;
	}

	return 0;
}

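/*
 * TX watchdog: on a timeout, count the error, mask the TX channel
 * interrupts and let the stack retry by waking the queue.
 */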
void
switch_tx_timeout (struct net_device *dev)
{
	int i;
	struct switch_priv *priv = (struct switch_priv*)dev->priv;

	priv->stats.tx_errors++;

	for (i = 0; i < priv->dma_device->max_tx_chan_num; i++)
		priv->dma_device->tx_chan[i]->disable_irq(priv->dma_device->tx_chan[i]);

	netif_wake_queue(dev);
}

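/*
 * Central DMA event dispatcher registered with the DMA core:
 * RCV_INT feeds the RX path, TX_BUF_FULL_INT throttles the queue until
 * TRANSMIT_CPT_INT signals that descriptors are free again.
 */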
int
dma_intr_handler (struct dma_device_info *dma_dev, int status)
{
	int i;

	switch (status)
	{
		case RCV_INT:
			switch_hw_receive(&danube_mii0_dev, dma_dev);
			break;

		case TX_BUF_FULL_INT:
			printk("tx buffer full\n");
			netif_stop_queue(&danube_mii0_dev);
			for (i = 0; i < dma_dev->max_tx_chan_num; i++)
			{
				if ((dma_dev->tx_chan[i])->control == DANUBE_DMA_CH_ON)
					dma_dev->tx_chan[i]->enable_irq(dma_dev->tx_chan[i]);
			}
			break;

		case TRANSMIT_CPT_INT:
			for (i = 0; i < dma_dev->max_tx_chan_num; i++)
				dma_dev->tx_chan[i]->disable_irq(dma_dev->tx_chan[i]);

			netif_wake_queue(&danube_mii0_dev);
			break;
	}

	return 0;
}

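/*
 * Buffer management callbacks for the DMA core. The allocator hands out
 * skb data buffers and stores the skb itself in the opaque 'opt' cookie
 * so the RX path can recover it; the free callback releases either kind.
 */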
unsigned char*
danube_etop_dma_buffer_alloc (int len, int *byte_offset, void **opt)
{
	unsigned char *buffer = NULL;
	struct sk_buff *skb = NULL;

	skb = dev_alloc_skb(ETHERNET_PACKET_DMA_BUFFER_SIZE);
	if (skb == NULL)
		return NULL;

	buffer = (unsigned char*)(skb->data);
	/* reserve 2 bytes so the IP header ends up word aligned */
	skb_reserve(skb, 2);
	*opt = skb;
	*byte_offset = 2;

	return buffer;
}

void
danube_etop_dma_buffer_free (unsigned char *dataptr, void *opt)
{
	struct sk_buff *skb = NULL;

	if (opt == NULL)
	{
		kfree(dataptr);
	} else {
		skb = (struct sk_buff*)opt;
		dev_kfree_skb_any(skb);
	}
}

static struct net_device_stats*
danube_get_stats (struct net_device *dev)
{
	struct switch_priv *priv = (struct switch_priv*)dev->priv;

	return &priv->stats;
}

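/*
 * net_device init callback: sets up the device ops, reserves the "PPE"
 * DMA device, configures its RX/TX channels and picks a MAC address.
 */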
static int
switch_init (struct net_device *dev)
{
	u64 retval = 0;
	int i;
	struct switch_priv *priv;

	ether_setup(dev);

	printk("%s up\n", dev->name);

	dev->open = danube_switch_open;
	dev->stop = switch_release;
	dev->hard_start_xmit = switch_tx;
	dev->get_stats = danube_get_stats;
	dev->tx_timeout = switch_tx_timeout;
	dev->watchdog_timeo = 10 * HZ;
	dev->priv = kmalloc(sizeof(struct switch_priv), GFP_KERNEL);

	if (dev->priv == NULL)
		return -ENOMEM;

	memset(dev->priv, 0, sizeof(struct switch_priv));
	priv = dev->priv;

	priv->dma_device = dma_device_reserve("PPE");

	if (!priv->dma_device){
		BUG();
		return -ENODEV;
	}

	priv->dma_device->buffer_alloc = &danube_etop_dma_buffer_alloc;
	priv->dma_device->buffer_free = &danube_etop_dma_buffer_free;
	priv->dma_device->intr_handler = &dma_intr_handler;
	priv->dma_device->max_rx_chan_num = 4;

	for (i = 0; i < priv->dma_device->max_rx_chan_num; i++)
	{
		priv->dma_device->rx_chan[i]->packet_size = ETHERNET_PACKET_DMA_BUFFER_SIZE;
		priv->dma_device->rx_chan[i]->control = DANUBE_DMA_CH_ON;
	}

	/* only the first TX channel is used */
	for (i = 0; i < priv->dma_device->max_tx_chan_num; i++)
	{
		if (i == 0)
			priv->dma_device->tx_chan[i]->control = DANUBE_DMA_CH_ON;
		else
			priv->dma_device->tx_chan[i]->control = DANUBE_DMA_CH_OFF;
	}

	dma_device_register(priv->dma_device);

	/* sum the MAC address bytes passed in by u-boot to see if one was set */
	for (i = 0; i < 6; i++)
		retval += u_boot_ethaddr[i];

	/* ethaddr not set in u-boot ? fall back to a default address */
	if (retval == 0)
	{
		printk("use default MAC address\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x11;
		dev->dev_addr[2] = 0x22;
		dev->dev_addr[3] = 0x33;
		dev->dev_addr[4] = 0x44;
		dev->dev_addr[5] = 0x55;
	} else {
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = u_boot_ethaddr[i];
	}

	return 0;
}

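/*
 * One-time hardware setup: power up the DMA and PPE units, select the
 * MII mode, enable frame length checking and CRC generation.
 */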
static void
danube_sw_chip_init (int mode)
{
	/* clear the power-down bits for the DMA and PPE units */
	writel(readl(DANUBE_PMU_PWDCR) & ~DANUBE_PMU_PWDCR_DMA, DANUBE_PMU_PWDCR);
	writel(readl(DANUBE_PMU_PWDCR) & ~DANUBE_PMU_PWDCR_PPE, DANUBE_PMU_PWDCR);
	wmb();

	if (mode == REV_MII_MODE)
		writel((readl(DANUBE_PPE32_CFG) & PPE32_MII_MASK) | PPE32_MII_REVERSE, DANUBE_PPE32_CFG);
	else if (mode == MII_MODE)
		writel((readl(DANUBE_PPE32_CFG) & PPE32_MII_MASK) | PPE32_MII_NORMAL, DANUBE_PPE32_CFG);

	/* enable under-/oversized frame checks and CRC generation */
	writel(PPE32_PLEN_UNDER | PPE32_PLEN_OVER, DANUBE_PPE32_IG_PLEN_CTRL);
	writel(PPE32_CGEN, DANUBE_PPE32_ENET_MAC_CFG);

	wmb();
}

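/*
 * Module entry point: register the netdevice and put the PPE32 into
 * reverse MII mode, which is how the Danube eval kit wires the switch.
 */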
int __init
switch_init_module (void)
{
	int result = 0;

	danube_mii0_dev.init = switch_init;

	strcpy(danube_mii0_dev.name, "eth%d");
	SET_MODULE_OWNER(&danube_mii0_dev);

	result = register_netdev(&danube_mii0_dev);
	if (result)
	{
		printk("error %i registering device \"%s\"\n", result, danube_mii0_dev.name);
		goto out;
	}

	/* the danube eval kit connects the phy/switch in REV MII mode */
	danube_sw_chip_init(REV_MII_MODE);
	printk("danube MAC driver loaded!\n");

out:
	return result;
}

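/*
 * Module exit: tear down in the reverse order of init so the DMA device
 * is not freed while the netdevice is still registered.
 */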
static void __exit
switch_cleanup (void)
{
	struct switch_priv *priv = (struct switch_priv*)danube_mii0_dev.priv;

	printk("danube_mii0 cleanup\n");

	/* unregister first so no callback can run against freed memory */
	unregister_netdev(&danube_mii0_dev);
	dma_device_unregister(priv->dma_device);
	dma_device_release(priv->dma_device);
	kfree(priv->dma_device);
	kfree(priv);
}

module_init(switch_init_module);
module_exit(switch_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");