 * Lantiq CPE device ethernet driver.
 * Supposed to work on Twinpass/Danube.
 *
 * Based on INCA-IP driver:
 * (C) Copyright 2003-2004
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * Thomas Langer, Ralph Hempel
 *
 * See file CREDITS for list of people who contributed to this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <asm/types.h>
#include <asm/addrspace.h>
#if defined(CONFIG_AR9)
#define NUM_RX_DESC	PKTBUFSRX
	volatile u32 reserved		:3;
	volatile u32 Byteoffset		:2;
	volatile u32 reserve		:7;
	volatile u32 DataLen		:16;
} dma_rx_descriptor_t;
	volatile u32 Byteoffset		:5;
	volatile u32 reserved		:7;
	volatile u32 DataLen		:16;
} dma_tx_descriptor_t;
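
/*
 * Descriptor ownership protocol as used below: the CPU fills DataPtr and
 * DataLen and sets OWN=1 to hand a descriptor to the DMA engine; the
 * hardware clears OWN and sets C (complete) when it is done with the
 * buffer. Sop/Eop mark the first and last descriptor of a frame (this
 * driver always sends single-descriptor frames, so both are set).
 */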
static volatile dma_rx_descriptor_t rx_des_ring[NUM_RX_DESC] __attribute__ ((aligned(8)));
static volatile dma_tx_descriptor_t tx_des_ring[NUM_TX_DESC] __attribute__ ((aligned(8)));
static int tx_num, rx_num;
static volatile IfxDMA_t *pDma = (IfxDMA_t *)CKSEG1ADDR(DANUBE_DMA_BASE);
static int lq_eth_init(struct eth_device *dev, bd_t *bis);
static int lq_eth_send(struct eth_device *dev, volatile void *packet, int length);
static int lq_eth_recv(struct eth_device *dev);
static void lq_eth_halt(struct eth_device *dev);
static void lq_eth_init_chip(void);
static void lq_eth_init_dma(void);
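
/*
 * MII management access goes through the ETOP_MDIO_ACC register: the PHY
 * address is placed at bits 25:21, the register address at bits 20:16 and
 * the data in the low 16 bits. Writing 0xC0000000 (plus the address bits)
 * starts a read, 0x80000000 a write, and bit 31 stays set while the
 * transfer is still in progress, which is what the helpers below poll.
 */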
static int lq_eth_miiphy_read(char *devname, u8 phyAddr, u8 regAddr, u16 *retVal)
	if ((phyAddr > 0x1F) || (regAddr > 0x1F) || (retVal == NULL))
	phy = (phyAddr & 0x1F) << 21;
	reg = (regAddr & 0x1F) << 16;
	*ETOP_MDIO_ACC = 0xC0000000 | phy | reg;
	while ((timeout--) && (*ETOP_MDIO_ACC & 0x80000000))
	*retVal = *ETOP_MDIO_ACC & 0xFFFF;
static int lq_eth_miiphy_write(char *devname, u8 phyAddr, u8 regAddr, u16 data)
	if ((phyAddr > 0x1F) || (regAddr > 0x1F))
	phy = (phyAddr & 0x1F) << 21;
	reg = (regAddr & 0x1F) << 16;
	*ETOP_MDIO_ACC = 0x80000000 | phy | reg | data;
	while ((timeout--) && (*ETOP_MDIO_ACC & 0x80000000))
int lq_eth_initialize(bd_t *bis)
	struct eth_device *dev;
	debug("Entered lq_eth_initialize()\n");
	if (!(dev = malloc(sizeof *dev))) {
		printf("Failed to allocate memory\n");
	memset(dev, 0, sizeof(*dev));
	sprintf(dev->name, "lq_cpe_eth");
	dev->init = lq_eth_init;
	dev->halt = lq_eth_halt;
	dev->send = lq_eth_send;
	dev->recv = lq_eth_recv;
#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
	/* register mii command access routines */
	miiphy_register(dev->name,
			lq_eth_miiphy_read, lq_eth_miiphy_write);
static int lq_eth_init(struct eth_device *dev, bd_t *bis)
	uchar *enetaddr = dev->enetaddr;
	debug("lq_eth_init %x:%x:%x:%x:%x:%x\n",
		enetaddr[0], enetaddr[1], enetaddr[2], enetaddr[3], enetaddr[4], enetaddr[5]);
	*ENET_MAC_DA0 = (enetaddr[0] << 24) + (enetaddr[1] << 16) + (enetaddr[2] << 8) + enetaddr[3];
	*ENET_MAC_DA1 = (enetaddr[4] << 24) + (enetaddr[5] << 16);
	*ENETS_CFG |= 1 << 28;	/* enable filter for unicast packets */
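
	/*
	 * Set up the RX descriptor ring: each descriptor points (through the
	 * uncached KSEG1 alias) at one of U-Boot's NetRxPackets buffers and is
	 * handed to the DMA engine with OWN=1 and a full PKTSIZE_ALIGN length.
	 */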
	for (i = 0; i < NUM_RX_DESC; i++) {
		dma_rx_descriptor_t *rx_desc = (dma_rx_descriptor_t *)CKSEG1ADDR(&rx_des_ring[i]);
		rx_desc->status.word = 0;
		rx_desc->status.field.OWN = 1;
		rx_desc->status.field.DataLen = PKTSIZE_ALIGN;	/* 1536 */
		rx_desc->DataPtr = (u32)CKSEG1ADDR(NetRxPackets[i]);
		NetRxPackets[i][0] = 0xAA;
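
	/*
	 * The per-channel DMA registers are windowed: writing a channel number
	 * to dma_cs selects which channel the following dma_c* accesses
	 * (descriptor base, ring length, interrupt enable, control) refer to.
	 */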
	dma_writel(dma_cs, RX_CHAN_NO);
	dma_writel(dma_cctrl, 0x2);	/* fix me, need to reset this channel first? */
	dma_writel(dma_cpoll, 0x80000040);
	/* set descriptor base */
	dma_writel(dma_cdba, (u32)rx_des_ring);
	dma_writel(dma_cdlen, NUM_RX_DESC);
	dma_writel(dma_cie, 0);
	dma_writel(dma_cctrl, 0x30000);
	for (i = 0; i < NUM_TX_DESC; i++) {
		dma_tx_descriptor_t *tx_desc = (dma_tx_descriptor_t *)CKSEG1ADDR(&tx_des_ring[i]);
		memset(tx_desc, 0, sizeof(tx_des_ring[0]));
	dma_writel(dma_cs, TX_CHAN_NO);
	dma_writel(dma_cctrl, 0x2);	/* fix me, need to reset this channel first? */
	dma_writel(dma_cpoll, 0x80000040);
	dma_writel(dma_cdba, (u32)tx_des_ring);
	dma_writel(dma_cdlen, NUM_TX_DESC);
	dma_writel(dma_cie, 0);
	dma_writel(dma_cctrl, 0x30100);
	/* turn on DMA rx & tx channel */
	dma_writel(dma_cs, RX_CHAN_NO);
	dma_writel(dma_cctrl, dma_readl(dma_cctrl) | 1);	/* reset and turn on the channel */
static void lq_eth_halt(struct eth_device *dev)
	debug("lq_eth_halt()\n");
		dma_writel(dma_cs, i);
		dma_writel(dma_cctrl, dma_readl(dma_cctrl) & ~1);	/* stop the dma channel */
static void lq_dump(const u8 *data, const u32 length)
	for (i = 0; i < length; i++) {
		debug("%02x ", data[i]);
static int lq_eth_send(struct eth_device *dev, volatile void *packet, int length)
	volatile dma_tx_descriptor_t *tx_desc = (dma_tx_descriptor_t *)CKSEG1ADDR(&tx_des_ring[tx_num]);
		printf("%s: bad packet size: %d\n", dev->name, length);
	for (i = 0; tx_desc->status.field.OWN == 1; i++) {
		printf("NO Tx Descriptor...");
	tx_desc->status.field.Sop = 1;
	tx_desc->status.field.Eop = 1;
	tx_desc->status.field.C = 0;
	tx_desc->DataPtr = (u32)CKSEG1ADDR(packet);
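
	/*
	 * An Ethernet frame must be at least 60 bytes long (excluding the FCS),
	 * so short packets go out with DataLen forced to 60 while longer ones
	 * use their real length.
	 */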
		tx_desc->status.field.DataLen = 60;
		tx_desc->status.field.DataLen = (u32)length;
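
	/*
	 * The packet buffer may still sit in the data cache; write it back to
	 * memory so the DMA engine reads current data before OWN hands the
	 * descriptor to the hardware.
	 */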
	flush_cache((u32)packet, tx_desc->status.field.DataLen);
	tx_desc->status.field.OWN = 1;
	if (tx_num == NUM_TX_DESC) tx_num = 0;
	lq_dump((const u8 *)tx_desc->DataPtr, tx_desc->status.field.DataLen);
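
	/* Make sure the TX channel is actually running: select it and set the
	 * ON bit (bit 0) of its control register if it is currently stopped. */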
	dma_writel(dma_cs, TX_CHAN_NO);
	if (!(dma_readl(dma_cctrl) & 1)) {
		dma_writel(dma_cctrl, dma_readl(dma_cctrl) | 1);
static int lq_eth_recv(struct eth_device *dev)
	volatile dma_rx_descriptor_t *rx_desc;
	rx_desc = (dma_rx_descriptor_t *)CKSEG1ADDR(&rx_des_ring[rx_num]);
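
	/* A frame is ready only once the hardware has cleared OWN and set the
	 * C (complete) bit on the descriptor; otherwise there is nothing to do. */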
	if ((rx_desc->status.field.C == 0) || (rx_desc->status.field.OWN == 1)) {
	lq_dump((const u8 *)rx_desc->DataPtr, rx_desc->status.field.DataLen);
	length = rx_desc->status.field.DataLen;
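
		/*
		 * The DMA engine wrote the frame through the uncached KSEG1 alias;
		 * invalidate the cached KSEG0 alias of the buffer so the network
		 * stack reads the fresh data.
		 */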
		invalidate_dcache_range((u32)CKSEG0ADDR(rx_desc->DataPtr), (u32)CKSEG0ADDR(rx_desc->DataPtr) + length);
		NetReceive(NetRxPackets[rx_num], length);
		printf("ERROR: Invalid rx packet length.\n");
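
	/* Recycle the descriptor: clear its status flags, restore the full
	 * buffer length and hand it back to the DMA engine by setting OWN. */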
	rx_desc->status.field.Sop = 0;
	rx_desc->status.field.Eop = 0;
	rx_desc->status.field.C = 0;
	rx_desc->status.field.DataLen = PKTSIZE_ALIGN;
	rx_desc->status.field.OWN = 1;
	if (rx_num == NUM_RX_DESC)
static void lq_eth_init_chip(void)
	*ETOP_MDIO_CFG &= ~0x6;
	*ENET_MAC_CFG = 0x187;
	/* turn on port0, set to rmii and turn off port1 */
	*ETOP_CFG = (*ETOP_CFG & 0xFFFFFFFC) | 0x0000000A;
	*ETOP_CFG = (*ETOP_CFG & 0xFFFFFFFC) | 0x00000008;

	*ETOP_IG_PLEN_CTRL = 0x004005EE;	/* set packet length */
	*ENET_MAC_CFG |= 1 << 11;	/* enable the crc */
static void lq_eth_init_dma(void)
	dma_writel(dma_ctrl, dma_readl(dma_ctrl) | 1);
	dma_writel(dma_irnen, 0);	/* disable all the interrupts first */

	/* Clear Interrupt Status Register */
	dma_writel(dma_irncr, 0xfffff);
	/* disable all the dma interrupts */
	dma_writel(dma_irnen, 0);
	/* disable channel 0 and channel 1 interrupts */
	dma_writel(dma_cs, RX_CHAN_NO);
	dma_writel(dma_cctrl, 0x2);	/* fix me, need to reset this channel first? */
	dma_writel(dma_cpoll, 0x80000040);
	/* set descriptor base */
	dma_writel(dma_cdba, (u32)rx_des_ring);
	dma_writel(dma_cdlen, NUM_RX_DESC);
	dma_writel(dma_cie, 0);
	dma_writel(dma_cctrl, 0x30000);
	dma_writel(dma_cs, TX_CHAN_NO);
	dma_writel(dma_cctrl, 0x2);	/* fix me, need to reset this channel first? */
	dma_writel(dma_cpoll, 0x80000040);
	dma_writel(dma_cdba, (u32)tx_des_ring);
	dma_writel(dma_cdlen, NUM_TX_DESC);
	dma_writel(dma_cie, 0);
	dma_writel(dma_cctrl, 0x30100);
	/* enable the poll function and set the poll counter */
	//dma_writel(DMA_CPOLL=DANUBE_DMA_POLL_EN | (DANUBE_DMA_POLL_COUNT<<4);
	/* set port properties, enable endian conversion for switch */
	dma_writel(dma_ps, 0);
	dma_writel(dma_pctrl, dma_readl(dma_pctrl) | (0xf << 8));	/* enable 32 bit endian conversion */