/* package/uboot-ar71xx/files/drivers/net/ag71xx.c (openwrt.git) */
/*
 * Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2010 Michael Kurz <michi.kurz@googlemail.com>
 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Based on Atheros' AG7100 driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <common.h>
#include <malloc.h>
#include <net.h>
#include <miiphy.h>

#include <asm/ar71xx.h>

#include "ag71xx.h"

#ifdef AG71XX_DEBUG
#define DBG(fmt, args...) printf(fmt, ##args)
#else
#define DBG(fmt, args...)
#endif

static struct ag71xx agtable[] = {
        {
                .mac_base = KSEG1ADDR(AR71XX_GE0_BASE),
                .mii_ctrl = KSEG1ADDR(AR71XX_MII_BASE + MII_REG_MII0_CTRL),
                .mii_if = CONFIG_AG71XX_MII0_IIF,
        }, {
                .mac_base = KSEG1ADDR(AR71XX_GE1_BASE),
                .mii_ctrl = KSEG1ADDR(AR71XX_MII_BASE + MII_REG_MII1_CTRL),
                .mii_if = CONFIG_AG71XX_MII1_IIF,
        }
};

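/*
 * Descriptor rings: each ring is a contiguous array of ag71xx_desc
 * entries placed in an uncached mapping, so the CPU and the DMA engine
 * always see the same descriptor contents without explicit cache
 * maintenance on the descriptors themselves.  ring->buf is a cached
 * bookkeeping array with one entry per descriptor slot.
 */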
static int ag71xx_ring_alloc(struct ag71xx_ring *ring, unsigned int size)
{
        int err;
        int i;
        int rsize;

        ring->desc_size = sizeof(struct ag71xx_desc);
        if (ring->desc_size % CONFIG_SYS_CACHELINE_SIZE) {
                rsize = roundup(ring->desc_size, CONFIG_SYS_CACHELINE_SIZE);
                DBG("ag71xx: ring %p, desc size %u rounded to %u\n",
                    ring, ring->desc_size, rsize);
                ring->desc_size = rsize;
        }

        ring->descs_cpu = (u8 *) malloc((size * ring->desc_size)
                                        + CONFIG_SYS_CACHELINE_SIZE - 1);
        if (!ring->descs_cpu) {
                err = -1;
                goto err;
        }
        ring->descs_cpu = (u8 *) UNCACHED_SDRAM((((u32) ring->descs_cpu +
                CONFIG_SYS_CACHELINE_SIZE - 1) & ~(CONFIG_SYS_CACHELINE_SIZE - 1)));
        ring->descs_dma = (u8 *) virt_to_phys(ring->descs_cpu);

        ring->size = size;

        ring->buf = malloc(size * sizeof(*ring->buf));
        if (!ring->buf) {
                err = -1;
                goto err;
        }
        memset(ring->buf, 0, size * sizeof(*ring->buf));

        for (i = 0; i < size; i++) {
                ring->buf[i].desc =
                        (struct ag71xx_desc *) &ring->descs_cpu[i * ring->desc_size];
                DBG("ag71xx: ring %p, desc %d at %p\n",
                    ring, i, ring->buf[i].desc);
        }

        flush_cache((u32) ring->buf, size * sizeof(*ring->buf));

        return 0;

err:
        return err;
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
        struct ag71xx_ring *ring = &ag->tx_ring;
        int i;

        for (i = 0; i < AG71XX_TX_RING_SIZE; i++) {
                ring->buf[i].desc->next = (u32) virt_to_phys(ring->descs_dma +
                        ring->desc_size * ((i + 1) % AG71XX_TX_RING_SIZE));

                ring->buf[i].desc->ctrl = DESC_EMPTY;
                ring->buf[i].skb = NULL;
        }

        ring->curr = 0;
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
        struct ag71xx_ring *ring = &ag->rx_ring;
        int i;

        if (!ring->buf)
                return;

        for (i = 0; i < AG71XX_RX_RING_SIZE; i++) {
                ring->buf[i].desc->data = (u32) virt_to_phys(NetRxPackets[i]);
                flush_cache((u32) NetRxPackets[i], PKTSIZE_ALIGN);
                ring->buf[i].desc->ctrl = DESC_EMPTY;
        }

        ring->curr = 0;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
        struct ag71xx_ring *ring = &ag->rx_ring;
        unsigned int i;

        for (i = 0; i < AG71XX_RX_RING_SIZE; i++) {
                ring->buf[i].desc->next = (u32) virt_to_phys(ring->descs_dma +
                        ring->desc_size * ((i + 1) % AG71XX_RX_RING_SIZE));

                DBG("ag71xx: RX desc at %p, next is %08x\n",
                    ring->buf[i].desc, ring->buf[i].desc->next);
        }

        for (i = 0; i < AG71XX_RX_RING_SIZE; i++) {
                ring->buf[i].desc->data = (u32) virt_to_phys(NetRxPackets[i]);
                ring->buf[i].desc->ctrl = DESC_EMPTY;
        }

        ring->curr = 0;

        return 0;
}

static int ag71xx_rings_init(struct ag71xx *ag)
{
        int ret;

        ret = ag71xx_ring_alloc(&ag->tx_ring, AG71XX_TX_RING_SIZE);
        if (ret)
                return ret;

        ag71xx_ring_tx_init(ag);

        ret = ag71xx_ring_alloc(&ag->rx_ring, AG71XX_RX_RING_SIZE);
        if (ret)
                return ret;

        ret = ag71xx_ring_rx_init(ag);
        return ret;
}

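/*
 * ar71xx_set_pll(): program an Ethernet PLL.  The 2-bit field at
 * 'shift' in the clock config register is first set to 2, the new PLL
 * value is written, the field is then raised to 3 to latch it and
 * finally cleared again, with a short delay after each step.
 */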
static void ar71xx_set_pll(u32 cfg_reg, u32 pll_reg, u32 pll_val, u32 shift)
{
        uint32_t base = KSEG1ADDR(AR71XX_PLL_BASE);
        u32 t;

        t = readl(base + cfg_reg);
        t &= ~(3 << shift);
        t |= (2 << shift);
        writel(t, base + cfg_reg);
        udelay(100);

        writel(pll_val, base + pll_reg);

        t |= (3 << shift);
        writel(t, base + cfg_reg);
        udelay(100);

        t &= ~(3 << shift);
        writel(t, base + cfg_reg);
        udelay(100);

        debug("ar71xx: pll_reg %#x: %#x\n", (unsigned int) (base + pll_reg),
              readl(base + pll_reg));
}

static void ar91xx_set_pll_ge0(int speed)
{
        //u32 val = ar71xx_get_eth_pll(0, speed);
        u32 pll_val;

        switch (speed) {
        case SPEED_10:
                pll_val = 0x00441099;
                break;
        case SPEED_100:
                pll_val = 0x13000a44;
                break;
        case SPEED_1000:
                pll_val = 0x1a000000;
                break;
        default:
                BUG();
        }

        ar71xx_set_pll(AR91XX_PLL_REG_ETH_CONFIG, AR91XX_PLL_REG_ETH0_INT_CLOCK,
                       pll_val, AR91XX_ETH0_PLL_SHIFT);
}

static void ar91xx_set_pll_ge1(int speed)
{
        //u32 val = ar71xx_get_eth_pll(1, speed);
        u32 pll_val;

        switch (speed) {
        case SPEED_10:
                pll_val = 0x00441099;
                break;
        case SPEED_100:
                pll_val = 0x13000a44;
                break;
        case SPEED_1000:
                pll_val = 0x1a000000;
                break;
        default:
                BUG();
        }

        ar71xx_set_pll(AR91XX_PLL_REG_ETH_CONFIG, AR91XX_PLL_REG_ETH1_INT_CLOCK,
                       pll_val, AR91XX_ETH1_PLL_SHIFT);
}

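/*
 * The MAC address is split across two registers.  For the address
 * 00:11:22:33:44:55 this writes 0x55443322 to MAC_ADDR1 and
 * 0x11000000 to MAC_ADDR2 (the lower 16 bits of MAC_ADDR2 are left
 * at zero).
 */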
static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
        u32 t;

        t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16)
          | (((u32) mac[3]) << 8) | ((u32) mac[2]);

        ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

        t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16);
        ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

static void ag71xx_dma_reset(struct ag71xx *ag)
{
        u32 val;
        int i;

        DBG("%s: txdesc reg: 0x%08x rxdesc reg: 0x%08x\n",
            ag->dev->name,
            ag71xx_rr(ag, AG71XX_REG_TX_DESC),
            ag71xx_rr(ag, AG71XX_REG_RX_DESC));

        /* stop RX and TX */
        ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
        ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

        /* clear descriptor addresses */
        ag71xx_wr(ag, AG71XX_REG_TX_DESC, 0);
        ag71xx_wr(ag, AG71XX_REG_RX_DESC, 0);

        /* clear pending RX/TX interrupts */
        for (i = 0; i < 256; i++) {
                ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
                ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
        }

        /* clear pending errors */
        ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
        ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

        val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
        if (val)
                printf("%s: unable to clear DMA Rx status: %08x\n",
                       ag->dev->name, val);

        val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

        /* mask out reserved bits */
        val &= ~0xff000000;

        if (val)
                printf("%s: unable to clear DMA Tx status: %08x\n",
                       ag->dev->name, val);
}

static void ag71xx_halt(struct eth_device *dev)
{
        struct ag71xx *ag = (struct ag71xx *) dev->priv;

        /* stop RX engine */
        ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);

        ag71xx_dma_reset(ag);
}

#define MAX_WAIT 1000

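/*
 * Transmit path: a single descriptor is filled in, the TX engine is
 * started and the descriptor is polled until the DMA engine marks it
 * empty again, i.e. the transfer is fully synchronous.  The polling
 * loop gives up after MAX_WAIT * 10 microseconds (about 10 ms).
 */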
static int ag71xx_send(struct eth_device *dev, volatile void *packet,
                       int length)
{
        struct ag71xx *ag = (struct ag71xx *) dev->priv;
        struct ag71xx_ring *ring = &ag->tx_ring;
        struct ag71xx_desc *desc;
        int i;

        i = ring->curr % AG71XX_TX_RING_SIZE;
        desc = ring->buf[i].desc;

        if (!ag71xx_desc_empty(desc)) {
                printf("%s: tx buffer full\n", ag->dev->name);
                return 1;
        }

        flush_cache((u32) packet, length);
        desc->data = (u32) virt_to_phys(packet);
        desc->ctrl = (length & DESC_PKTLEN_M);

        DBG("%s: sending %#08x length %#08x\n",
            ag->dev->name, desc->data, desc->ctrl);

        ring->curr++;
        if (ring->curr >= AG71XX_TX_RING_SIZE)
                ring->curr = 0;

        /* enable TX engine */
        ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

        for (i = 0; i < MAX_WAIT; i++) {
                if (ag71xx_desc_empty(desc))
                        break;
                udelay(10);
        }
        if (i == MAX_WAIT) {
                printf("%s: tx timed out!\n", ag->dev->name);
                return -1;
        }

        /* disable TX engine */
        ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
        desc->data = 0;
        desc->ctrl = DESC_EMPTY;

        return 0;
}

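/*
 * Receive path: walk the RX ring, hand every filled descriptor to the
 * network stack via NetReceive(), mark it empty again and finally
 * restart the RX engine if the hardware has stopped it.
 */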
static int ag71xx_recv(struct eth_device *dev)
{
        struct ag71xx *ag = (struct ag71xx *) dev->priv;
        struct ag71xx_ring *ring = &ag->rx_ring;

        for (;;) {
                unsigned int i = ring->curr % AG71XX_RX_RING_SIZE;
                struct ag71xx_desc *desc = ring->buf[i].desc;
                int pktlen;

                if (ag71xx_desc_empty(desc))
                        break;

                DBG("%s: rx packets, curr=%u\n", dev->name, ring->curr);

                pktlen = ag71xx_desc_pktlen(desc);
                pktlen -= ETH_FCS_LEN;

                NetReceive(NetRxPackets[i], pktlen);
                flush_cache((u32) NetRxPackets[i], PKTSIZE_ALIGN);

                ring->buf[i].desc->ctrl = DESC_EMPTY;
                ring->curr++;
                if (ring->curr >= AG71XX_RX_RING_SIZE)
                        ring->curr = 0;
        }

        if ((ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE) == 0) {
                /* start RX engine */
                ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
        }

        return 0;
}

#ifdef AG71XX_DEBUG
static char *ag71xx_speed_str(struct ag71xx *ag)
{
        switch (ag->speed) {
        case SPEED_1000:
                return "1000";
        case SPEED_100:
                return "100";
        case SPEED_10:
                return "10";
        }

        return "?";
}
#endif

void ag71xx_link_adjust(struct ag71xx *ag)
{
        u32 cfg2;
        u32 ifctl;
        u32 fifo5;
        u32 mii_speed;

        if (!ag->link) {
                DBG("%s: link down\n", ag->dev->name);
                return;
        }

        cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
        cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
        cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

        ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
        ifctl &= ~(MAC_IFCTL_SPEED);

        fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
        fifo5 &= ~FIFO_CFG5_BM;

        switch (ag->speed) {
        case SPEED_1000:
                mii_speed = MII_CTRL_SPEED_1000;
                cfg2 |= MAC_CFG2_IF_1000;
                fifo5 |= FIFO_CFG5_BM;
                break;
        case SPEED_100:
                mii_speed = MII_CTRL_SPEED_100;
                cfg2 |= MAC_CFG2_IF_10_100;
                ifctl |= MAC_IFCTL_SPEED;
                break;
        case SPEED_10:
                mii_speed = MII_CTRL_SPEED_10;
                cfg2 |= MAC_CFG2_IF_10_100;
                break;
        default:
                BUG();
                return;
        }

        ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x00780fff);

        if (ag->macNum == 0)
                ar91xx_set_pll_ge0(ag->speed);
        else
                ar91xx_set_pll_ge1(ag->speed);

        ag71xx_mii_ctrl_set_speed(ag, mii_speed);

        ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
        ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
        ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

        DBG("%s: link up (%sMbps/%s duplex)\n",
            ag->dev->name,
            ag71xx_speed_str(ag),
            (ag->duplex == 1) ? "Full" : "Half");

        DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
            ag->dev->name,
            ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
            ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
            ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

        DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
            ag->dev->name,
            ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
            ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
            ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

        DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x, mii_ctrl=%#x\n",
            ag->dev->name,
            ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
            ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
            ag71xx_mii_ctrl_rr(ag));
}

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
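/*
 * Resolve link speed and duplex from the attached PHY: check the link
 * bit in BMSR, then the 1000BASE-T status register, and otherwise
 * intersect our advertised abilities (ANAR) with the link partner's
 * (ANLPAR) to pick the best common 10/100 mode.
 */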
static int ag71xx_getMiiSpeed(struct ag71xx *ag)
{
        uint16_t phyreg, cap;

        if (miiphy_read(ag->phyname, ag->phyid, PHY_BMSR, &phyreg)) {
                puts("PHY_BMSR read failed, assuming no link\n");
                return -1;
        }

        if ((phyreg & PHY_BMSR_LS) == 0)
                return -1;

        if (miiphy_read(ag->phyname, ag->phyid, PHY_1000BTSR, &phyreg))
                return -1;

        if (phyreg & PHY_1000BTSR_1000FD) {
                ag->speed = SPEED_1000;
                ag->duplex = 1;
        } else if (phyreg & PHY_1000BTSR_1000HD) {
                ag->speed = SPEED_1000;
                ag->duplex = 0;
        } else {
                if (miiphy_read(ag->phyname, ag->phyid, PHY_ANAR, &cap))
                        return -1;

                if (miiphy_read(ag->phyname, ag->phyid, PHY_ANLPAR, &phyreg))
                        return -1;

                cap &= phyreg;
                if (cap & PHY_ANLPAR_TXFD) {
                        ag->speed = SPEED_100;
                        ag->duplex = 1;
                } else if (cap & PHY_ANLPAR_TX) {
                        ag->speed = SPEED_100;
                        ag->duplex = 0;
                } else if (cap & PHY_ANLPAR_10FD) {
                        ag->speed = SPEED_10;
                        ag->duplex = 1;
                } else {
                        ag->speed = SPEED_10;
                        ag->duplex = 0;
                }
        }

        ag->link = 1;

        return 0;
}
#endif

static int ag71xx_hw_start(struct eth_device *dev, bd_t *bd)
{
        struct ag71xx *ag = (struct ag71xx *) dev->priv;

        ag71xx_dma_reset(ag);

        ag71xx_ring_rx_clean(ag);
        ag71xx_ring_tx_init(ag);

        ag71xx_wr(ag, AG71XX_REG_TX_DESC,
                  (u32) virt_to_phys(ag->tx_ring.descs_dma));
        ag71xx_wr(ag, AG71XX_REG_RX_DESC,
                  (u32) virt_to_phys(ag->rx_ring.descs_dma));

        ag71xx_hw_set_macaddr(ag, ag->dev->enetaddr);

        if (ag->phyfixed) {
                ag->link = 1;
                ag->duplex = 1;
                ag->speed = SPEED_1000;
        } else {
#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
                if (ag71xx_getMiiSpeed(ag))
                        return -1;
#else
                /* no fixed PHY configured and no MII support compiled in */
                return -1;
#endif
        }
        ag71xx_link_adjust(ag);

        DBG("%s: txdesc reg: %#08x rxdesc reg: %#08x\n",
            ag->dev->name,
            ag71xx_rr(ag, AG71XX_REG_TX_DESC),
            ag71xx_rr(ag, AG71XX_REG_RX_DESC));

        /* start RX engine */
        ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

        return 0;
}

#define FIFO_CFG0_INIT  (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG4_INIT  (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
                         FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
                         FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
                         FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
                         FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
                         FIFO_CFG4_VT)

#define FIFO_CFG5_INIT  (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
                         FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
                         FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
                         FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
                         FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
                         FIFO_CFG5_17 | FIFO_CFG5_SF)

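/*
 * One-time hardware bring-up: soft-reset the MAC, pulse the GMAC/PHY
 * bits in the SoC reset module, program the MAC, FIFO and MDIO
 * configuration registers, select the MII interface type and set up
 * the DMA descriptor rings.
 */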
static int ag71xx_hw_init(struct ag71xx *ag)
{
        int ret = 0;
        uint32_t reg;
        uint32_t mask, mii_type;

        if (ag->macNum == 0) {
                mask = (RESET_MODULE_GE0_MAC | RESET_MODULE_GE0_PHY);
                mii_type = 0x13;
        } else {
                mask = (RESET_MODULE_GE1_MAC | RESET_MODULE_GE1_PHY);
                mii_type = 0x11;
        }

        /* MAC soft reset */
        ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
        udelay(20);

        /* put the MAC and PHY into reset */
        reg = ar71xx_reset_rr(AR91XX_RESET_REG_RESET_MODULE);
        ar71xx_reset_wr(AR91XX_RESET_REG_RESET_MODULE, reg | mask);
        udelay(100 * 1000);

        /* take them out of reset again */
        reg = ar71xx_reset_rr(AR91XX_RESET_REG_RESET_MODULE);
        ar71xx_reset_wr(AR91XX_RESET_REG_RESET_MODULE, reg & ~mask);
        udelay(100 * 1000);

        /* setup MAC configuration registers */
        ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, (MAC_CFG1_RXE | MAC_CFG1_TXE));

        ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
                  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

        /* setup FIFO configuration register 0 */
        ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);

        /* setup MII interface type */
        ag71xx_mii_ctrl_set_if(ag, ag->mii_if);

        /* setup MDIO clock divisor */
        ag71xx_wr(ag, AG71XX_REG_MII_CFG, MII_CFG_CLK_DIV_20);

        /* setup FIFO configuration registers */
        ag71xx_sb(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
        ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
        ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
        ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);

        ag71xx_dma_reset(ag);

        ret = ag71xx_rings_init(ag);
        if (ret)
                return -1;

        ag71xx_wr(ag, AG71XX_REG_TX_DESC,
                  (u32) virt_to_phys(ag->tx_ring.descs_dma));
        ag71xx_wr(ag, AG71XX_REG_RX_DESC,
                  (u32) virt_to_phys(ag->rx_ring.descs_dma));

        ag71xx_hw_set_macaddr(ag, ag->dev->enetaddr);

        return 0;
}

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
#define AG71XX_MDIO_RETRY 1000
#define AG71XX_MDIO_DELAY 5

static inline struct ag71xx *ag71xx_name2mac(char *devname)
{
        if (strcmp(devname, agtable[0].dev->name) == 0)
                return &agtable[0];
        else if (strcmp(devname, agtable[1].dev->name) == 0)
                return &agtable[1];
        else
                return NULL;
}

static inline void ag71xx_mdio_wr(struct ag71xx *ag, unsigned reg, u32 value)
{
        uint32_t r;

        r = ag->mac_base + reg;
        writel(value, r);

        /* flush write */
        (void) readl(r);
}

static inline u32 ag71xx_mdio_rr(struct ag71xx *ag, unsigned reg)
{
        return readl(ag->mac_base + reg);
}

static int ag71xx_mdio_read(char *devname, unsigned char addr,
                            unsigned char reg, unsigned short *val)
{
        struct ag71xx *ag = ag71xx_name2mac(devname);
        uint16_t regData;
        int i;

        /* unknown device name */
        if (ag == NULL)
                return -1;

        ag71xx_mdio_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_WRITE);
        ag71xx_mdio_wr(ag, AG71XX_REG_MII_ADDR,
                       ((addr & 0xff) << MII_ADDR_SHIFT) | (reg & 0xff));
        ag71xx_mdio_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

        i = AG71XX_MDIO_RETRY;
        while (ag71xx_mdio_rr(ag, AG71XX_REG_MII_IND) & MII_IND_BUSY) {
                if (i-- == 0) {
                        printf("%s: mii_read timed out\n", ag->dev->name);
                        return -1;
                }
                udelay(AG71XX_MDIO_DELAY);
        }

        regData = (uint16_t) ag71xx_mdio_rr(ag, AG71XX_REG_MII_STATUS) & 0xffff;
        ag71xx_mdio_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_WRITE);

        DBG("mii_read: addr=%04x, reg=%04x, value=%04x\n", addr, reg, regData);

        if (val)
                *val = regData;

        return 0;
}

static int ag71xx_mdio_write(char *devname, unsigned char addr,
                             unsigned char reg, unsigned short val)
{
        struct ag71xx *ag = ag71xx_name2mac(devname);
        int i;

        if (ag == NULL)
                return 1;

        DBG("mii_write: addr=%04x, reg=%04x, value=%04x\n", addr, reg, val);

        ag71xx_mdio_wr(ag, AG71XX_REG_MII_ADDR,
                       ((addr & 0xff) << MII_ADDR_SHIFT) | (reg & 0xff));
        ag71xx_mdio_wr(ag, AG71XX_REG_MII_CTRL, val);

        i = AG71XX_MDIO_RETRY;
        while (ag71xx_mdio_rr(ag, AG71XX_REG_MII_IND) & MII_IND_BUSY) {
                if (i-- == 0) {
                        printf("%s: mii_write timed out\n", ag->dev->name);
                        break;
                }
                udelay(AG71XX_MDIO_DELAY);
        }

        return 0;
}
#endif

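/*
 * Board glue (illustrative sketch only): a board file would typically
 * call ag71xx_register() once from its Ethernet init hook, passing one
 * miiphy device name, PHY address and "fixed link" flag per port.  The
 * values below are placeholders, not taken from a real board:
 *
 *      int board_eth_init(bd_t *bis)
 *      {
 *              char *phynames[MAX_AG71XX_DEVS] = { "eth0", "eth1" };
 *              uint16_t phyids[MAX_AG71XX_DEVS] = { 4, 4 };
 *              uint16_t fixed[MAX_AG71XX_DEVS] = { 0, 1 };
 *
 *              return ag71xx_register(bis, phynames, phyids, fixed);
 *      }
 */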
int ag71xx_register(bd_t *bis, char *phyname[], uint16_t phyid[],
                    uint16_t phyfixed[])
{
        int i, num = 0;
        u8 used_ports[MAX_AG71XX_DEVS] = CONFIG_AG71XX_PORTS;

        for (i = 0; i < MAX_AG71XX_DEVS; i++) {
                /* skip ports that are configured as unused */
                if (used_ports[i] == 0)
                        continue;

                agtable[i].dev = malloc(sizeof(struct eth_device));
                if (agtable[i].dev == NULL) {
                        puts("malloc failed\n");
                        return 0;
                }
                memset(agtable[i].dev, 0, sizeof(struct eth_device));
                sprintf(agtable[i].dev->name, "eth%d", i);

                agtable[i].dev->iobase = 0;
                agtable[i].dev->init = ag71xx_hw_start;
                agtable[i].dev->halt = ag71xx_halt;
                agtable[i].dev->send = ag71xx_send;
                agtable[i].dev->recv = ag71xx_recv;
                agtable[i].dev->priv = (void *) &agtable[i];
                agtable[i].macNum = i;
                eth_register(agtable[i].dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
                if ((phyname == NULL) || (phyid == NULL) || (phyfixed == NULL))
                        return -1;

                agtable[i].phyname = strdup(phyname[i]);
                agtable[i].phyid = phyid[i];
                agtable[i].phyfixed = phyfixed[i];

                miiphy_register(agtable[i].dev->name, ag71xx_mdio_read,
                                ag71xx_mdio_write);
#endif

                if (ag71xx_hw_init(&agtable[i]))
                        continue;

                num++;
        }

        return num;
}