target/linux/cns3xxx/patches/051-cns3xxx_gigabit.patch
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2071,6 +2071,14 @@ config ACENIC_OMIT_TIGON_I
 
 	  The safe and default value for this is N.
 
+config CNS3XXX_ETH
+	tristate "Cavium CNS3xxx Ethernet support"
+	depends on ARCH_CNS3XXX
+	select PHYLIB
+	help
+	  Say Y here if you want to use the built-in Ethernet ports
+	  on the CNS3xxx processor.
+
 config DL2K
 	tristate "DL2000/TC902x-based Gigabit Ethernet support"
 	depends on PCI
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -240,6 +240,7 @@ obj-$(CONFIG_MAC89x0) += mac89x0.o
 obj-$(CONFIG_TUN) += tun.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_NET_NETX) += netx-eth.o
+obj-$(CONFIG_CNS3XXX_ETH) += cns3xxx_eth.o
 obj-$(CONFIG_DL2K) += dl2k.o
 obj-$(CONFIG_R8169) += r8169.o
 obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
--- /dev/null
+++ b/drivers/net/cns3xxx_eth.c
@@ -0,0 +1,1257 @@
+/*
+ * Cavium CNS3xxx Gigabit driver for Linux
+ *
+ * Copyright 2011 Gateworks Corporation
+ * Chris Lang <clang@gateworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <mach/hardware.h>
+
+#define DRV_NAME "cns3xxx_eth"
+
+#define RX_DESCS 512
+#define TX_DESCS 512
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
+#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
+#define REGS_SIZE 0x150
+#define MAX_MRU 9500
+
+#define NAPI_WEIGHT 64
+
+/* Port Config Defines */
+#define PORT_DISABLE (1 << 18)
+
+/* DMA AUTO Poll Defines */
+#define TS_POLL_EN (1 << 5)
+#define TS_SUSPEND (1 << 4)
+#define FS_POLL_EN (1 << 1)
+#define FS_SUSPEND (1 << 0)
+
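+/*
+ * Descriptor ring conventions, inferred from how the rings are used below:
+ * the switch core sets the COWN ("CPU own") bit when it hands a descriptor
+ * back to software, EOR marks the last descriptor of a ring so the DMA
+ * engine wraps, and FSD/LSD flag the first/last segment of a frame.
+ */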
+struct tx_desc
+{
+	u32 sdp; /* segment data pointer */
+
+	union {
+		struct {
+			u32 sdl:16; /* segment data length */
+			u32 tco:1;
+			u32 uco:1;
+			u32 ico:1;
+			u32 rsv_1:3; /* reserved */
+			u32 pri:3;
+			u32 fp:1; /* force priority */
+			u32 fr:1;
+			u32 interrupt:1;
+			u32 lsd:1;
+			u32 fsd:1;
+			u32 eor:1;
+			u32 cown:1;
+		};
+		u32 config0;
+	};
+
+	union {
+		struct {
+			u32 ctv:1;
+			u32 stv:1;
+			u32 sid:4;
+			u32 inss:1;
+			u32 dels:1;
+			u32 rsv_2:9;
+			u32 pmap:5;
+			u32 mark:3;
+			u32 ewan:1;
+			u32 fewan:1;
+			u32 rsv_3:5;
+		};
+		u32 config1;
+	};
+
+	union {
+		struct {
+			u32 c_vid:12;
+			u32 c_cfs:1;
+			u32 c_pri:3;
+			u32 s_vid:12;
+			u32 s_dei:1;
+			u32 s_pri:3;
+		};
+		u32 config2;
+	};
+
+	u8 alignment[16]; /* pad the descriptor to 32 bytes */
+};
+
+struct rx_desc
+{
+	u32 sdp; /* segment data pointer */
+
+	union {
+		struct {
+			u32 sdl:16; /* segment data length */
+			u32 l4f:1;
+			u32 ipf:1;
+			u32 prot:4;
+			u32 hr:6;
+			u32 lsd:1;
+			u32 fsd:1;
+			u32 eor:1;
+			u32 cown:1;
+		};
+		u32 config0;
+	};
+
+	union {
+		struct {
+			u32 ctv:1;
+			u32 stv:1;
+			u32 unv:1;
+			u32 iwan:1;
+			u32 exdv:1;
+			u32 e_wan:1;
+			u32 rsv_1:2;
+			u32 sp:3;
+			u32 crc_err:1;
+			u32 un_eth:1;
+			u32 tc:2;
+			u32 rsv_2:1;
+			u32 ip_offset:5;
+			u32 rsv_3:11;
+		};
+		u32 config1;
+	};
+
+	union {
+		struct {
+			u32 c_vid:12;
+			u32 c_cfs:1;
+			u32 c_pri:3;
+			u32 s_vid:12;
+			u32 s_dei:1;
+			u32 s_pri:3;
+		};
+		u32 config2;
+	};
+
+	u8 alignment[16]; /* pad the descriptor to 32 bytes */
+};
+
+
+struct switch_regs {
+	u32 phy_control; /* 000 */
+	u32 phy_auto_addr;
+	u32 mac_glob_cfg;
+	u32 mac_cfg[4];
+	u32 mac_pri_ctrl[5], __res;
+	u32 etype[2];
+	u32 udp_range[4];
+	u32 prio_etype_udp;
+	u32 prio_ipdscp[8];
+	u32 tc_ctrl;
+	u32 rate_ctrl;
+	u32 fc_glob_thrs;
+	u32 fc_port_thrs;
+	u32 mc_fc_glob_thrs;
+	u32 dc_glob_thrs;
+	u32 arl_vlan_cmd;
+	u32 arl_ctrl[3];
+	u32 vlan_cfg;
+	u32 pvid[2];
+	u32 vlan_ctrl[3];
+	u32 session_id[8];
+	u32 intr_stat;
+	u32 intr_mask;
+	u32 sram_test;
+	u32 mem_queue;
+	u32 farl_ctrl;
+	u32 fc_input_thrs, __res1[2];
+	u32 clk_skew_ctrl;
+	u32 mac_glob_cfg_ext, __res2[2];
+	u32 dma_ring_ctrl;
+	u32 dma_auto_poll_cfg;
+	u32 delay_intr_cfg, __res3;
+	u32 ts_dma_ctrl0;
+	u32 ts_desc_ptr0;
+	u32 ts_desc_base_addr0, __res4;
+	u32 fs_dma_ctrl0;
+	u32 fs_desc_ptr0;
+	u32 fs_desc_base_addr0, __res5;
+	u32 ts_dma_ctrl1;
+	u32 ts_desc_ptr1;
+	u32 ts_desc_base_addr1, __res6;
+	u32 fs_dma_ctrl1;
+	u32 fs_desc_ptr1;
+	u32 fs_desc_base_addr1;
+};
+
+struct _tx_ring {
+	struct tx_desc *desc;
+	dma_addr_t phys_addr;
+	struct tx_desc *cur_addr;
+	struct sk_buff *buff_tab[TX_DESCS];
+	u32 free_index;
+	u32 count_index;
+	u32 cur_index;
+	int num_used;
+	int num_count;
+};
+
+struct _rx_ring {
+	struct rx_desc *desc;
+	dma_addr_t phys_addr;
+	struct rx_desc *cur_addr;
+	struct sk_buff *buff_tab[RX_DESCS];
+	u32 cur_index;
+	u32 alloc_index;
+	int alloc_count;
+};
+
+struct sw {
+	struct resource *mem_res;
+	struct switch_regs __iomem *regs;
+	struct napi_struct napi;
+	struct cns3xxx_plat_info *plat;
+	struct _tx_ring *tx_ring;
+	struct _rx_ring *rx_ring;
+	u32 mtu;
+};
+
+struct port {
+	struct net_device *netdev;
+	struct phy_device *phydev;
+	struct sw *sw;
+	int id; /* logical port ID */
+	int speed, duplex;
+	u32 mtu;
+};
+
+static spinlock_t mdio_lock;
+static spinlock_t tx_lock;
+static spinlock_t stat_lock;
+static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
+static struct mii_bus *mdio_bus;
+static int ports_open;
+static struct port *switch_port_tab[3];
+static struct dma_pool *rx_dma_pool;
+static struct dma_pool *tx_dma_pool;
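+/* One RX ring and one RX IRQ serve all ports, so a single dummy netdev
+ * hosts the shared NAPI context for the whole switch. */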
+static struct net_device *napi_dev;
+
+static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+			    int write, u16 cmd)
+{
+	int cycles = 0;
+	u32 temp = 0;
+
+	temp = __raw_readl(&mdio_regs->phy_control);
+	temp |= (1 << 15); /* Clear Command Complete bit */
+	__raw_writel(temp, &mdio_regs->phy_control);
+	udelay(10);
+
+	if (write) {
+		temp = (cmd << 16);
+		temp |= (1 << 13); /* Write Command */
+	} else {
+		temp = (1 << 14); /* Read Command */
+	}
+	temp |= ((location & 0x1f) << 8);
+	temp |= (phy_id & 0x1f);
+
+	__raw_writel(temp, &mdio_regs->phy_control);
+
+	while (((__raw_readl(&mdio_regs->phy_control) & 0x8000) == 0)
+	       && cycles < 5000) {
+		udelay(1);
+		cycles++;
+	}
+
+	if (cycles == 5000) {
+		printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
+		       phy_id);
+		return -1;
+	}
+
+	temp = __raw_readl(&mdio_regs->phy_control);
+	temp |= (1 << 15); /* Clear Command Complete bit */
+	__raw_writel(temp, &mdio_regs->phy_control);
+
+	if (write)
+		return 0;
+
+	return ((temp >> 16) & 0xFFFF);
+}
+
+static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&mdio_lock, flags);
+	ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
+	spin_unlock_irqrestore(&mdio_lock, flags);
+	return ret;
+}
+
+static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+			      u16 val)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&mdio_lock, flags);
+	ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
+	spin_unlock_irqrestore(&mdio_lock, flags);
+	return ret;
+}
+
+static int cns3xxx_mdio_register(void)
+{
+	int err;
+
+	if (!(mdio_bus = mdiobus_alloc()))
+		return -ENOMEM;
+
+	mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
+
+	spin_lock_init(&mdio_lock);
+	mdio_bus->name = "CNS3xxx MII Bus";
+	mdio_bus->read = &cns3xxx_mdio_read;
+	mdio_bus->write = &cns3xxx_mdio_write;
+	strcpy(mdio_bus->id, "0");
+
+	if ((err = mdiobus_register(mdio_bus)))
+		mdiobus_free(mdio_bus);
+	return err;
+}
+
+static void cns3xxx_mdio_remove(void)
+{
+	mdiobus_unregister(mdio_bus);
+	mdiobus_free(mdio_bus);
+}
+
+static void cns3xxx_adjust_link(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	struct phy_device *phydev = port->phydev;
+
+	if (!phydev->link) {
+		if (port->speed) {
+			port->speed = 0;
+			printk(KERN_INFO "%s: link down\n", dev->name);
+		}
+		return;
+	}
+
+	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+		return;
+
+	port->speed = phydev->speed;
+	port->duplex = phydev->duplex;
+
+	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+	       dev->name, port->speed, port->duplex ? "full" : "half");
+}
+
+static irqreturn_t eth_rx_irq(int irq, void *pdev)
+{
+	struct net_device *dev = pdev;
+	struct sw *sw = netdev_priv(dev);
+	if (likely(napi_schedule_prep(&sw->napi))) {
+		disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
+		__napi_schedule(&sw->napi);
+	}
+	return IRQ_HANDLED;
+}
+
+
+static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
+{
+	struct _rx_ring *rx_ring = sw->rx_ring;
+	unsigned int i = rx_ring->alloc_index;
+	struct rx_desc *desc;
+	struct sk_buff *skb;
+	u32 mtu = sw->mtu;
+
+	rx_ring->alloc_count += received;
+
+	for (received = rx_ring->alloc_count; received > 0; received--) {
+		desc = &(rx_ring)->desc[i];
+
+		if ((skb = dev_alloc_skb(mtu))) {
+			if (SKB_DMA_REALIGN)
+				skb_reserve(skb, SKB_DMA_REALIGN);
+			skb_reserve(skb, NET_IP_ALIGN);
+			desc->sdp = dma_map_single(NULL, skb->data,
+						   mtu, DMA_FROM_DEVICE);
+			if (dma_mapping_error(NULL, desc->sdp)) {
+				printk(KERN_ERR "%s: failed to map RX buffer\n", DRV_NAME);
+				dev_kfree_skb(skb);
+				/* Failed to map, better luck next time */
+				goto out;
+			}
+		} else {
+			printk(KERN_ERR "%s: failed to allocate RX buffer\n", DRV_NAME);
+			/* Failed to allocate skb, try again next time */
+			goto out;
+		}
+
+		/* put the new buffer on RX-free queue */
+		rx_ring->buff_tab[i] = skb;
+
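+		/* 0x70000000 = EOR | FSD | LSD (ring wrap), 0x30000000 = FSD | LSD;
+		 * COWN stays clear, so the switch owns the buffer again. */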
+		if (++i == RX_DESCS) {
+			i = 0;
+			desc->config0 = 0x70000000 | mtu;
+		} else {
+			desc->config0 = 0x30000000 | mtu;
+		}
+	}
+out:
+	rx_ring->alloc_count = received;
+	rx_ring->alloc_index = i;
+}
+
+static void update_tx_stats(struct sw *sw)
+{
+	struct _tx_ring *tx_ring = sw->tx_ring;
+	struct tx_desc *desc;
+	struct tx_desc *next_desc;
+	struct sk_buff *skb;
+	int i;
+	int index;
+	int num_count;
+
+	spin_lock_bh(&stat_lock);
+
+	num_count = tx_ring->num_count;
+
+	if (!num_count) {
+		spin_unlock_bh(&stat_lock);
+		return;
+	}
+
+	index = tx_ring->count_index;
+	desc = &(tx_ring)->desc[index];
+	for (i = 0; i < num_count; i++) {
+		skb = tx_ring->buff_tab[index];
+		if (desc->cown) {
+			tx_ring->buff_tab[index] = 0;
+			if (unlikely(++index == TX_DESCS)) index = 0;
+			next_desc = &(tx_ring)->desc[index];
+			prefetch(next_desc + 4);
+			if (likely(skb)) {
+				skb->dev->stats.tx_packets++;
+				skb->dev->stats.tx_bytes += skb->len;
+				dev_kfree_skb_any(skb);
+			}
+			desc = next_desc;
+		} else {
+			break;
+		}
+	}
+	tx_ring->num_count -= i;
+	tx_ring->count_index = index;
+
+	spin_unlock_bh(&stat_lock);
+}
+
+static void clear_tx_desc(struct sw *sw)
+{
+	struct _tx_ring *tx_ring = sw->tx_ring;
+	struct tx_desc *desc;
+	struct tx_desc *next_desc;
+	int i;
+	int index;
+	int num_used = tx_ring->num_used - tx_ring->num_count;
+
+	if (num_used < (TX_DESCS >> 1))
+		return;
+
+	index = tx_ring->free_index;
+	desc = &(tx_ring)->desc[index];
+	for (i = 0; i < num_used; i++) {
+		if (desc->cown) {
+			if (unlikely(++index == TX_DESCS)) index = 0;
+			next_desc = &(tx_ring)->desc[index];
+			prefetch(next_desc);
+			prefetch(next_desc + 4);
+			if (likely(desc->sdp))
+				dma_unmap_single(NULL, desc->sdp,
+						 desc->sdl, DMA_TO_DEVICE);
+			desc = next_desc;
+		} else {
+			break;
+		}
+	}
+	tx_ring->free_index = index;
+	tx_ring->num_used -= i;
+}
+
+static int eth_poll(struct napi_struct *napi, int budget)
+{
+	struct sw *sw = container_of(napi, struct sw, napi);
+	struct net_device *dev;
+	struct _rx_ring *rx_ring = sw->rx_ring;
+	int received = 0;
+	unsigned int length;
+	unsigned int i = rx_ring->cur_index;
+	struct rx_desc *next_desc;
+	struct rx_desc *desc = &(rx_ring)->desc[i];
+	int port_id;
+
+	while (desc->cown) {
+		struct sk_buff *skb;
+
+		if (received >= budget)
+			break;
+
+		skb = rx_ring->buff_tab[i];
+
+		if (++i == RX_DESCS) i = 0;
+		next_desc = &(rx_ring)->desc[i];
+		prefetch(next_desc);
+		prefetch(next_desc + 4);
+		prefetch(next_desc + 8);
+
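+		/* sp is the hardware source port: MACs 0 and 1 report 0 and 1, the
+		 * third MAC reports 4 but sits at index 2 of switch_port_tab. */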
+		port_id = desc->sp;
+		if (port_id == 4)
+			dev = switch_port_tab[2]->netdev;
+		else
+			dev = switch_port_tab[port_id]->netdev;
+
+		length = desc->sdl;
+		/* process received frame */
+		dma_unmap_single(&dev->dev, desc->sdp,
+				 length, DMA_FROM_DEVICE);
+
+		skb_put(skb, length);
+
+		skb->dev = dev;
+		skb->protocol = eth_type_trans(skb, dev);
+
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += length;
+
+		switch (desc->prot) {
+		case 1:
+		case 2:
+		case 5:
+		case 6:
+		case 13:
+		case 14:
+			if (desc->l4f)
+				skb->ip_summed = CHECKSUM_NONE;
+			else
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+			break;
+		default:
+			skb->ip_summed = CHECKSUM_NONE;
+			break;
+		}
+
+		napi_gro_receive(napi, skb);
+
+		received++;
+		desc = next_desc;
+	}
+
+	cns3xxx_alloc_rx_buf(sw, received);
+	rx_ring->cur_index = i;
+
+	if (received != budget) {
+		napi_complete(napi);
+		enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+	}
+
+	return received;
+}
+
+static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	struct sw *sw = port->sw;
+	struct _tx_ring *tx_ring = sw->tx_ring;
+	struct tx_desc *tx_desc;
+	int index;
+	int len = skb->len;
+	char pmap = (1 << port->id);
+
+	if (pmap == 8)
+		pmap = (1 << 4);
+
+	if (unlikely(len > sw->mtu)) {
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		return NETDEV_TX_OK;
+	}
+
+	update_tx_stats(sw);
+
+	spin_lock_bh(&tx_lock);
+
+	clear_tx_desc(sw);
+
+	if (unlikely(tx_ring->num_used == TX_DESCS)) {
+		spin_unlock_bh(&tx_lock);
+		return NETDEV_TX_BUSY;
+	}
+
+	index = tx_ring->cur_index;
+
+	if (unlikely(++tx_ring->cur_index == TX_DESCS))
+		tx_ring->cur_index = 0;
+
+	tx_ring->num_used++;
+	tx_ring->num_count++;
+
+	spin_unlock_bh(&tx_lock);
+
+	tx_desc = &(tx_ring)->desc[index];
+
+	tx_desc->sdp = dma_map_single(NULL, skb->data, len,
+				      DMA_TO_DEVICE);
+
+	if (dma_mapping_error(NULL, tx_desc->sdp)) {
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		return NETDEV_TX_OK;
+	}
+
+	tx_desc->pmap = pmap;
+	tx_ring->buff_tab[index] = skb;
+
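+	/* 0x74070000 = EOR | FSD | LSD | FR | ICO | UCO | TCO, 0x34070000 is
+	 * the same without EOR: a single-segment frame with checksum offload
+	 * enabled, FR presumably forcing the route given by pmap. */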
+	if (index == TX_DESCS - 1) {
+		tx_desc->config0 = 0x74070000 | len;
+	} else {
+		tx_desc->config0 = 0x34070000 | len;
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct port *port = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+	return phy_mii_ioctl(port->phydev, req, cmd);
+}
+
+/* ethtool support */
+
+static void cns3xxx_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->bus_info, "internal");
+}
+
+static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct port *port = netdev_priv(dev);
+	return phy_ethtool_gset(port->phydev, cmd);
+}
+
+static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct port *port = netdev_priv(dev);
+	return phy_ethtool_sset(port->phydev, cmd);
+}
+
+static int cns3xxx_nway_reset(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	return phy_start_aneg(port->phydev);
+}
+
+static struct ethtool_ops cns3xxx_ethtool_ops = {
+	.get_drvinfo = cns3xxx_get_drvinfo,
+	.get_settings = cns3xxx_get_settings,
+	.set_settings = cns3xxx_set_settings,
+	.nway_reset = cns3xxx_nway_reset,
+	.get_link = ethtool_op_get_link,
+};
+
+
+static int init_rings(struct sw *sw)
+{
+	int i;
+	struct _rx_ring *rx_ring = sw->rx_ring;
+	struct _tx_ring *tx_ring = sw->tx_ring;
+
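+	/* Quiesce RX DMA and reset both descriptor rings before reprogramming
+	 * the base addresses; 0x11 is TS_SUSPEND | FS_SUSPEND. */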
+	__raw_writel(0x0, &sw->regs->fs_dma_ctrl0);
+	__raw_writel(0x11, &sw->regs->dma_auto_poll_cfg);
+	__raw_writel(0x000000f0, &sw->regs->dma_ring_ctrl);
+	__raw_writel(0x800000f0, &sw->regs->dma_ring_ctrl);
+
+	__raw_writel(0x000000f0, &sw->regs->dma_ring_ctrl);
+
+	if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL, RX_POOL_ALLOC_SIZE, 32, 0)))
+		return -ENOMEM;
+
+	if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
+					     &rx_ring->phys_addr)))
+		return -ENOMEM;
+	memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
+
+	/* Setup RX buffers */
+	for (i = 0; i < RX_DESCS; i++) {
+		struct rx_desc *desc = &(rx_ring)->desc[i];
+		struct sk_buff *skb;
+		if (!(skb = dev_alloc_skb(sw->mtu)))
+			return -ENOMEM;
+		if (SKB_DMA_REALIGN)
+			skb_reserve(skb, SKB_DMA_REALIGN);
+		skb_reserve(skb, NET_IP_ALIGN);
+		desc->sdl = sw->mtu;
+		if (i == (RX_DESCS - 1))
+			desc->eor = 1;
+		desc->fsd = 1;
+		desc->lsd = 1;
+
+		desc->sdp = dma_map_single(NULL, skb->data,
+					   sw->mtu, DMA_FROM_DEVICE);
+		if (dma_mapping_error(NULL, desc->sdp)) {
+			return -EIO;
+		}
+		rx_ring->buff_tab[i] = skb;
+		desc->cown = 0;
+	}
+	__raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
+	__raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);
+
+	if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL, TX_POOL_ALLOC_SIZE, 32, 0)))
+		return -ENOMEM;
+
+	if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
+					     &tx_ring->phys_addr)))
+		return -ENOMEM;
+	memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
+
+	/* Setup TX buffers */
+	for (i = 0; i < TX_DESCS; i++) {
+		struct tx_desc *desc = &(tx_ring)->desc[i];
+		tx_ring->buff_tab[i] = 0;
+
+		if (i == (TX_DESCS - 1))
+			desc->eor = 1;
+		desc->cown = 1;
+	}
+	__raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
+	__raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);
+
+	return 0;
+}
+
+static void destroy_rings(struct sw *sw)
+{
+	int i;
+	if (sw->rx_ring->desc) {
+		for (i = 0; i < RX_DESCS; i++) {
+			struct _rx_ring *rx_ring = sw->rx_ring;
+			struct rx_desc *desc = &(rx_ring)->desc[i];
+			struct sk_buff *skb = sw->rx_ring->buff_tab[i];
+			if (skb) {
+				dma_unmap_single(NULL,
+						 desc->sdp,
+						 sw->mtu, DMA_FROM_DEVICE);
+				dev_kfree_skb(skb);
+			}
+		}
+		dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
+		dma_pool_destroy(rx_dma_pool);
+		rx_dma_pool = 0;
+		sw->rx_ring->desc = 0;
+	}
+	if (sw->tx_ring->desc) {
+		for (i = 0; i < TX_DESCS; i++) {
+			struct _tx_ring *tx_ring = sw->tx_ring;
+			struct tx_desc *desc = &(tx_ring)->desc[i];
+			struct sk_buff *skb = sw->tx_ring->buff_tab[i];
+			if (skb) {
+				dma_unmap_single(NULL, desc->sdp,
+						 skb->len, DMA_TO_DEVICE);
+				dev_kfree_skb(skb);
+			}
+		}
+		dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr);
+		dma_pool_destroy(tx_dma_pool);
+		tx_dma_pool = 0;
+		sw->tx_ring->desc = 0;
+	}
+}
+
+static int eth_open(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	struct sw *sw = port->sw;
+	u32 temp;
+
+	port->speed = 0; /* force "link up" message */
+	phy_start(port->phydev);
+
+	netif_start_queue(dev);
+
+	if (!ports_open) {
+		request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
+		napi_enable(&sw->napi);
+		netif_start_queue(napi_dev);
+		//enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+
+		temp = __raw_readl(&sw->regs->mac_cfg[2]);
+		temp &= ~(PORT_DISABLE);
+		__raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+		temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
+		temp &= ~(0x11);
+		__raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+
+		__raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
+	}
+	temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+	temp &= ~(PORT_DISABLE);
+	__raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+	ports_open++;
+	netif_carrier_on(dev);
+
+	return 0;
+}
+
+static int eth_close(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	struct sw *sw = port->sw;
+	u32 temp;
+
+	ports_open--;
+
+	temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+	temp |= (PORT_DISABLE);
+	__raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+	netif_stop_queue(dev);
+
+	phy_stop(port->phydev);
+
+	if (!ports_open) {
+		disable_irq(IRQ_CNS3XXX_SW_R0RXC);
+		free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
+		napi_disable(&sw->napi);
+		netif_stop_queue(napi_dev);
+		temp = __raw_readl(&sw->regs->mac_cfg[2]);
+		temp |= (PORT_DISABLE);
+		__raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+		temp = 0x11;
+		__raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+	}
+
+	netif_carrier_off(dev);
+	return 0;
+}
+
+static void eth_rx_mode(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	struct sw *sw = port->sw;
+	u32 temp;
+
+	temp = __raw_readl(&sw->regs->mac_glob_cfg);
+
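+	/* Bits 31:29 of mac_glob_cfg appear to be per-port promiscuous-mode
+	 * enables; the third MAC (id 3) maps to bit 31. */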
+	if (dev->flags & IFF_PROMISC) {
+		if (port->id == 3)
+			temp |= ((1 << 2) << 29);
+		else
+			temp |= ((1 << port->id) << 29);
+	} else {
+		if (port->id == 3)
+			temp &= ~((1 << 2) << 29);
+		else
+			temp &= ~((1 << port->id) << 29);
+	}
+	__raw_writel(temp, &sw->regs->mac_glob_cfg);
+}
+
+static int eth_set_mac(struct net_device *netdev, void *p)
+{
+	struct port *port = netdev_priv(netdev);
+	struct sw *sw = port->sw;
+	struct sockaddr *addr = p;
+	u32 cycles = 0;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
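+	/* ARL (address resolution/lookup) table entries are written by filling
+	 * arl_ctrl[0..2] and issuing a write command (bit 19) through
+	 * arl_vlan_cmd; bit 21 signals completion. */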
+	/* Invalidate old ARL Entry */
+	if (port->id == 3)
+		__raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+	else
+		__raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+	__raw_writel(((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
+		      (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
+		     &sw->regs->arl_ctrl[1]);
+
+	__raw_writel(((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
+		      (1 << 1)),
+		     &sw->regs->arl_ctrl[2]);
+	__raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
+
+	while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
+	       && cycles < 5000) {
+		udelay(1);
+		cycles++;
+	}
+
+	cycles = 0;
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+	if (port->id == 3)
+		__raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+	else
+		__raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+	__raw_writel(((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
+		      (addr->sa_data[2] << 8) | (addr->sa_data[3])),
+		     &sw->regs->arl_ctrl[1]);
+
+	__raw_writel(((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
+		      (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
+	__raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
+
+	while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
+	       && cycles < 5000) {
+		udelay(1);
+		cycles++;
+	}
+	return 0;
+}
+
+static int cns3xxx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct port *port = netdev_priv(netdev);
+	struct sw *sw = port->sw;
+	u32 temp;
+	int i;
+	struct _rx_ring *rx_ring = sw->rx_ring;
+	struct rx_desc *desc;
+	struct sk_buff *skb;
+
+	if (new_mtu > MAX_MRU)
+		return -EINVAL;
+
+	netdev->mtu = new_mtu;
+
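+	/* Headroom past the payload MTU: 36 bytes, presumably Ethernet header
+	 * plus VLAN tags and FCS, and any realignment padding. */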
+	new_mtu += 36 + SKB_DMA_REALIGN;
+	port->mtu = new_mtu;
+
+	new_mtu = 0;
+	for (i = 0; i < 3; i++) {
+		if (switch_port_tab[i]) {
+			if (switch_port_tab[i]->mtu > new_mtu)
+				new_mtu = switch_port_tab[i]->mtu;
+		}
+	}
+
+
+	if (new_mtu == sw->mtu)
+		return 0;
+
+	disable_irq(IRQ_CNS3XXX_SW_R0RXC);
+
+	sw->mtu = new_mtu;
+
+	/* Disable DMA */
+	temp = 0x11;
+	__raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+
+	for (i = 0; i < RX_DESCS; i++) {
+		desc = &(rx_ring)->desc[i];
+		/* Check if we own it, if we do, it will get set correctly when it is re-used */
+		if (!desc->cown) {
+			skb = rx_ring->buff_tab[i];
+			dma_unmap_single(NULL, desc->sdp, desc->sdl, DMA_FROM_DEVICE);
+			dev_kfree_skb(skb);
+
+			if ((skb = dev_alloc_skb(new_mtu))) {
+				if (SKB_DMA_REALIGN)
+					skb_reserve(skb, SKB_DMA_REALIGN);
+				skb_reserve(skb, NET_IP_ALIGN);
+				desc->sdp = dma_map_single(NULL, skb->data,
+							   new_mtu, DMA_FROM_DEVICE);
+				if (dma_mapping_error(NULL, desc->sdp)) {
+					dev_kfree_skb(skb);
+					skb = NULL;
+				}
+			}
+
+			/* put the new buffer on RX-free queue */
+			rx_ring->buff_tab[i] = skb;
+
+			if (i == RX_DESCS - 1)
+				desc->config0 = 0x70000000 | new_mtu;
+			else
+				desc->config0 = 0x30000000 | new_mtu;
+		}
+	}
+
+	/* Re-enable DMA */
+	temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
+	temp &= ~(0x11);
+	__raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+
+	__raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg);
+
+	enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+
+	return 0;
+}
+
+static const struct net_device_ops cns3xxx_netdev_ops = {
+	.ndo_open = eth_open,
+	.ndo_stop = eth_close,
+	.ndo_start_xmit = eth_xmit,
+	.ndo_set_rx_mode = eth_rx_mode,
+	.ndo_do_ioctl = eth_ioctl,
+	.ndo_change_mtu = cns3xxx_change_mtu,
+	.ndo_set_mac_address = eth_set_mac,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit eth_init_one(struct platform_device *pdev)
+{
+	int i;
+	struct port *port;
+	struct sw *sw;
+	struct net_device *dev;
+	struct cns3xxx_plat_info *plat = pdev->dev.platform_data;
+	u32 regs_phys;
+	char phy_id[MII_BUS_ID_SIZE + 3];
+	int err;
+	u32 temp;
+
+	spin_lock_init(&tx_lock);
+	spin_lock_init(&stat_lock);
+
+	if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
+		return -ENOMEM;
+	strcpy(napi_dev->name, "switch%d");
+
+	SET_NETDEV_DEV(napi_dev, &pdev->dev);
+	sw = netdev_priv(napi_dev);
+	memset(sw, 0, sizeof(struct sw));
+	sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
+	regs_phys = CNS3XXX_SWITCH_BASE;
+	sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);
+	if (!sw->mem_res) {
+		err = -EBUSY;
+		goto err_free;
+	}
+
+	sw->mtu = 1536 + SKB_DMA_REALIGN;
+
+	for (i = 0; i < 4; i++) {
+		temp = __raw_readl(&sw->regs->mac_cfg[i]);
+		temp |= (PORT_DISABLE) | 0x80000000;
+		__raw_writel(temp, &sw->regs->mac_cfg[i]);
+	}
+
+	temp = PORT_DISABLE;
+	__raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+	temp = __raw_readl(&sw->regs->vlan_cfg);
+	temp |= ((1 << 15) | (1 << 0));
+	__raw_writel(temp, &sw->regs->vlan_cfg);
+
+	temp = 0x02300000;
+	__raw_writel(temp, &sw->regs->mac_glob_cfg);
+
+	if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+	memset(sw->rx_ring, 0, sizeof(struct _rx_ring));
+
+	if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) {
+		err = -ENOMEM;
+		goto err_free_rx;
+	}
+	memset(sw->tx_ring, 0, sizeof(struct _tx_ring));
+
+	if ((err = init_rings(sw)) != 0) {
+		destroy_rings(sw);
+		err = -ENOMEM;
+		goto err_free_rings;
+	}
+	platform_set_drvdata(pdev, napi_dev);
+
+	netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);
+
+	for (i = 0; i < 3; i++) {
+		if (!(plat->ports & (1 << i))) {
+			continue;
+		}
+
+		if (!(dev = alloc_etherdev(sizeof(struct port)))) {
+			goto free_ports;
+		}
+
+		//SET_NETDEV_DEV(dev, &pdev->dev);
+		port = netdev_priv(dev);
+		port->netdev = dev;
+		if (i == 2)
+			port->id = 3;
+		else
+			port->id = i;
+		port->sw = sw;
+		port->mtu = sw->mtu;
+
+		temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+		temp |= (PORT_DISABLE);
+		__raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+		dev->netdev_ops = &cns3xxx_netdev_ops;
+		dev->ethtool_ops = &cns3xxx_ethtool_ops;
+		dev->tx_queue_len = 1000;
+		dev->features = NETIF_F_HW_CSUM;
+
+		dev->vlan_features = NETIF_F_HW_CSUM;
+
+		switch_port_tab[i] = port;
+		memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);
+
+		snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
+		port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
+					   PHY_INTERFACE_MODE_RGMII);
+		if ((err = IS_ERR(port->phydev))) {
+			switch_port_tab[i] = 0;
+			free_netdev(dev);
+			goto free_ports;
+		}
+
+		port->phydev->irq = PHY_POLL;
+
+		if ((err = register_netdev(dev))) {
+			phy_disconnect(port->phydev);
+			switch_port_tab[i] = 0;
+			free_netdev(dev);
+			goto free_ports;
+		}
+
+		printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
+		netif_carrier_off(dev);
+		dev = 0;
+	}
+
+	return 0;
+
+free_ports:
+	err = -ENOMEM;
+	for (--i; i >= 0; i--) {
+		if (switch_port_tab[i]) {
+			port = switch_port_tab[i];
+			dev = port->netdev;
+			unregister_netdev(dev);
+			phy_disconnect(port->phydev);
+			switch_port_tab[i] = 0;
+			free_netdev(dev);
+		}
+	}
+err_free_rings:
+	kfree(sw->tx_ring);
+err_free_rx:
+	kfree(sw->rx_ring);
+err_free:
+	free_netdev(napi_dev);
+	return err;
+}
+
+static int __devexit eth_remove_one(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct sw *sw = netdev_priv(dev);
+	int i;
+	destroy_rings(sw);
+
+	for (i = 2; i >= 0; i--) {
+		if (switch_port_tab[i]) {
+			struct port *port = switch_port_tab[i];
+			struct net_device *dev = port->netdev;
+			unregister_netdev(dev);
+			phy_disconnect(port->phydev);
+			switch_port_tab[i] = 0;
+			free_netdev(dev);
+		}
+	}
+
+	release_resource(sw->mem_res);
+	free_netdev(napi_dev);
+	return 0;
+}
+
+static struct platform_driver cns3xxx_eth_driver = {
+	.driver.name = DRV_NAME,
+	.probe = eth_init_one,
+	.remove = eth_remove_one,
+};
+
+static int __init eth_init_module(void)
+{
+	int err;
+	if ((err = cns3xxx_mdio_register()))
+		return err;
+	return platform_driver_register(&cns3xxx_eth_driver);
+}
+
+static void __exit eth_cleanup_module(void)
+{
+	platform_driver_unregister(&cns3xxx_eth_driver);
+	cns3xxx_mdio_remove();
+}
+
+module_init(eth_init_module);
+module_exit(eth_cleanup_module);
+
+MODULE_AUTHOR("Chris Lang");
+MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cns3xxx_eth");
--- a/arch/arm/mach-cns3xxx/include/mach/hardware.h
+++ b/arch/arm/mach-cns3xxx/include/mach/hardware.h
@@ -19,4 +19,6 @@
 #define PCIBIOS_MIN_MEM 0x00000000
 #define pcibios_assign_all_busses() 1
 
+#include "platform.h"
+
 #endif
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/platform.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm/mach-cns3xxx/include/mach/platform.h
+ *
+ * Copyright 2011 Gateworks Corporation
+ * Chris Lang <clang@gateworks.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_ARCH_PLATFORM_H
+#define __ASM_ARCH_PLATFORM_H
+
+#ifndef __ASSEMBLY__
+
+/* Information about built-in Ethernet MAC interfaces */
+struct cns3xxx_plat_info {
+	u8 ports; /* Bitmap of enabled Ports */
+	u8 hwaddr[4][6];
+	u32 phy[3];
+};
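+
+/*
+ * Board support code registers a "cns3xxx_eth" platform device whose
+ * platform_data points at one of these.  A hypothetical board setup:
+ *
+ *	static struct cns3xxx_plat_info eth_data = {
+ *		.ports	= 0x7,			(ports 0, 1 and 2)
+ *		.phy	= { 0, 1, 2 },		(MDIO addresses)
+ *	};
+ */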
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_ARCH_PLATFORM_H */