/*
 * drivers/net/ubi32-eth.c
 *	Ubicom32 ethernet TIO interface driver.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *	Ethernet driver for Ip5k/Ip7K
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>

#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <asm/checksum.h>
#include <asm/ip5000.h>
#include <asm/devtree.h>
#include <asm/system.h>
#define UBICOM32_USE_NAPI	/* define this to use NAPI instead of tasklet */
//#define UBICOM32_USE_POLLING	/* define this to use polling instead of interrupt */
#include "ubi32-eth.h"
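
/*
 * Note on the three receive-processing modes (summarizing the code below):
 * with UBICOM32_USE_NAPI defined, RX and TX completion work runs in the
 * NAPI poll handler; otherwise it runs in a per-device tasklet.  Defining
 * UBICOM32_USE_POLLING additionally replaces the device interrupt with a
 * periodic kernel timer that kicks NAPI/tasklet.  Only one of the
 * NAPI/tasklet paths is compiled in at a time.
 */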
/*
 * mac address from flash
 */
extern int ubi32_ocm_skbuf_max, ubi32_ocm_skbuf, ubi32_ddr_skbuf;
static const char *eth_if_name[UBI32_ETH_NUM_OF_DEVICES] =
	{"eth_lan", "eth_wan"};
static struct net_device *ubi32_eth_devices[UBI32_ETH_NUM_OF_DEVICES] =
	{NULL, NULL};
static u8_t mac_addr[UBI32_ETH_NUM_OF_DEVICES][ETH_ALEN] = {
	{0x00, 0x03, 0x64, 'l', 'a', 'n'},
	{0x00, 0x03, 0x64, 'w', 'a', 'n'}};
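
/*
 * Note: these are placeholder addresses: the 00:03:64 prefix (presumably
 * Ubicom's OUI) is followed by the ASCII bytes 'l','a','n' / 'w','a','n'
 * so the two interfaces are easy to tell apart until a real address
 * (e.g. the one from flash, per the comment above) is set through
 * ndo_set_mac_address.
 */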
#if (defined(CONFIG_ZONE_DMA) && defined(CONFIG_UBICOM32_OCM_FOR_SKB))
static inline struct sk_buff *ubi32_alloc_skb_ocm(struct net_device *dev, unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC | __GFP_NOWARN | __GFP_NORETRY | GFP_DMA);
}
#endif
static inline struct sk_buff *ubi32_alloc_skb(struct net_device *dev, unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC | __GFP_NOWARN);
}
static void ubi32_eth_vp_rxtx_enable(struct net_device *dev)
{
	struct ubi32_eth_private *priv = netdev_priv(dev);

	priv->regs->command = UBI32_ETH_VP_CMD_RX_ENABLE | UBI32_ETH_VP_CMD_TX_ENABLE;
	priv->regs->int_mask = (UBI32_ETH_VP_INT_RX | UBI32_ETH_VP_INT_TX);
	ubicom32_set_interrupt(priv->vp_int_bit);
}
static void ubi32_eth_vp_rxtx_stop(struct net_device *dev)
{
	struct ubi32_eth_private *priv = netdev_priv(dev);

	priv->regs->command = 0;
	priv->regs->int_mask = 0;
	ubicom32_set_interrupt(priv->vp_int_bit);

	/* Wait for graceful shutdown */
	while (priv->regs->status & (UBI32_ETH_VP_STATUS_RX_STATE | UBI32_ETH_VP_STATUS_TX_STATE))
		;
}
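
/*
 * Note: commands are passed to the ethernet TIO/VP thread through the
 * shared register block (priv->regs) and made visible by raising the
 * VP's software interrupt (priv->vp_int_bit); the busy-wait above then
 * spins until both of the VP's RX and TX state machines report idle.
 */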
/*
 * ubi32_eth_tx_done()
 */
static int ubi32_eth_tx_done(struct net_device *dev)
{
	struct ubi32_eth_private *priv;
	struct sk_buff *skb;
	volatile void *pdata;
	struct ubi32_eth_dma_desc *desc;
	int count = 0;

	priv = netdev_priv(dev);

	priv->regs->int_status &= ~UBI32_ETH_VP_INT_TX;
	while (priv->tx_tail != priv->regs->tx_out) {
		pdata = priv->regs->tx_dma_ring[priv->tx_tail];
		BUG_ON(pdata == NULL);

		skb = container_of((void *)pdata, struct sk_buff, cb);
		desc = (struct ubi32_eth_dma_desc *)pdata;
		if (unlikely(!(desc->status & UBI32_ETH_VP_TX_OK))) {
			dev->stats.tx_errors++;
		} else {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += skb->len;
		}
		dev_kfree_skb_any(skb);
		priv->regs->tx_dma_ring[priv->tx_tail] = NULL;
		priv->tx_tail = (priv->tx_tail + 1) & TX_DMA_RING_MASK;
		count++;
	}

	if (unlikely(priv->regs->status & UBI32_ETH_VP_STATUS_TX_Q_FULL)) {
		spin_lock(&priv->lock);
		if (priv->regs->status & UBI32_ETH_VP_STATUS_TX_Q_FULL) {
			priv->regs->status &= ~UBI32_ETH_VP_STATUS_TX_Q_FULL;
			netif_wake_queue(dev);
		}
		spin_unlock(&priv->lock);
	}
	return count;
}
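
/*
 * Ring-index convention, as used throughout this file: for each ring,
 * *_in is the producer index, *_out is the consumer index advanced by
 * the VP, and the driver-private *_tail trails *_out so completed slots
 * can be reclaimed.  The DMA descriptor itself lives in skb->cb (see
 * the container_of() calls), so no separate descriptor memory has to be
 * managed.  The "& ..._RING_MASK" arithmetic assumes power-of-two ring
 * sizes.
 */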
/*
 * ubi32_eth_receive()
 *	To avoid locking overhead, this is called only
 *	by tasklet when not using NAPI, or
 *	by NAPI poll when using NAPI.
 *	return number of frames processed
 */
static int ubi32_eth_receive(struct net_device *dev, int quota)
{
	struct ubi32_eth_private *priv = netdev_priv(dev);
	unsigned short rx_in = priv->regs->rx_in;
	struct sk_buff *skb;
	struct ubi32_eth_dma_desc *desc = NULL;
	volatile void *pdata;
	int extra_reserve_adj;
	int extra_alloc = UBI32_ETH_RESERVE_SPACE + UBI32_ETH_TRASHED_MEMORY;
	int replenish_cnt, count = 0;
	int replenish_max = RX_DMA_MAX_QUEUE_SIZE;

#if (defined(CONFIG_ZONE_DMA) && defined(CONFIG_UBICOM32_OCM_FOR_SKB))
	if (likely(dev == ubi32_eth_devices[0]))
		replenish_max = min(ubi32_ocm_skbuf_max, RX_DMA_MAX_QUEUE_SIZE);
#endif

	if (unlikely(rx_in == priv->regs->rx_out))
		priv->vp_stats.rx_q_full_cnt++;

	priv->regs->int_status &= ~UBI32_ETH_VP_INT_RX;
	while (priv->rx_tail != priv->regs->rx_out) {
		if (unlikely(count == quota)) {
			/* There is still a frame pending to be processed */
			priv->vp_stats.rx_throttle++;
			break;
		}

		pdata = priv->regs->rx_dma_ring[priv->rx_tail];
		BUG_ON(pdata == NULL);

		desc = (struct ubi32_eth_dma_desc *)pdata;
		skb = container_of((void *)pdata, struct sk_buff, cb);

		priv->regs->rx_dma_ring[priv->rx_tail] = NULL;
		priv->rx_tail = ((priv->rx_tail + 1) & RX_DMA_RING_MASK);
		count++;

		/*
		 * Check only the RX_OK bit here.
		 * The rest of the status word is used as a timestamp.
		 */
		if (unlikely(!(desc->status & UBI32_ETH_VP_RX_OK))) {
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, desc->data_len);

		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = CHECKSUM_NONE;
		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;
#ifndef UBICOM32_USE_NAPI
		netif_rx(skb);
#else
		netif_receive_skb(skb);
#endif
	}
	/* fill in more descriptors for the VP */
	replenish_cnt = replenish_max -
		((RX_DMA_RING_SIZE + rx_in - priv->rx_tail) & RX_DMA_RING_MASK);
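
	/*
	 * Buffer alignment note (applies to both replenish loops below):
	 * extra_reserve_adj is chosen so that skb->data + ETH_HLEN, i.e.
	 * the start of the IP header, ends up cache-line aligned.  With
	 * adj = (data + RESERVE + ETH_HLEN) & (CACHE_LINE_SIZE - 1),
	 * reserving (RESERVE - adj) bytes leaves the data pointer at
	 * (data + RESERVE - adj), so data + ETH_HLEN is exactly aligned
	 * down to a cache-line boundary.  The extra_alloc padding makes
	 * room for this adjustment plus the UBI32_ETH_TRASHED_MEMORY
	 * slack which, going by the name, the VP may write past.
	 */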
	if (replenish_cnt > 0) {
#if (defined(CONFIG_ZONE_DMA) && defined(CONFIG_UBICOM32_OCM_FOR_SKB))
		/*
		 * black magic for performance:
		 * Try to allocate skb from OCM only for the first Ethernet I/F.
		 * Also limit the number of RX buffers to 21 due to limited OCM.
		 */
		if (likely(dev == ubi32_eth_devices[0])) {
			do {
				skb = ubi32_alloc_skb_ocm(dev, RX_BUF_SIZE + extra_alloc);
				if (!skb)
					break;

				/* set up dma descriptor */
				desc = (struct ubi32_eth_dma_desc *)skb->cb;
				extra_reserve_adj =
					((u32)skb->data + UBI32_ETH_RESERVE_SPACE + ETH_HLEN) &
					(CACHE_LINE_SIZE - 1);
				skb_reserve(skb, UBI32_ETH_RESERVE_SPACE - extra_reserve_adj);
				desc->data_pointer = skb->data;
				desc->buffer_len = RX_BUF_SIZE + UBI32_ETH_TRASHED_MEMORY;
				desc->data_len = 0;
				desc->status = 0;
				priv->regs->rx_dma_ring[rx_in] = desc;
				rx_in = (rx_in + 1) & RX_DMA_RING_MASK;
			} while (--replenish_cnt > 0);
		}
#endif
		while (replenish_cnt-- > 0) {
			skb = ubi32_alloc_skb(dev, RX_BUF_SIZE + extra_alloc);
			if (!skb) {
				priv->vp_stats.rx_alloc_err++;
				break;
			}

			/* set up dma descriptor */
			desc = (struct ubi32_eth_dma_desc *)skb->cb;
			extra_reserve_adj =
				((u32)skb->data + UBI32_ETH_RESERVE_SPACE + ETH_HLEN) &
				(CACHE_LINE_SIZE - 1);
			skb_reserve(skb, UBI32_ETH_RESERVE_SPACE - extra_reserve_adj);
			desc->data_pointer = skb->data;
			desc->buffer_len = RX_BUF_SIZE + UBI32_ETH_TRASHED_MEMORY;
			desc->data_len = 0;
			desc->status = 0;
			priv->regs->rx_dma_ring[rx_in] = desc;
			rx_in = (rx_in + 1) & RX_DMA_RING_MASK;
		}

		priv->regs->rx_in = rx_in;
		ubicom32_set_interrupt(priv->vp_int_bit);
	}

	if (likely(count > 0)) {
		dev->last_rx = jiffies;
	}
	return count;
}
#ifdef UBICOM32_USE_NAPI
static int ubi32_eth_napi_poll(struct napi_struct *napi, int budget)
{
	struct ubi32_eth_private *priv = container_of(napi, struct ubi32_eth_private, napi);
	struct net_device *dev = priv->dev;
	int count;

	if (priv->tx_tail != priv->regs->tx_out) {
		ubi32_eth_tx_done(dev);
	}

	count = ubi32_eth_receive(dev, budget);

	if (count < budget) {
		napi_complete(napi);
		priv->regs->int_mask |= (UBI32_ETH_VP_INT_RX | UBI32_ETH_VP_INT_TX);
		if ((priv->rx_tail != priv->regs->rx_out) || (priv->tx_tail != priv->regs->tx_out)) {
			if (napi_reschedule(napi)) {
				priv->regs->int_mask = 0;
			}
		}
	}

	return count;
}
#else
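
/*
 * Note the unmask-then-recheck pattern used by both the NAPI poll above
 * and the tasklet below: interrupts are re-enabled first, then the rings
 * are checked again, so that work which arrived in that window is picked
 * up by rescheduling (with the mask dropped again) rather than being
 * stranded until the next interrupt.
 */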
static void ubi32_eth_do_tasklet(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	struct ubi32_eth_private *priv = netdev_priv(dev);

	if (priv->tx_tail != priv->regs->tx_out) {
		ubi32_eth_tx_done(dev);
	}

	/* always call receive to process new RX frames as well as replenish RX buffers */
	ubi32_eth_receive(dev, UBI32_RX_BOUND);

	priv->regs->int_mask |= (UBI32_ETH_VP_INT_RX | UBI32_ETH_VP_INT_TX);
	if ((priv->rx_tail != priv->regs->rx_out) || (priv->tx_tail != priv->regs->tx_out)) {
		priv->regs->int_mask = 0;
		tasklet_schedule(&priv->tsk);
	}
}
#endif
#if defined(UBICOM32_USE_POLLING)
static struct timer_list eth_poll_timer;

static void ubi32_eth_poll(unsigned long arg)
{
	struct net_device *dev;
	struct ubi32_eth_private *priv;
	int i;

	for (i = 0; i < UBI32_ETH_NUM_OF_DEVICES; i++) {
		dev = ubi32_eth_devices[i];
		if (dev && (dev->flags & IFF_UP)) {
			priv = netdev_priv(dev);
#ifdef UBICOM32_USE_NAPI
			napi_schedule(&priv->napi);
#else
			tasklet_schedule(&priv->tsk);
#endif
		}
	}

	eth_poll_timer.expires = jiffies + 2;
	add_timer(&eth_poll_timer);
}
#endif
static irqreturn_t ubi32_eth_interrupt(int irq, void *dev_id)
{
	struct ubi32_eth_private *priv;
	struct net_device *dev = (struct net_device *)dev_id;

	BUG_ON(irq != dev->irq);

	priv = netdev_priv(dev);
	if (unlikely(!(priv->regs->int_status & priv->regs->int_mask))) {
		return IRQ_NONE;
	}

	/*
	 * Disable port interrupt
	 */
#ifdef UBICOM32_USE_NAPI
	if (napi_schedule_prep(&priv->napi)) {
		priv->regs->int_mask = 0;
		__napi_schedule(&priv->napi);
	}
#else
	priv->regs->int_mask = 0;
	tasklet_schedule(&priv->tsk);
#endif
	return IRQ_HANDLED;
}
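
/*
 * Note: rather than masking at the interrupt controller, the handler
 * clears the shared int_mask (so the VP presumably stops raising this
 * interrupt) and defers all real work to NAPI/tasklet context; the mask
 * is restored once polling has drained the rings.  IRQ_NONE is returned
 * when none of the masked status bits are set, e.g. for a shared or
 * spurious interrupt.
 */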
static int ubi32_eth_open(struct net_device *dev)
{
	struct ubi32_eth_private *priv = netdev_priv(dev);
	int err;

	printk(KERN_INFO "eth open %s\n", dev->name);
#ifndef UBICOM32_USE_POLLING
	/* request_region() */
	err = request_irq(dev->irq, ubi32_eth_interrupt, IRQF_DISABLED, dev->name, dev);
	if (err) {
		printk(KERN_WARNING "fail to request_irq %d\n", err);
		return err;
	}
#endif
#ifdef UBICOM32_USE_NAPI
	napi_enable(&priv->napi);
#else
	tasklet_init(&priv->tsk, ubi32_eth_do_tasklet, (unsigned long)dev);
#endif

	/* call receive to supply RX buffers */
	ubi32_eth_receive(dev, RX_DMA_MAX_QUEUE_SIZE);

	/* check phy status and call netif_carrier_on */
	ubi32_eth_vp_rxtx_enable(dev);
	netif_start_queue(dev);
	return 0;
}
static int ubi32_eth_close(struct net_device *dev)
{
	struct ubi32_eth_private *priv = netdev_priv(dev);
	volatile void *pdata;
	struct sk_buff *skb;

#ifndef UBICOM32_USE_POLLING
	free_irq(dev->irq, dev);
#endif
	netif_stop_queue(dev); /* can't transmit any more */
#ifdef UBICOM32_USE_NAPI
	napi_disable(&priv->napi);
#else
	tasklet_kill(&priv->tsk);
#endif
	ubi32_eth_vp_rxtx_stop(dev);

	/*
	 * RX clean up
	 */
	while (priv->rx_tail != priv->regs->rx_in) {
		pdata = priv->regs->rx_dma_ring[priv->rx_tail];
		skb = container_of((void *)pdata, struct sk_buff, cb);
		priv->regs->rx_dma_ring[priv->rx_tail] = NULL;
		dev_kfree_skb_any(skb);
		priv->rx_tail = ((priv->rx_tail + 1) & RX_DMA_RING_MASK);
	}
	priv->regs->rx_in = 0;
	priv->regs->rx_out = priv->regs->rx_in;
	priv->rx_tail = priv->regs->rx_in;

	/*
	 * TX clean up
	 */
	BUG_ON(priv->regs->tx_out != priv->regs->tx_in);
	ubi32_eth_tx_done(dev);
	BUG_ON(priv->tx_tail != priv->regs->tx_in);
	priv->regs->tx_in = 0;
	priv->regs->tx_out = priv->regs->tx_in;
	priv->tx_tail = priv->regs->tx_in;

	return 0;
}
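
/*
 * Note on the close path above: once the VP has been stopped, the
 * BUG_ON pair asserts that tx_in == tx_out (the VP consumed every
 * queued frame) and that a final ubi32_eth_tx_done() brings tx_tail up
 * to date, so resetting all indices to zero cannot leak an skb.
 */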
/*
 * ubi32_eth_set_config()
 */
static int ubi32_eth_set_config(struct net_device *dev, struct ifmap *map)
{
	/* the interface must be down before its configuration can change */
	printk(KERN_INFO "set_config %x\n", dev->flags);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	/* I/O and IRQ can not be changed */
	if (map->base_addr != dev->base_addr) {
		printk(KERN_WARNING "%s: Can't change I/O address\n", dev->name);
		return -EOPNOTSUPP;
	}

#ifndef UBICOM32_USE_POLLING
	if (map->irq != dev->irq) {
		printk(KERN_WARNING "%s: Can't change IRQ\n", dev->name);
		return -EOPNOTSUPP;
	}
#endif

	/* ignore other fields */
	return 0;
}
static int ubi32_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ubi32_eth_private *priv = netdev_priv(dev);
	struct ubi32_eth_dma_desc *desc = NULL;
	unsigned short space, tx_in;

	tx_in = priv->regs->tx_in;

	dev->trans_start = jiffies; /* save the timestamp */
	space = TX_DMA_RING_MASK - ((TX_DMA_RING_SIZE + tx_in - priv->tx_tail) & TX_DMA_RING_MASK);

	if (unlikely(space == 0)) {
		if (!(priv->regs->status & UBI32_ETH_VP_STATUS_TX_Q_FULL)) {
			spin_lock(&priv->lock);
			if (!(priv->regs->status & UBI32_ETH_VP_STATUS_TX_Q_FULL)) {
				priv->regs->status |= UBI32_ETH_VP_STATUS_TX_Q_FULL;
				priv->vp_stats.tx_q_full_cnt++;
				netif_stop_queue(dev);
			}
			spin_unlock(&priv->lock);
		}

		/* give both HW and this driver an extra trigger */
		priv->regs->int_mask |= UBI32_ETH_VP_INT_TX;
#ifndef UBICOM32_USE_POLLING
		ubicom32_set_interrupt(dev->irq);
#endif
		ubicom32_set_interrupt(priv->vp_int_bit);

		return NETDEV_TX_BUSY;
	}

	desc = (struct ubi32_eth_dma_desc *)skb->cb;
	desc->data_pointer = skb->data;
	desc->data_len = skb->len;
	priv->regs->tx_dma_ring[tx_in] = desc;
	tx_in = ((tx_in + 1) & TX_DMA_RING_MASK);

	priv->regs->tx_in = tx_in;

	ubicom32_set_interrupt(priv->vp_int_bit);

	return NETDEV_TX_OK;
}
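
/*
 * Note on the TX-full path above: the TX_Q_FULL flag is test-and-set
 * under priv->lock (double-checked, mirroring ubi32_eth_tx_done() which
 * clears it and wakes the queue), TX-done interrupts are unmasked, and
 * both the driver's own irq and the VP are kicked so the stalled ring
 * is drained promptly before the stack retries the NETDEV_TX_BUSY frame.
 */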
/*
 * Deal with a transmit timeout.
 */
static void ubi32_eth_tx_timeout(struct net_device *dev)
{
	struct ubi32_eth_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	priv->regs->int_mask |= UBI32_ETH_VP_INT_TX;
#ifndef UBICOM32_USE_POLLING
	ubicom32_set_interrupt(dev->irq);
#endif
	ubicom32_set_interrupt(priv->vp_int_bit);
}
static int ubi32_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ubi32_eth_private *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);

	printk(KERN_INFO "ioctl %s, %d\n", dev->name, cmd);

	switch (cmd) {
	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
		data->phy_id = 0;
		/* fall through */

	case SIOCGMIIREG:	/* Read MII PHY register. */
		if ((data->reg_num & 0x1F) == MII_BMCR) {
			/* Make up MII control register value from what we know */
			data->val_out = 0x0000
				| ((priv->regs->status & UBI32_ETH_VP_STATUS_DUPLEX)
					? BMCR_FULLDPLX : 0)
				| ((priv->regs->status & UBI32_ETH_VP_STATUS_SPEED100)
					? BMCR_SPEED100 : 0)
				| ((priv->regs->status & UBI32_ETH_VP_STATUS_SPEED1000)
					? BMCR_SPEED1000 : 0);
		} else if ((data->reg_num & 0x1F) == MII_BMSR) {
			/* Make up MII status register value from what we know */
			data->val_out =
				(BMSR_100FULL|BMSR_100HALF|BMSR_10FULL|BMSR_10HALF)
				| ((priv->regs->status & UBI32_ETH_VP_STATUS_LINK)
					? BMSR_LSTATUS : 0);
		}
		break;

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
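
/*
 * Note: there is no real MDIO access here; the driver synthesizes the
 * BMCR/BMSR values expected by MII ioctl users (e.g. mii-tool) from the
 * link/speed/duplex bits the VP reports in its status word.
 */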
/*
 * Return statistics to the caller
 */
static struct net_device_stats *ubi32_eth_get_stats(struct net_device *dev)
{
	return &dev->stats;
}
static int ubi32_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ubi32_eth_private *priv = netdev_priv(dev);
	unsigned long flags;

	if ((new_mtu < 68) || (new_mtu > 1500))
		return -EINVAL;

	spin_lock_irqsave(&priv->lock, flags);
	dev->mtu = new_mtu;
	spin_unlock_irqrestore(&priv->lock, flags);
	printk(KERN_INFO "set mtu to %d\n", new_mtu);

	return 0;
}
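
/*
 * Note: 68 is the minimum IPv4 MTU and 1500 the standard Ethernet MTU.
 * Only dev->mtu is updated here, which appears sufficient because the
 * RX buffers are always allocated at the fixed RX_BUF_SIZE.
 */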
/*
 * ubi32_eth_cleanup: unload the module
 */
void ubi32_eth_cleanup(void)
{
	struct ubi32_eth_private *priv;
	struct net_device *dev;
	int i;

	for (i = 0; i < UBI32_ETH_NUM_OF_DEVICES; i++) {
		dev = ubi32_eth_devices[i];
		if (dev) {
			priv = netdev_priv(dev);
			kfree(priv->regs->tx_dma_ring);
			unregister_netdev(dev);
			free_netdev(dev);
			ubi32_eth_devices[i] = NULL;
		}
	}
}
static const struct net_device_ops ubi32_netdev_ops = {
	.ndo_open		= ubi32_eth_open,
	.ndo_stop		= ubi32_eth_close,
	.ndo_start_xmit		= ubi32_eth_start_xmit,
	.ndo_tx_timeout		= ubi32_eth_tx_timeout,
	.ndo_do_ioctl		= ubi32_eth_ioctl,
	.ndo_change_mtu		= ubi32_eth_change_mtu,
	.ndo_set_config		= ubi32_eth_set_config,
	.ndo_get_stats		= ubi32_eth_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
int ubi32_eth_init_module(void)
{
	struct ethtionode *eth_node;
	struct net_device *dev;
	struct ubi32_eth_private *priv;
	int i;
	int err;

	for (i = 0; i < UBI32_ETH_NUM_OF_DEVICES; i++) {
		/*
		 * See if the eth_vp is in the device tree.
		 */
		eth_node = (struct ethtionode *)devtree_find_node(eth_if_name[i]);
		if (!eth_node) {
			printk(KERN_INFO "%s does not exist\n", eth_if_name[i]);
			continue;
		}

		eth_node->tx_dma_ring = (struct ubi32_eth_dma_desc **)kmalloc(
			sizeof(struct ubi32_eth_dma_desc *) *
			(TX_DMA_RING_SIZE + RX_DMA_RING_SIZE),
			GFP_ATOMIC | __GFP_NOWARN | __GFP_NORETRY | GFP_DMA);

		if (eth_node->tx_dma_ring == NULL) {
			eth_node->tx_dma_ring = (struct ubi32_eth_dma_desc **)kmalloc(
				sizeof(struct ubi32_eth_dma_desc *) *
				(TX_DMA_RING_SIZE + RX_DMA_RING_SIZE), GFP_KERNEL);
			printk(KERN_INFO "fail to allocate from OCM\n");
		}

		if (!eth_node->tx_dma_ring) {
			continue;
		}
		eth_node->rx_dma_ring = eth_node->tx_dma_ring + TX_DMA_RING_SIZE;
		eth_node->tx_sz = TX_DMA_RING_SIZE - 1;
		eth_node->rx_sz = RX_DMA_RING_SIZE - 1;

		dev = alloc_etherdev(sizeof(struct ubi32_eth_private));
		if (!dev) {
			kfree(eth_node->tx_dma_ring);
			continue;
		}
		priv = netdev_priv(dev);
		priv->dev = dev;

		/*
		 * This just fills in a default Ubicom MAC address.
		 */
		memcpy(dev->dev_addr, mac_addr[i], ETH_ALEN);
		memset(dev->broadcast, 0xff, ETH_ALEN);

		priv->regs = eth_node;
		priv->regs->command = 0;
		priv->regs->int_mask = 0;
		priv->regs->int_status = 0;
		priv->regs->tx_out = 0;
		priv->regs->rx_out = 0;
		priv->regs->tx_in = 0;
		priv->regs->rx_in = 0;
		priv->tx_tail = 0;
		priv->rx_tail = 0;

		priv->vp_int_bit = eth_node->dn.sendirq;
		dev->irq = eth_node->dn.recvirq;

		spin_lock_init(&priv->lock);

		dev->netdev_ops = &ubi32_netdev_ops;

		dev->watchdog_timeo = UBI32_ETH_VP_TX_TIMEOUT;
#ifdef UBICOM32_USE_NAPI
		netif_napi_add(dev, &priv->napi, ubi32_eth_napi_poll, UBI32_ETH_NAPI_WEIGHT);
#endif
		err = register_netdev(dev);
		if (err) {
			printk(KERN_WARNING "Failed to register netdev %s\n", eth_if_name[i]);
			free_netdev(dev);
			kfree(eth_node->tx_dma_ring);
			continue;
		}

		ubi32_eth_devices[i] = dev;
		printk(KERN_INFO "%s vp_base:0x%p, tio_int:%d irq:%d feature:0x%lx\n",
			dev->name, priv->regs, eth_node->dn.sendirq, dev->irq, dev->features);
	}

	if (!ubi32_eth_devices[0] && !ubi32_eth_devices[1]) {
		return -ENODEV;
	}

#if defined(UBICOM32_USE_POLLING)
	init_timer(&eth_poll_timer);
	eth_poll_timer.function = ubi32_eth_poll;
	eth_poll_timer.data = (unsigned long)0;
	eth_poll_timer.expires = jiffies + 2;
	add_timer(&eth_poll_timer);
#endif

	return 0;
}
module_init(ubi32_eth_init_module);
module_exit(ubi32_eth_cleanup);

MODULE_AUTHOR("Kan Yan, Greg Ren");
MODULE_LICENSE("GPL");