/******************************************************************************/
/* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2004 Broadcom  */
/* All rights reserved.                                                       */
/* This program is free software; you can redistribute it and/or modify       */
/* it under the terms of the GNU General Public License as published by       */
/* the Free Software Foundation, located in the file LICENSE.                 */
/******************************************************************************/
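/* This header supplies the Linux-specific ("MM") glue used by the            */
/* OS-independent LM core: kernel-version compatibility shims, DMA mapping    */
/* and locking wrappers, and the UM_DEVICE_BLOCK/UM_PACKET extensions of the  */
/* LM structures.                                                             */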
#include <linux/config.h>

#if defined(CONFIG_SMP) && !defined(__SMP__)
#define __SMP__
#endif

#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && !defined(MODVERSIONS)
#define MODVERSIONS
#endif

#define __NO_VERSION__
#include <linux/version.h>

#if defined(MODVERSIONS) && (LINUX_VERSION_CODE < 0x020500)
#include <linux/modversions.h>
#endif

#if (LINUX_VERSION_CODE < 0x020605)
#include <linux/module.h>
#else
#include <linux/moduleparam.h>
#endif

#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#define SET_MODULE_OWNER(dev)
#define MODULE_DEVICE_TABLE(pci, pci_tbl)
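/* The macros above are stubbed out on kernel versions where they are either  */
/* unavailable or no longer required, so the rest of the driver can reference */
/* them unconditionally.                                                      */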
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/reboot.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <asm/uaccess.h>
#if (LINUX_VERSION_CODE >= 0x020400)
#if (LINUX_VERSION_CODE < 0x020500)
#include <linux/wrapper.h>
#endif
#include <linux/ethtool.h>
#endif
#include <linux/smp_lock.h>
#include <linux/proc_fs.h>

#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif

#ifdef NETIF_F_TSO
#define INCLUDE_TCP_SEG_SUPPORT 1
#include <net/checksum.h>
#endif
#ifndef LINUX_KERNEL_VERSION
#define LINUX_KERNEL_VERSION 0
#endif

#ifndef MAX_SKB_FRAGS
#define MAX_SKB_FRAGS 0
#endif

#if (LINUX_VERSION_CODE >= 0x020400)
#ifndef ETHTOOL_GEEPROM

#define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
#define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data */
/* for passing EEPROM chunks */
struct ethtool_eeprom {
	u32	cmd;
	u32	magic;
	u32	offset;	/* in bytes */
	u32	len;	/* in bytes */
	u8	data[0];
};

#define BCM_EEDUMP_LEN(info_p, size) *((u32 *) &((info_p)->reserved1[24]))=size

#else

#define BCM_EEDUMP_LEN(info_p, size) (info_p)->eedump_len=size

#endif
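/* BCM_EEDUMP_LEN() records the EEPROM dump size reported to ethtool: when    */
/* struct ethtool_drvinfo has no eedump_len member, the size is stashed in    */
/* its reserved1 area instead; otherwise eedump_len is set directly.          */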
#define BCM_INT_COAL 1
#define BCM_NIC_SEND_BD 1
#define BCM_TASKLET 1

#if HAVE_NETIF_RECEIVE_SKB
#define BCM_NAPI_RXPOLL 1
#endif

#if defined(CONFIG_PPC64)
#define BCM_DISCONNECT_AT_CACHELINE 1
#endif
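/* BCM_NAPI_RXPOLL selects the NAPI receive path and is only enabled when the */
/* kernel exports netif_receive_skb(); BCM_DISCONNECT_AT_CACHELINE is only    */
/* set on PPC64 builds.                                                       */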
#undef ETHTOOL_GEEPROM
#undef ETHTOOL_SEEPROM
#undef ETHTOOL_GPAUSEPARAM
#undef ETHTOOL_GRXCSUM
#undef BCM_NIC_SEND_BD
#ifdef __BIG_ENDIAN
#define BIG_ENDIAN_HOST 1
#endif
#define MM_SWAP_LE32(x) cpu_to_le32(x)
#define MM_SWAP_BE32(x) cpu_to_be32(x)

#if (LINUX_VERSION_CODE < 0x020327)
#define __raw_readl readl
#define __raw_writel writel
#endif

#define MM_MEMWRITEL(ptr, val) __raw_writel(val, ptr)
#define MM_MEMREADL(ptr) __raw_readl(ptr)
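/* Raw 32-bit MMIO wrappers; kernels that predate the __raw_* accessors fall  */
/* back to plain readl()/writel().                                            */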
typedef atomic_t MM_ATOMIC_T;

#define MM_ATOMIC_SET(ptr, val) atomic_set(ptr, val)
#define MM_ATOMIC_READ(ptr) atomic_read(ptr)
#define MM_ATOMIC_INC(ptr) atomic_inc(ptr)
#define MM_ATOMIC_ADD(ptr, val) atomic_add(val, ptr)
#define MM_ATOMIC_DEC(ptr) atomic_dec(ptr)
#define MM_ATOMIC_SUB(ptr, val) atomic_sub(val, ptr)

#define MM_WMB() wmb()
#define MM_RMB() rmb()
#define MM_MMIOWB() mmiowb()
#define STATIC static

extern int MM_Packet_Desc_Size;

#define MM_PACKET_DESC_SIZE MM_Packet_Desc_Size

DECLARE_QUEUE_TYPE(UM_RX_PACKET_Q, MAX_RX_PACKET_DESC_COUNT+1);
#if (LINUX_VERSION_CODE < 0x020211)
typedef u32 dma_addr_t;
#endif

#if (LINUX_VERSION_CODE < 0x02032a)
#define pci_map_single(dev, address, size, dir) virt_to_bus(address)
#define pci_unmap_single(dev, dma_addr, size, dir)
#endif

#if MAX_SKB_FRAGS
#if (LINUX_VERSION_CODE >= 0x02040d)

typedef dma_addr_t dmaaddr_high_t;

#else
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && !defined(CONFIG_X86_64)

#if defined(CONFIG_HIGHMEM64G)
typedef unsigned long long dmaaddr_high_t;
#else
typedef dma_addr_t dmaaddr_high_t;
#endif

#define pci_map_page bcm_pci_map_page

static inline dmaaddr_high_t
bcm_pci_map_page(struct pci_dev *dev, struct page *page,
	int offset, size_t size, int dir)
{
	dmaaddr_high_t phys;

	phys = (page - mem_map) * (dmaaddr_high_t) PAGE_SIZE + offset;
	return phys;
}

#ifndef pci_unmap_page
#define pci_unmap_page(dev, map, size, dir)
#endif
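/* On 32-bit x86 highmem kernels that lack a usable pci_map_page(), the       */
/* page's bus address is computed directly from its mem_map index, which      */
/* assumes bus address == physical address on those platforms.                */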
#else /* #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && ! defined(CONFIG_X86_64)*/

typedef dma_addr_t dmaaddr_high_t;

/* Warning - This may not work for all architectures if HIGHMEM is defined */

#define pci_map_page(dev, page, offset, size, dir) \
	pci_map_single(dev, page_address(page) + (offset), size, dir)

#ifndef pci_unmap_page
#define pci_unmap_page(dev, map, size, dir) \
	pci_unmap_single(dev, map, size, dir)
#endif

#endif /* #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && ! defined(CONFIG_X86_64)*/

#endif /* #if (LINUX_VERSION_CODE >= 0x02040d)*/
#endif /* #if MAX_SKB_FRAGS*/
#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
#define NO_PCI_UNMAP 1
#endif

#if (LINUX_VERSION_CODE < 0x020412)
#if !defined(NO_PCI_UNMAP)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME;

#define pci_unmap_addr(PTR, ADDR_NAME) \
	((PTR)->ADDR_NAME)

#define pci_unmap_len(PTR, LEN_NAME) \
	((PTR)->LEN_NAME)

#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
	(((PTR)->ADDR_NAME) = (VAL))

#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
	(((PTR)->LEN_NAME) = (VAL))

#else

#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(ADDR_NAME)

#define pci_unmap_addr(PTR, ADDR_NAME)		0
#define pci_unmap_len(PTR, LEN_NAME)		0
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)

#endif
#endif
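/* The DECLARE_PCI_UNMAP_* bookkeeping only costs space when unmapping is a   */
/* real operation; with NO_PCI_UNMAP (32-bit x86) the declarations and the    */
/* pci_unmap_addr/len accessors compile away entirely.                        */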
#if (LINUX_VERSION_CODE < 0x02030e)
#define net_device device
#define netif_carrier_on(dev)
#define netif_carrier_off(dev)
#endif

#if (LINUX_VERSION_CODE < 0x02032b)
#define tasklet_struct tq_struct
#endif
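/* Per-device state. LM_DEVICE_BLOCK must remain the first member so that the */
/* PLM_DEVICE_BLOCK passed around by the shared code can be cast to and from  */
/* PUM_DEVICE_BLOCK (see the casts in the MM_Map*Dma helpers below).          */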
typedef struct _UM_DEVICE_BLOCK {
	LM_DEVICE_BLOCK lm_dev;
	struct net_device *dev;
	struct pci_dev *pdev;
	struct net_device *next_module;
	struct proc_dir_entry *pfs_entry;
	void *mem_list[MAX_MEM];
	dma_addr_t dma_list[MAX_MEM];
	int mem_size_list[MAX_MEM];
	int using_dac;			/* dual address cycle */
	int delayed_link_ind;		/* Delay link status during initial load */
	int adapter_just_inited;	/* the first few seconds after init. */
	int statstimer_interval;
	int crc_counter_expiry;
	int poll_tbi_interval;
	int line_speed;			/* in Mbps, 0 if link is down */
	UM_RX_PACKET_Q rx_out_of_buf_q;
	int rx_buf_repl_thresh;
	int rx_buf_repl_panic_thresh;
	int rx_buf_repl_isr_limit;
	struct timer_list timer;
	struct timer_list statstimer;
	spinlock_t global_lock;
	spinlock_t undi_lock;
	unsigned long undi_flags;
	volatile unsigned long interrupt;
	volatile unsigned long tasklet_busy;
	struct tasklet_struct tasklet;
	struct net_device_stats stats;
	int intr_test_result;
#ifdef NETIF_F_HW_VLAN_TX
	struct vlan_group *vlgrp;
#endif
	int vlan_tag_mode;		/* Setting to allow ASF to work properly with */
					/* VLANs. */
#define VLAN_TAG_MODE_AUTO_STRIP	0
#define VLAN_TAG_MODE_NORMAL_STRIP	1
#define VLAN_TAG_MODE_FORCED_STRIP	2
	/* Auto mode - VLAN TAGs are always stripped if ASF is enabled,    */
	/*             If ASF is not enabled, it will be in normal mode.   */
	/* Normal mode - VLAN TAGs are stripped when VLANs are registered. */
	/* Forced mode - VLAN TAGs are always stripped.                    */
	int adaptive_coalesce;
	uint rx_curr_coalesce_frames;
	uint rx_curr_coalesce_frames_intr;
	uint rx_curr_coalesce_ticks;
	uint tx_curr_coalesce_frames;
	unsigned long tx_zc_count;
	unsigned long tx_chksum_count;
	unsigned long tx_himem_count;
	unsigned long rx_good_chksum_count;
	unsigned long rx_bad_chksum_count;
	unsigned long tso_pkt_count;
	unsigned long rx_misc_errors;
	uint64_t phy_crc_count;
	unsigned int spurious_int;
	unsigned long boardflags;
} UM_DEVICE_BLOCK, *PUM_DEVICE_BLOCK;
typedef struct _UM_PACKET {
	struct sk_buff *skbuff;
#if MAX_SKB_FRAGS
	DECLARE_PCI_UNMAP_ADDR(map[MAX_SKB_FRAGS + 1])
	DECLARE_PCI_UNMAP_LEN(map_len[MAX_SKB_FRAGS + 1])
#else
	DECLARE_PCI_UNMAP_ADDR(map[1])
	DECLARE_PCI_UNMAP_LEN(map_len[1])
#endif
} UM_PACKET, *PUM_PACKET;
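/* Each LM packet descriptor is extended with its sk_buff and with the DMA    */
/* unmap bookkeeping for the linear buffer (index 0) plus one slot per page   */
/* fragment.                                                                  */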
static inline void MM_SetAddr(LM_PHYSICAL_ADDRESS *paddr, dma_addr_t addr)
{
#if BITS_PER_LONG == 64
	paddr->High = ((unsigned long) addr) >> 32;
	paddr->Low = ((unsigned long) addr) & 0xffffffff;
#else
	paddr->High = 0;
	paddr->Low = (unsigned long) addr;
#endif
}
static inline void MM_SetT3Addr(T3_64BIT_HOST_ADDR *paddr, dma_addr_t addr)
{
#if BITS_PER_LONG == 64
	paddr->High = ((unsigned long) addr) >> 32;
	paddr->Low = ((unsigned long) addr) & 0xffffffff;
#else
	paddr->High = 0;
	paddr->Low = (unsigned long) addr;
#endif
}
static inline void MM_SetT3AddrHigh(T3_64BIT_HOST_ADDR *paddr,
	dmaaddr_high_t addr)
{
#if defined(CONFIG_HIGHMEM64G) && defined(CONFIG_X86) && !defined(CONFIG_X86_64)
	paddr->High = (unsigned long) (addr >> 32);
	paddr->Low = (unsigned long) (addr & 0xffffffff);
#else
	MM_SetT3Addr(paddr, (dma_addr_t) addr);
#endif
}
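/* MM_SetT3AddrHigh() exists for 32-bit x86 PAE configurations, where a       */
/* dmaaddr_high_t can exceed 32 bits even though dma_addr_t (and unsigned     */
/* long) cannot; elsewhere it degenerates to MM_SetT3Addr().                  */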
static inline void MM_MapRxDma(PLM_DEVICE_BLOCK pDevice,
	struct _LM_PACKET *pPacket,
	T3_64BIT_HOST_ADDR *paddr)
{
	dma_addr_t map;
	struct sk_buff *skb = ((struct _UM_PACKET *) pPacket)->skbuff;

	map = pci_map_single(((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
		skb->tail,
		pPacket->u.Rx.RxBufferSize,
		PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(((struct _UM_PACKET *) pPacket), map[0], map);
	MM_SetT3Addr(paddr, map);
}
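/* Receive buffers are mapped for device-to-host DMA (PCI_DMA_FROMDEVICE) and */
/* the mapping is saved in slot 0 so it can be unmapped later.                */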
static inline void MM_MapTxDma(PLM_DEVICE_BLOCK pDevice,
	struct _LM_PACKET *pPacket,
	T3_64BIT_HOST_ADDR *paddr,
	u32 *len,
	int frag)
{
	dma_addr_t map;
	struct sk_buff *skb = ((struct _UM_PACKET *) pPacket)->skbuff;
	unsigned int length;

	if (frag == 0) {
#if MAX_SKB_FRAGS
		if (skb_shinfo(skb)->nr_frags)
			length = skb->len - skb->data_len;
		else
#endif
			length = skb->len;
		map = pci_map_single(((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
			skb->data, length, PCI_DMA_TODEVICE);
		MM_SetT3Addr(paddr, map);
		pci_unmap_addr_set(((struct _UM_PACKET *)pPacket), map[0], map);
		pci_unmap_len_set(((struct _UM_PACKET *) pPacket), map_len[0],
			length);
		*len = length;
	}
#if MAX_SKB_FRAGS
	else {
		skb_frag_t *sk_frag;
		dmaaddr_high_t hi_map;

		sk_frag = &skb_shinfo(skb)->frags[frag - 1];

		hi_map = pci_map_page(
			((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
			sk_frag->page,
			sk_frag->page_offset,
			sk_frag->size, PCI_DMA_TODEVICE);

		MM_SetT3AddrHigh(paddr, hi_map);
		pci_unmap_addr_set(((struct _UM_PACKET *) pPacket), map[frag],
			hi_map);
		pci_unmap_len_set(((struct _UM_PACKET *) pPacket),
			map_len[frag], sk_frag->size);
		*len = sk_frag->size;
	}
#endif
}
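/* Transmit mapping: frag 0 covers the linear part of the skb (skb->len minus */
/* data_len when page fragments are present); frags 1..nr_frags map the       */
/* corresponding skb fragments with pci_map_page(), which is why the 64-bit   */
/* aware MM_SetT3AddrHigh() is used for them.                                 */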
#define BCM5700_PHY_LOCK(pUmDevice, flags) { \
	spinlock_t *lock; \
	if ((pUmDevice)->do_global_lock) { \
		lock = &(pUmDevice)->global_lock; \
	} \
	else { \
		lock = &(pUmDevice)->phy_lock; \
	} \
	spin_lock_irqsave(lock, flags); \
}

#define BCM5700_PHY_UNLOCK(pUmDevice, flags) { \
	spinlock_t *lock; \
	if ((pUmDevice)->do_global_lock) { \
		lock = &(pUmDevice)->global_lock; \
	} \
	else { \
		lock = &(pUmDevice)->phy_lock; \
	} \
	spin_unlock_irqrestore(lock, flags); \
}
#define MM_ACQUIRE_UNDI_LOCK(_pDevice) \
	if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
		unsigned long flags; \
		spin_lock_irqsave(&((PUM_DEVICE_BLOCK)(_pDevice))->undi_lock, flags); \
		((PUM_DEVICE_BLOCK)(_pDevice))->undi_flags = flags; \
	}

#define MM_RELEASE_UNDI_LOCK(_pDevice) \
	if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
		unsigned long flags = ((PUM_DEVICE_BLOCK) (_pDevice))->undi_flags; \
		spin_unlock_irqrestore(&((PUM_DEVICE_BLOCK)(_pDevice))->undi_lock, flags); \
	}

#define MM_ACQUIRE_PHY_LOCK_IN_IRQ(_pDevice) \
	if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
		spin_lock(&((PUM_DEVICE_BLOCK)(_pDevice))->phy_lock); \
	}

#define MM_RELEASE_PHY_LOCK_IN_IRQ(_pDevice) \
	if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
		spin_unlock(&((PUM_DEVICE_BLOCK)(_pDevice))->phy_lock); \
	}
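/*
 * When do_global_lock is set, a single global spinlock serializes PHY and
 * UNDI access, so the UNDI and PHY-in-IRQ wrappers become no-ops (the caller
 * is presumed to already hold the global lock); otherwise the dedicated
 * phy_lock/undi_lock spinlocks are used.  A typical (illustrative) call
 * sequence from process context:
 *
 *	unsigned long flags;
 *	BCM5700_PHY_LOCK(pUmDevice, flags);
 *	... access PHY registers through the LM core ...
 *	BCM5700_PHY_UNLOCK(pUmDevice, flags);
 */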
#define MM_UINT_PTR(_ptr)	((unsigned long) (_ptr))

#define MM_GETSTATS64(_Ctr) \
	(uint64_t) (_Ctr).Low + ((uint64_t) (_Ctr).High << 32)

#define MM_GETSTATS32(_Ctr) \
	(uint32_t) (_Ctr).Low

#if BITS_PER_LONG == 64
#define MM_GETSTATS(_Ctr) (unsigned long) MM_GETSTATS64(_Ctr)
#else
#define MM_GETSTATS(_Ctr) (unsigned long) MM_GETSTATS32(_Ctr)
#endif
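/*
 * Hardware counters are kept as {High, Low} 32-bit pairs.  MM_GETSTATS()
 * returns the full 64-bit value on 64-bit hosts and only the low word on
 * 32-bit hosts, e.g. (illustrative use with a hypothetical statistics-block
 * counter field):
 *
 *	stats->rx_packets = MM_GETSTATS(pStats->ifHCInUcastPkts);
 */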
#if (LINUX_VERSION_CODE >= 0x020600)
#define mm_copy_to_user( to, from, size ) \
	(in_atomic() ? (memcpy((to),(from),(size)), 0) : copy_to_user((to),(from),(size)))
#define mm_copy_from_user( to, from, size ) \
	(in_atomic() ? (memcpy((to),(from),(size)), 0) : copy_from_user((to),(from),(size)))
#else
#define mm_copy_to_user( to, from, size ) \
	copy_to_user((to),(from),(size) )
#define mm_copy_from_user( to, from, size ) \
	copy_from_user((to),(from),(size))
#endif
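/*
 * On 2.6 kernels, in_atomic() is checked so that callers running in atomic
 * context fall back to a plain memcpy() instead of a possibly sleeping
 * copy_to_user()/copy_from_user().
 */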
#define printf(fmt, args...) printk(KERN_WARNING fmt, ##args)

#define DbgPrint(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
#if defined(CONFIG_X86)
#define DbgBreakPoint() __asm__("int $129")
#else
#define DbgBreakPoint()
#endif

#define MM_Wait(time) udelay(time)