clean up the openswan patch to work with older patch versions as well
[openwrt.git] / openwrt / target / linux / aruba-2.6 / patches / 010-ar2313_enet.patch
1 diff -urN linux.old/drivers/net/Kconfig linux.net/drivers/net/Kconfig
2 --- linux.old/drivers/net/Kconfig 2006-01-21 20:15:08.279272000 +0100
3 +++ linux.net/drivers/net/Kconfig 2006-01-30 01:18:34.910315000 +0100
4 @@ -176,6 +176,13 @@
5
6 source "drivers/net/arm/Kconfig"
7
8 +
9 +config AR2313
10 + tristate "AR2313 Ethernet support"
11 + depends on NET_ETHERNET && MACH_ARUBA
12 + help
13 + Support for the AR2313 Ethernet part on Aruba AP60/61
14 +
15 config IDT_RC32434_ETH
16 tristate "IDT RC32434 Local Ethernet support"
17 depends on NET_ETHERNET
18 diff -urN linux.old/drivers/net/Makefile linux.net/drivers/net/Makefile
19 --- linux.old/drivers/net/Makefile 2006-01-21 20:15:08.383226000 +0100
20 +++ linux.net/drivers/net/Makefile 2006-01-30 01:18:34.914315250 +0100
21 @@ -35,6 +35,7 @@
22
23 obj-$(CONFIG_OAKNET) += oaknet.o 8390.o
24
25 +obj-$(CONFIG_AR2313) += ar2313.o
26 obj-$(CONFIG_IDT_RC32434_ETH) += rc32434_eth.o
27 obj-$(CONFIG_DGRS) += dgrs.o
28 obj-$(CONFIG_VORTEX) += 3c59x.o
29 diff -urN linux.old/drivers/net/ar2313/ar2313.h linux.net/drivers/net/ar2313/ar2313.h
30 --- linux.old/drivers/net/ar2313/ar2313.h 1970-01-01 01:00:00.000000000 +0100
31 +++ linux.net/drivers/net/ar2313/ar2313.h 2006-01-25 00:35:55.000000000 +0100
32 @@ -0,0 +1,190 @@
33 +#ifndef _AR2313_H_
34 +#define _AR2313_H_
35 +
36 +#include <linux/config.h>
37 +#include <asm/bootinfo.h>
38 +#include "platform.h"
39 +
40 +extern unsigned long mips_machtype;
41 +
42 +#undef ETHERNET_BASE
43 +#define ETHERNET_BASE ar_eth_base
44 +#define ETHERNET_SIZE 0x00100000
45 +#define ETHERNET_MACS 2
46 +
47 +#undef DMA_BASE
48 +#define DMA_BASE ar_dma_base
49 +#define DMA_SIZE 0x00100000
50 +
51 +
52 +/*
53 + * probe link timer - 5 secs
54 + */
55 +#define LINK_TIMER (5*HZ)
56 +
57 +/*
58 + * Interrupt register base address
59 + */
60 +#define INTERRUPT_BASE PHYS_TO_K1(ar_int_base)
61 +
62 +/*
63 + * Reset Register
64 + */
65 +#define AR531X_RESET (AR531X_RESETTMR + 0x0020)
66 +#define RESET_SYSTEM 0x00000001 /* cold reset full system */
67 +#define RESET_PROC 0x00000002 /* cold reset MIPS core */
68 +#define RESET_WLAN0 0x00000004 /* cold reset WLAN MAC and BB */
69 +#define RESET_EPHY0 0x00000008 /* cold reset ENET0 phy */
70 +#define RESET_EPHY1 0x00000010 /* cold reset ENET1 phy */
71 +#define RESET_ENET0 0x00000020 /* cold reset ENET0 mac */
72 +#define RESET_ENET1 0x00000040 /* cold reset ENET1 mac */
73 +
74 +#define IS_DMA_TX_INT(X) (((X) & (DMA_STATUS_TI)) != 0)
75 +#define IS_DMA_RX_INT(X) (((X) & (DMA_STATUS_RI)) != 0)
76 +#define IS_DRIVER_OWNED(X) (((X) & (DMA_TX_OWN)) == 0)
77 +
78 +#ifndef K1_TO_PHYS
79 +// hack
80 +#define K1_TO_PHYS(x) (((unsigned int)(x)) & 0x1FFFFFFF) /* kseg1 to physical */
81 +#endif
82 +
83 +#ifndef PHYS_TO_K1
84 +// hack
85 +#define PHYS_TO_K1(x) (((unsigned int)(x)) | 0xA0000000) /* physical to kseg1 */
86 +#endif
87 +
88 +#define AR2313_TX_TIMEOUT (HZ/4)
89 +
90 +/*
91 + * Rings
92 + */
93 +#define DSC_RING_ENTRIES_SIZE (AR2313_DESCR_ENTRIES * sizeof(struct desc))
94 +#define DSC_NEXT(idx) ((idx + 1) & (AR2313_DESCR_ENTRIES - 1))
95 +
96 +static inline int tx_space (u32 csm, u32 prd)
97 +{
98 + return (csm - prd - 1) & (AR2313_DESCR_ENTRIES - 1);
99 +}
100 +
101 +#if MAX_SKB_FRAGS
102 +#define TX_RESERVED (MAX_SKB_FRAGS+1) /* +1 for message header */
103 +#define tx_ring_full(csm, prd) (tx_space(csm, prd) <= TX_RESERVED)
104 +#else
105 +#define tx_ring_full 0
106 +#endif
107 +
108 +#define AR2313_MBGET 2
109 +#define AR2313_MBSET 3
110 +#define AR2313_PCI_RECONFIG 4
111 +#define AR2313_PCI_DUMP 5
112 +#define AR2313_TEST_PANIC 6
113 +#define AR2313_TEST_NULLPTR 7
114 +#define AR2313_READ_DATA 8
115 +#define AR2313_WRITE_DATA 9
116 +#define AR2313_GET_VERSION 10
117 +#define AR2313_TEST_HANG 11
118 +#define AR2313_SYNC 12
119 +
120 +
121 +struct ar2313_cmd {
122 + u32 cmd;
123 + u32 address; /* virtual address of image */
124 + u32 length; /* size of image to download */
125 + u32 mailbox; /* mailbox to get/set */
126 + u32 data[2]; /* contents of mailbox to read/write */
127 +};
128 +
129 +
130 +/*
131 + * Struct private for the Sibyte.
132 + *
133 + * Elements are grouped so variables used by the tx handling goes
134 + * together, and will go into the same cache lines etc. in order to
135 + * avoid cache line contention between the rx and tx handling on SMP.
136 + *
137 + * Frequently accessed variables are put at the beginning of the
138 + * struct to help the compiler generate better/shorter code.
139 + */
140 +struct ar2313_private
141 +{
142 + int version;
143 + u32 mb[2];
144 +
145 + volatile ETHERNET_STRUCT *eth_regs;
146 + volatile DMA *dma_regs;
147 + volatile u32 *int_regs;
148 +
149 + spinlock_t lock; /* Serialise access to device */
150 +
151 + /*
152 + * RX and TX descriptors, must be adjacent
153 + */
154 + ar2313_descr_t *rx_ring;
155 + ar2313_descr_t *tx_ring;
156 +
157 +
158 + struct sk_buff **rx_skb;
159 + struct sk_buff **tx_skb;
160 +
161 + /*
162 + * RX elements
163 + */
164 + u32 rx_skbprd;
165 + u32 cur_rx;
166 +
167 + /*
168 + * TX elements
169 + */
170 + u32 tx_prd;
171 + u32 tx_csm;
172 +
173 + /*
174 + * Misc elements
175 + */
176 + int board_idx;
177 + char name[48];
178 + struct net_device_stats stats;
179 + struct {
180 + u32 address;
181 + u32 length;
182 + char *mapping;
183 + } desc;
184 +
185 +
186 + struct timer_list link_timer;
187 + unsigned short phy; /* merlot phy = 1, samsung phy = 0x1f */
188 + unsigned short mac;
189 + unsigned short link; /* 0 - link down, 1 - link up */
190 + u16 phyData;
191 +
192 + struct tasklet_struct rx_tasklet;
193 + int unloading;
194 +};
195 +
196 +
197 +/*
198 + * Prototypes
199 + */
200 +static int ar2313_init(struct net_device *dev);
201 +#ifdef TX_TIMEOUT
202 +static void ar2313_tx_timeout(struct net_device *dev);
203 +#endif
204 +#if 0
205 +static void ar2313_multicast_list(struct net_device *dev);
206 +#endif
207 +static int ar2313_restart(struct net_device *dev);
208 +#if DEBUG
209 +static void ar2313_dump_regs(struct net_device *dev);
210 +#endif
211 +static void ar2313_load_rx_ring(struct net_device *dev, int bufs);
212 +static irqreturn_t ar2313_interrupt(int irq, void *dev_id, struct pt_regs *regs);
213 +static int ar2313_open(struct net_device *dev);
214 +static int ar2313_start_xmit(struct sk_buff *skb, struct net_device *dev);
215 +static int ar2313_close(struct net_device *dev);
216 +static int ar2313_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
217 +static void ar2313_init_cleanup(struct net_device *dev);
218 +static int ar2313_setup_timer(struct net_device *dev);
219 +static void ar2313_link_timer_fn(unsigned long data);
220 +static void ar2313_check_link(struct net_device *dev);
221 +static struct net_device_stats *ar2313_get_stats(struct net_device *dev);
222 +#endif /* _AR2313_H_ */
223 diff -urN linux.old/drivers/net/ar2313/ar2313_msg.h linux.net/drivers/net/ar2313/ar2313_msg.h
224 --- linux.old/drivers/net/ar2313/ar2313_msg.h 1970-01-01 01:00:00.000000000 +0100
225 +++ linux.net/drivers/net/ar2313/ar2313_msg.h 2006-01-24 22:57:25.000000000 +0100
226 @@ -0,0 +1,17 @@
227 +#ifndef _AR2313_MSG_H_
228 +#define _AR2313_MSG_H_
229 +
230 +#define AR2313_MTU 1692
231 +#define AR2313_PRIOS 1
232 +#define AR2313_QUEUES (2*AR2313_PRIOS)
233 +
234 +#define AR2313_DESCR_ENTRIES 64
235 +
236 +typedef struct {
237 + volatile unsigned int status; // OWN, Device control and status.
238 + volatile unsigned int devcs; // pkt Control bits + Length
239 + volatile unsigned int addr; // Current Address.
240 + volatile unsigned int descr; // Next descriptor in chain.
241 +} ar2313_descr_t;
242 +
243 +#endif /* _AR2313_MSG_H_ */
244 diff -urN linux.old/drivers/net/ar2313/dma.h linux.net/drivers/net/ar2313/dma.h
245 --- linux.old/drivers/net/ar2313/dma.h 1970-01-01 01:00:00.000000000 +0100
246 +++ linux.net/drivers/net/ar2313/dma.h 2006-01-24 22:58:45.000000000 +0100
247 @@ -0,0 +1,135 @@
248 +#ifndef __ARUBA_DMA_H__
249 +#define __ARUBA_DMA_H__
250 +
251 +/*******************************************************************************
252 + *
253 + * Copyright 2002 Integrated Device Technology, Inc.
254 + * All rights reserved.
255 + *
256 + * DMA register definition.
257 + *
258 + * File : $Id: dma.h,v 1.3 2002/06/06 18:34:03 astichte Exp $
259 + *
260 + * Author : ryan.holmQVist@idt.com
261 + * Date : 20011005
262 + * Update :
263 + * $Log: dma.h,v $
264 + * Revision 1.3 2002/06/06 18:34:03 astichte
265 + * Added XXX_PhysicalAddress and XXX_VirtualAddress
266 + *
267 + * Revision 1.2 2002/06/05 18:30:46 astichte
268 + * Removed IDTField
269 + *
270 + * Revision 1.1 2002/05/29 17:33:21 sysarch
271 + * jba File moved from vcode/include/idt/acacia
272 + *
273 + *
274 + ******************************************************************************/
275 +
276 +#define AR_BIT(x) (1 << (x))
277 +#define DMA_RX_ERR_CRC AR_BIT(1)
278 +#define DMA_RX_ERR_DRIB AR_BIT(2)
279 +#define DMA_RX_ERR_MII AR_BIT(3)
280 +#define DMA_RX_EV2 AR_BIT(5)
281 +#define DMA_RX_ERR_COL AR_BIT(6)
282 +#define DMA_RX_LONG AR_BIT(7)
283 +#define DMA_RX_LS AR_BIT(8) /* last descriptor */
284 +#define DMA_RX_FS AR_BIT(9) /* first descriptor */
285 +#define DMA_RX_MF AR_BIT(10) /* multicast frame */
286 +#define DMA_RX_ERR_RUNT AR_BIT(11) /* runt frame */
287 +#define DMA_RX_ERR_LENGTH AR_BIT(12) /* length error */
288 +#define DMA_RX_ERR_DESC AR_BIT(14) /* descriptor error */
289 +#define DMA_RX_ERROR AR_BIT(15) /* error summary */
290 +#define DMA_RX_LEN_MASK 0x3fff0000
291 +#define DMA_RX_LEN_SHIFT 16
292 +#define DMA_RX_FILT AR_BIT(30)
293 +#define DMA_RX_OWN AR_BIT(31) /* desc owned by DMA controller */
294 +
295 +#define DMA_RX1_BSIZE_MASK 0x000007ff
296 +#define DMA_RX1_BSIZE_SHIFT 0
297 +#define DMA_RX1_CHAINED AR_BIT(24)
298 +#define DMA_RX1_RER AR_BIT(25)
299 +
300 +#define DMA_TX_ERR_UNDER AR_BIT(1) /* underflow error */
301 +#define DMA_TX_ERR_DEFER AR_BIT(2) /* excessive deferral */
302 +#define DMA_TX_COL_MASK 0x78
303 +#define DMA_TX_COL_SHIFT 3
304 +#define DMA_TX_ERR_HB AR_BIT(7) /* heartbeat failure */
305 +#define DMA_TX_ERR_COL AR_BIT(8) /* excessive collisions */
306 +#define DMA_TX_ERR_LATE AR_BIT(9) /* late collision */
307 +#define DMA_TX_ERR_LINK AR_BIT(10) /* no carrier */
308 +#define DMA_TX_ERR_LOSS AR_BIT(11) /* loss of carrier */
309 +#define DMA_TX_ERR_JABBER AR_BIT(14) /* transmit jabber timeout */
310 +#define DMA_TX_ERROR AR_BIT(15) /* frame aborted */
311 +#define DMA_TX_OWN AR_BIT(31) /* descr owned by DMA controller */
312 +
313 +#define DMA_TX1_BSIZE_MASK 0x000007ff
314 +#define DMA_TX1_BSIZE_SHIFT 0
315 +#define DMA_TX1_CHAINED AR_BIT(24) /* chained descriptors */
316 +#define DMA_TX1_TER AR_BIT(25) /* transmit end of ring */
317 +#define DMA_TX1_FS AR_BIT(29) /* first segment */
318 +#define DMA_TX1_LS AR_BIT(30) /* last segment */
319 +#define DMA_TX1_IC AR_BIT(31) /* interrupt on completion */
320 +
321 +#define RCVPKT_LENGTH(X) (X >> 16) /* Received pkt Length */
322 +
323 +#define MAC_CONTROL_RE AR_BIT(2) /* receive enable */
324 +#define MAC_CONTROL_TE AR_BIT(3) /* transmit enable */
325 +#define MAC_CONTROL_DC AR_BIT(5) /* Deferral check */
326 +#define MAC_CONTROL_ASTP AR_BIT(8) /* Auto pad strip */
327 +#define MAC_CONTROL_DRTY AR_BIT(10) /* Disable retry */
328 +#define MAC_CONTROL_DBF AR_BIT(11) /* Disable bcast frames */
329 +#define MAC_CONTROL_LCC AR_BIT(12) /* late collision ctrl */
330 +#define MAC_CONTROL_HP AR_BIT(13) /* Hash Perfect filtering */
331 +#define MAC_CONTROL_HASH AR_BIT(14) /* Unicast hash filtering */
332 +#define MAC_CONTROL_HO AR_BIT(15) /* Hash only filtering */
333 +#define MAC_CONTROL_PB AR_BIT(16) /* Pass Bad frames */
334 +#define MAC_CONTROL_IF AR_BIT(17) /* Inverse filtering */
335 +#define MAC_CONTROL_PR AR_BIT(18) /* promiscuous mode (valid frames only) */
336 +#define MAC_CONTROL_PM AR_BIT(19) /* pass multicast */
337 +#define MAC_CONTROL_F AR_BIT(20) /* full-duplex */
338 +#define MAC_CONTROL_DRO AR_BIT(23) /* Disable Receive Own */
339 +#define MAC_CONTROL_HBD AR_BIT(28) /* heart-beat disabled (MUST BE SET) */
340 +#define MAC_CONTROL_BLE AR_BIT(30) /* big endian mode */
341 +#define MAC_CONTROL_RA AR_BIT(31) /* receive all (valid and invalid frames) */
342 +
343 +#define MII_ADDR_BUSY AR_BIT(0)
344 +#define MII_ADDR_WRITE AR_BIT(1)
345 +#define MII_ADDR_REG_SHIFT 6
346 +#define MII_ADDR_PHY_SHIFT 11
347 +#define MII_DATA_SHIFT 0
348 +
349 +#define FLOW_CONTROL_FCE AR_BIT(1)
350 +
351 +#define DMA_BUS_MODE_SWR AR_BIT(0) /* software reset */
352 +#define DMA_BUS_MODE_BLE AR_BIT(7) /* big endian mode */
353 +#define DMA_BUS_MODE_PBL_SHIFT 8 /* programmable burst length 32 */
354 +#define DMA_BUS_MODE_DBO AR_BIT(20) /* big-endian descriptors */
355 +
356 +#define DMA_STATUS_TI AR_BIT(0) /* transmit interrupt */
357 +#define DMA_STATUS_TPS AR_BIT(1) /* transmit process stopped */
358 +#define DMA_STATUS_TU AR_BIT(2) /* transmit buffer unavailable */
359 +#define DMA_STATUS_TJT AR_BIT(3) /* transmit buffer timeout */
360 +#define DMA_STATUS_UNF AR_BIT(5) /* transmit underflow */
361 +#define DMA_STATUS_RI AR_BIT(6) /* receive interrupt */
362 +#define DMA_STATUS_RU AR_BIT(7) /* receive buffer unavailable */
363 +#define DMA_STATUS_RPS AR_BIT(8) /* receive process stopped */
364 +#define DMA_STATUS_ETI AR_BIT(10) /* early transmit interrupt */
365 +#define DMA_STATUS_FBE AR_BIT(13) /* fatal bus interrupt */
366 +#define DMA_STATUS_ERI AR_BIT(14) /* early receive interrupt */
367 +#define DMA_STATUS_AIS AR_BIT(15) /* abnormal interrupt summary */
368 +#define DMA_STATUS_NIS AR_BIT(16) /* normal interrupt summary */
369 +#define DMA_STATUS_RS_SHIFT 17 /* receive process state */
370 +#define DMA_STATUS_TS_SHIFT 20 /* transmit process state */
371 +#define DMA_STATUS_EB_SHIFT 23 /* error bits */
372 +
373 +#define DMA_CONTROL_SR AR_BIT(1) /* start receive */
374 +#define DMA_CONTROL_ST AR_BIT(13) /* start transmit */
375 +#define DMA_CONTROL_SF AR_BIT(21) /* store and forward */
376 +
377 +#endif // __ARUBA_DMA_H__
378 +
379 +
380 +
381 +
382 +
383 diff -urN linux.old/drivers/net/ar2313/platform.h linux.net/drivers/net/ar2313/platform.h
384 --- linux.old/drivers/net/ar2313/platform.h 1970-01-01 01:00:00.000000000 +0100
385 +++ linux.net/drivers/net/ar2313/platform.h 2006-01-25 00:10:25.000000000 +0100
386 @@ -0,0 +1,128 @@
387 +/********************************************************************************
388 + Title: $Source: platform.h,v $
389 +
390 + Author: Dan Steinberg
391 + Copyright Integrated Device Technology 2001
392 +
393 + Purpose: AR2313 Register/Bit Definitions
394 +
395 + Update:
396 + $Log: platform.h,v $
397 +
398 + Notes: See Merlot architecture spec for complete details. Note, all
399 + addresses are virtual addresses in kseg1 (Uncached, Unmapped).
400 +
401 +********************************************************************************/
402 +
403 +#ifndef PLATFORM_H
404 +#define PLATFORM_H
405 +
406 +#define BIT(x) (1 << (x))
407 +
408 +#define RESET_BASE 0xBC003020
409 +#define RESET_VALUE 0x00000001
410 +
411 +/********************************************************************
412 + * Device controller
413 + ********************************************************************/
414 +typedef struct {
415 + volatile unsigned int flash0;
416 +} DEVICE;
417 +
418 +#define device (*((volatile DEVICE *) DEV_CTL_BASE))
419 +
420 +// DDRC register
421 +#define DEV_WP (1<<26)
422 +
423 +/********************************************************************
424 + * DDR controller
425 + ********************************************************************/
426 +typedef struct {
427 + volatile unsigned int ddrc0;
428 + volatile unsigned int ddrc1;
429 + volatile unsigned int ddrrefresh;
430 +} DDR;
431 +
432 +#define ddr (*((volatile DDR *) DDR_BASE))
433 +
434 +// DDRC register
435 +#define DDRC_CS(i) ((i&0x3)<<0)
436 +#define DDRC_WE (1<<2)
437 +
438 +/********************************************************************
439 + * Ethernet interfaces
440 + ********************************************************************/
441 +#define ETHERNET_BASE 0xB8200000
442 +
443 +//
444 +// New Combo structure for Both Eth0 AND eth1
445 +//
446 +typedef struct {
447 + volatile unsigned int mac_control; /* 0x00 */
448 + volatile unsigned int mac_addr[2]; /* 0x04 - 0x08*/
449 + volatile unsigned int mcast_table[2]; /* 0x0c - 0x10 */
450 + volatile unsigned int mii_addr; /* 0x14 */
451 + volatile unsigned int mii_data; /* 0x18 */
452 + volatile unsigned int flow_control; /* 0x1c */
453 + volatile unsigned int vlan_tag; /* 0x20 */
454 + volatile unsigned int pad[7]; /* 0x24 - 0x3c */
455 + volatile unsigned int ucast_table[8]; /* 0x40-0x5c */
456 +
457 +} ETHERNET_STRUCT;
458 +
459 +/********************************************************************
460 + * Interrupt controller
461 + ********************************************************************/
462 +
463 +typedef struct {
464 + volatile unsigned int wdog_control; /* 0x08 */
465 + volatile unsigned int wdog_timer; /* 0x0c */
466 + volatile unsigned int misc_status; /* 0x10 */
467 + volatile unsigned int misc_mask; /* 0x14 */
468 + volatile unsigned int global_status; /* 0x18 */
469 + volatile unsigned int reserved; /* 0x1c */
470 + volatile unsigned int reset_control; /* 0x20 */
471 +} INTERRUPT;
472 +
473 +#define interrupt (*((volatile INTERRUPT *) INTERRUPT_BASE))
474 +
475 +#define INTERRUPT_MISC_TIMER BIT(0)
476 +#define INTERRUPT_MISC_AHBPROC BIT(1)
477 +#define INTERRUPT_MISC_AHBDMA BIT(2)
478 +#define INTERRUPT_MISC_GPIO BIT(3)
479 +#define INTERRUPT_MISC_UART BIT(4)
480 +#define INTERRUPT_MISC_UARTDMA BIT(5)
481 +#define INTERRUPT_MISC_WATCHDOG BIT(6)
482 +#define INTERRUPT_MISC_LOCAL BIT(7)
483 +
484 +#define INTERRUPT_GLOBAL_ETH BIT(2)
485 +#define INTERRUPT_GLOBAL_WLAN BIT(3)
486 +#define INTERRUPT_GLOBAL_MISC BIT(4)
487 +#define INTERRUPT_GLOBAL_ITIMER BIT(5)
488 +
489 +/********************************************************************
490 + * DMA controller
491 + ********************************************************************/
492 +#define DMA_BASE 0xB8201000
493 +
494 +typedef struct {
495 + volatile unsigned int bus_mode; /* 0x00 (CSR0) */
496 + volatile unsigned int xmt_poll; /* 0x04 (CSR1) */
497 + volatile unsigned int rcv_poll; /* 0x08 (CSR2) */
498 + volatile unsigned int rcv_base; /* 0x0c (CSR3) */
499 + volatile unsigned int xmt_base; /* 0x10 (CSR4) */
500 + volatile unsigned int status; /* 0x14 (CSR5) */
501 + volatile unsigned int control; /* 0x18 (CSR6) */
502 + volatile unsigned int intr_ena; /* 0x1c (CSR7) */
503 + volatile unsigned int rcv_missed; /* 0x20 (CSR8) */
504 + volatile unsigned int reserved[11]; /* 0x24-0x4c (CSR9-19) */
505 + volatile unsigned int cur_tx_buf_addr; /* 0x50 (CSR20) */
506 + volatile unsigned int cur_rx_buf_addr; /* 0x54 (CSR21) */
507 +} DMA;
508 +
509 +#define dma (*((volatile DMA *) DMA_BASE))
510 +
511 +// macro to convert from virtual to physical address
512 +#define phys_addr(x) (x & 0x1fffffff)
513 +
514 +#endif /* PLATFORM_H */
515 diff -urN linux.old/drivers/net/ar2313.c linux.net/drivers/net/ar2313.c
516 --- linux.old/drivers/net/ar2313.c 1970-01-01 01:00:00.000000000 +0100
517 +++ linux.net/drivers/net/ar2313.c 2006-01-30 01:21:56.822933750 +0100
518 @@ -0,0 +1,1642 @@
519 +/*
520 + * ar2313.c: Linux driver for the Atheros AR2313 Ethernet device.
521 + *
522 + * Copyright 2004 by Sameer Dekate, <sdekate@arubanetworks.com>.
523 + *
524 + * Thanks to Atheros for providing hardware and documentation
525 + * enabling me to write this driver.
526 + *
527 + * This program is free software; you can redistribute it and/or modify
528 + * it under the terms of the GNU General Public License as published by
529 + * the Free Software Foundation; either version 2 of the License, or
530 + * (at your option) any later version.
531 + *
532 + * Additional credits:
533 + * This code is taken from John Taylor's Sibyte driver and then
534 + * modified for the AR2313.
535 + */
536 +
537 +#include <linux/config.h>
538 +#include <linux/module.h>
539 +#include <linux/version.h>
540 +#include <linux/types.h>
541 +#include <linux/errno.h>
542 +#include <linux/ioport.h>
543 +#include <linux/pci.h>
544 +#include <linux/netdevice.h>
545 +#include <linux/etherdevice.h>
546 +#include <linux/skbuff.h>
547 +#include <linux/init.h>
548 +#include <linux/delay.h>
549 +#include <linux/mm.h>
550 +#include <linux/highmem.h>
551 +#include <linux/sockios.h>
552 +#include <linux/pkt_sched.h>
553 +#include <linux/compile.h>
554 +#include <linux/mii.h>
555 +#include <linux/ethtool.h>
556 +#include <linux/ctype.h>
557 +
558 +#include <net/sock.h>
559 +#include <net/ip.h>
560 +
561 +#include <asm/system.h>
562 +#include <asm/io.h>
563 +#include <asm/irq.h>
564 +#include <asm/byteorder.h>
565 +#include <asm/uaccess.h>
566 +#include <asm/bootinfo.h>
567 +
568 +extern char *getenv(char *e);
569 +
570 +
571 +#undef INDEX_DEBUG
572 +#define DEBUG 0
573 +#define DEBUG_TX 0
574 +#define DEBUG_RX 0
575 +#define DEBUG_INT 0
576 +#define DEBUG_MC 0
577 +#define DEBUG_ERR 1
578 +
579 +#ifndef __exit
580 +#define __exit
581 +#endif
582 +
583 +#ifndef min
584 +#define min(a,b) (((a)<(b))?(a):(b))
585 +#endif
586 +
587 +#ifndef SMP_CACHE_BYTES
588 +#define SMP_CACHE_BYTES L1_CACHE_BYTES
589 +#endif
590 +
591 +#ifndef SET_MODULE_OWNER
592 +#define SET_MODULE_OWNER(dev) {do{} while(0);}
593 +#define AR2313_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
594 +#define AR2313_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
595 +#else
596 +#define AR2313_MOD_INC_USE_COUNT {do{} while(0);}
597 +#define AR2313_MOD_DEC_USE_COUNT {do{} while(0);}
598 +#endif
599 +
600 +#define PHYSADDR(a) ((_ACAST32_ (a)) & 0x1fffffff)
601 +
602 +MODULE_PARM(ethaddr, "s");
603 +static char *ethaddr = "00:00:00:00:00:00";
604 +MODULE_PARM(ifname, "s");
605 +static char *ifname = "bond" ;
606 +
607 +#define AR2313_MBOX_SET_BIT 0x8
608 +
609 +#define BOARD_IDX_STATIC 0
610 +#define BOARD_IDX_OVERFLOW -1
611 +
612 +/* margot includes */
613 +#include <asm/idt-boards/rc32434/rc32434.h>
614 +
615 +#include "ar2313/ar2313_msg.h"
616 +#include "ar2313/platform.h"
617 +#include "ar2313/dma.h"
618 +#include "ar2313/ar2313.h"
619 +
620 +/*
621 + * New interrupt handler strategy:
622 + *
623 + * An old interrupt handler worked using the traditional method of
624 + * replacing an skbuff with a new one when a packet arrives. However
625 + * the rx rings do not need to contain a static number of buffer
626 + * descriptors, thus it makes sense to move the memory allocation out
627 + * of the main interrupt handler and do it in a bottom half handler
628 + * and only allocate new buffers when the number of buffers in the
629 + * ring is below a certain threshold. In order to avoid starving the
630 + * NIC under heavy load it is however necessary to force allocation
631 + * when hitting a minimum threshold. The strategy for allocation is as
632 + * follows:
633 + *
634 + * RX_LOW_BUF_THRES - allocate buffers in the bottom half
635 + * RX_PANIC_LOW_THRES - we are very low on buffers, allocate
636 + * the buffers in the interrupt handler
637 + * RX_RING_THRES - maximum number of buffers in the rx ring
638 + *
639 + * One advantageous side effect of this allocation approach is that the
640 + * entire rx processing can be done without holding any spin lock
641 + * since the rx rings and registers are totally independent of the tx
642 + * ring and its registers. This of course includes the kmalloc's of
643 + * new skb's. Thus start_xmit can run in parallel with rx processing
644 + * and the memory allocation on SMP systems.
645 + *
646 + * Note that running the skb reallocation in a bottom half opens up
647 + * another can of races which needs to be handled properly. In
648 + * particular it can happen that the interrupt handler tries to run
649 + * the reallocation while the bottom half is either running on another
650 + * CPU or was interrupted on the same CPU. To get around this the
651 + * driver uses bitops to prevent the reallocation routines from being
652 + * reentered.
653 + *
654 + * TX handling can also be done without holding any spin lock, wheee
655 + * this is fun! since tx_csm is only written to by the interrupt
656 + * handler.
657 + */
658 +
659 +/*
660 + * Threshold values for RX buffer allocation - the low water marks for
661 + * when to start refilling the rings are set to 75% of the ring
662 + * sizes. It seems to make sense to refill the rings entirely from the
663 + * interrupt handler once it gets below the panic threshold, that way
664 + * we don't risk that the refilling is moved to another CPU when the
665 + * one running the interrupt handler just got the slab code hot in its
666 + * cache.
667 + */
668 +#define RX_RING_SIZE AR2313_DESCR_ENTRIES
669 +#define RX_PANIC_THRES (RX_RING_SIZE/4)
670 +#define RX_LOW_THRES ((3*RX_RING_SIZE)/4)
671 +#define CRC_LEN 4
672 +#define RX_OFFSET 2
673 +
674 +#define AR2313_BUFSIZE (AR2313_MTU + ETH_HLEN + CRC_LEN + RX_OFFSET)
675 +
676 +#ifdef MODULE
677 +MODULE_AUTHOR("Sameer Dekate<sdekate@arubanetworks.com>");
678 +MODULE_DESCRIPTION("AR2313 Ethernet driver");
679 +#endif
680 +
681 +#if DEBUG
682 +static char version[] __initdata =
683 + "ar2313.c: v0.01 2004/01/06 sdekate@arubanetworks.com\n";
684 +#endif /* DEBUG */
685 +
686 +#define virt_to_phys(x) ((u32)(x) & 0x1fffffff)
687 +
688 +// prototypes
689 +static short armiiread(short phy, short reg);
690 +static void armiiwrite(short phy, short reg, short data);
691 +#ifdef TX_TIMEOUT
692 +static void ar2313_tx_timeout(struct net_device *dev);
693 +#endif
694 +static void ar2313_halt(struct net_device *dev);
695 +static void rx_tasklet_func(unsigned long data);
696 +static void ar2313_multicast_list(struct net_device *dev);
697 +
698 +static struct net_device *root_dev;
699 +static int probed __initdata = 0;
700 +static unsigned long ar_eth_base;
701 +static unsigned long ar_dma_base;
702 +static unsigned long ar_int_base;
703 +static unsigned long ar_int_mac_mask;
704 +static unsigned long ar_int_phy_mask;
705 +
706 +#ifndef ERR
707 +#define ERR(fmt, args...) printk("%s: " fmt, __func__, ##args)
708 +#endif
709 +
710 +static int parse_mac_addr(struct net_device *dev, char* macstr){
711 + int i, j;
712 + unsigned char result, value;
713 +
714 + for (i=0; i<6; i++) {
715 + result = 0;
716 + if (i != 5 && *(macstr+2) != ':') {
717 + ERR("invalid mac address format: %d %c\n",
718 + i, *(macstr+2));
719 + return -EINVAL;
720 + }
721 + for (j=0; j<2; j++) {
722 + if (isxdigit(*macstr) && (value = isdigit(*macstr) ? *macstr-'0' :
723 + toupper(*macstr)-'A'+10) < 16)
724 + {
725 + result = result*16 + value;
726 + macstr++;
727 + }
728 + else {
729 + ERR("invalid mac address "
730 + "character: %c\n", *macstr);
731 + return -EINVAL;
732 + }
733 + }
734 +
735 + macstr++;
736 + dev->dev_addr[i] = result;
737 + }
738 +
739 + return 0;
740 +}
741 +
742 +
743 +int __init ar2313_probe(void)
744 +{
745 + struct net_device *dev;
746 + struct ar2313_private *sp;
747 + int version_disp;
748 + char name[64] ;
749 +
750 + if (probed)
751 + return -ENODEV;
752 + probed++;
753 +
754 + version_disp = 0;
755 + sprintf(name, "%s%%d", ifname) ;
756 + dev = alloc_etherdev(sizeof(struct ar2313_private));
757 +
758 + if (dev == NULL) {
759 + printk(KERN_ERR "ar2313: Unable to allocate net_device structure!\n");
760 + return -ENOMEM;
761 + }
762 +
763 + SET_MODULE_OWNER(dev);
764 +
765 + sp = dev->priv;
766 +
767 + sp->link = 0;
768 + switch (mips_machtype) {
769 + case MACH_ARUBA_AP60:
770 + ar_eth_base = 0xb8100000;
771 + ar_dma_base = ar_eth_base + 0x1000;
772 + ar_int_base = 0x1C003020;
773 + ar_int_mac_mask = RESET_ENET0|RESET_ENET1;
774 + ar_int_phy_mask = RESET_EPHY0|RESET_EPHY1;
775 + sp->mac = 1;
776 + sp->phy = 1;
777 + dev->irq = 4;
778 + break;
779 +
780 + case MACH_ARUBA_AP40:
781 + ar_eth_base = 0xb0500000;
782 + ar_dma_base = ar_eth_base + 0x1000;
783 + ar_int_base = 0x11000004;
784 + ar_int_mac_mask = 0x800;
785 + ar_int_phy_mask = 0x400;
786 + sp->mac = 0;
787 + sp->phy = 1;
788 + dev->irq = 4;
789 + break;
790 +
791 + case MACH_ARUBA_AP65:
792 + ar_eth_base = 0xb8100000;
793 + ar_dma_base = ar_eth_base + 0x1000;
794 + ar_int_base = 0x1C003020;
795 + ar_int_mac_mask = RESET_ENET0|RESET_ENET1;
796 + ar_int_phy_mask = RESET_EPHY0|RESET_EPHY1;
797 + sp->mac = 0;
798 +#if 0
799 + // commented out, for now
800 +
801 + if (mips_machtype == MACH_ARUBA_SAMSUNG) {
802 + sp->phy = 0x1f;
803 + } else {
804 + sp->phy = 1;
805 + }
806 +#else
807 + sp->phy = 1;
808 +#endif
809 + dev->irq = 3;
810 + break;
811 +
812 + default:
813 + printk("%s: unsupported mips_machtype=0x%lx\n",
814 + __FUNCTION__, mips_machtype) ;
815 + return -ENODEV;
816 + }
817 +
818 + spin_lock_init(&sp->lock);
819 +
820 + /* initialize func pointers */
821 + dev->open = &ar2313_open;
822 + dev->stop = &ar2313_close;
823 + dev->hard_start_xmit = &ar2313_start_xmit;
824 +
825 + dev->get_stats = &ar2313_get_stats;
826 + dev->set_multicast_list = &ar2313_multicast_list;
827 +#ifdef TX_TIMEOUT
828 + dev->tx_timeout = ar2313_tx_timeout;
829 + dev->watchdog_timeo = AR2313_TX_TIMEOUT;
830 +#endif
831 + dev->do_ioctl = &ar2313_ioctl;
832 +
833 + // SAMEER: do we need this?
834 + dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA;
835 +
836 + tasklet_init(&sp->rx_tasklet, rx_tasklet_func, (unsigned long) dev);
837 + tasklet_disable(&sp->rx_tasklet);
838 +
839 + /* display version info if adapter is found */
840 + if (!version_disp) {
841 + /* set display flag to TRUE so that */
842 + /* we only display this string ONCE */
843 + version_disp = 1;
844 +#if DEBUG
845 + printk(version);
846 +#endif /* DEBUG */
847 + }
848 +
849 + request_region(PHYSADDR(ETHERNET_BASE), ETHERNET_SIZE*ETHERNET_MACS,
850 + "AR2313ENET");
851 +
852 + sp->eth_regs = ioremap_nocache(PHYSADDR(ETHERNET_BASE + ETHERNET_SIZE*sp->mac),
853 + sizeof(*sp->eth_regs));
854 + if (!sp->eth_regs) {
855 + printk("Can't remap eth registers\n");
856 + return(-ENXIO);
857 + }
858 +
859 + sp->dma_regs = ioremap_nocache(PHYSADDR(DMA_BASE + DMA_SIZE*sp->mac),
860 + sizeof(*sp->dma_regs));
861 + dev->base_addr = (unsigned int) sp->dma_regs;
862 + if (!sp->dma_regs) {
863 + printk("Can't remap DMA registers\n");
864 + return(-ENXIO);
865 + }
866 +
867 + sp->int_regs = ioremap_nocache(PHYSADDR(INTERRUPT_BASE),
868 + sizeof(*sp->int_regs));
869 + if (!sp->int_regs) {
870 + printk("Can't remap INTERRUPT registers\n");
871 + return(-ENXIO);
872 + }
873 +
874 + strncpy(sp->name, "Atheros AR2313", sizeof (sp->name) - 1);
875 + sp->name [sizeof (sp->name) - 1] = '\0';
876 +
877 + {
878 + char mac[32];
879 + extern char *getenv(char *e);
880 + unsigned char def_mac[6] = {0, 0x0b, 0x86, 0xba, 0xdb, 0xad};
881 + memset(mac, 0, 32);
882 + memcpy(mac, getenv("ethaddr"), 17);
883 + if (parse_mac_addr(dev, mac)){
884 + printk("%s: MAC address not found, using default\n", __func__);
885 + memcpy(dev->dev_addr, def_mac, 6);
886 + }
887 + }
888 +
889 + sp->board_idx = BOARD_IDX_STATIC;
890 +
891 + if (ar2313_init(dev)) {
892 + /*
893 + * ar2313_init() calls ar2313_init_cleanup() on error.
894 + */
895 + kfree(dev);
896 + return -ENODEV;
897 + }
898 +
899 + if (register_netdev(dev)){
900 + printk("%s: register_netdev failed\n", __func__);
901 + return -1;
902 + }
903 +
904 + printk("%s: %s: %02x:%02x:%02x:%02x:%02x:%02x, irq %d\n",
905 + dev->name, sp->name,
906 + dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
907 + dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5],
908 + dev->irq);
909 +
910 + /* start link poll timer */
911 + ar2313_setup_timer(dev);
912 +
913 + /*
914 + * Register the device
915 + */
916 + root_dev = dev;
917 +
918 + return 0;
919 +}
920 +
921 +#if 0
922 +static void ar2313_dump_regs(struct net_device *dev)
923 +{
924 + unsigned int *ptr, i;
925 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
926 +
927 + ptr = (unsigned int *)sp->eth_regs;
928 + for(i=0; i< (sizeof(ETHERNET_STRUCT)/ sizeof(unsigned int)); i++, ptr++) {
929 + printk("ENET: %08x = %08x\n", (int)ptr, *ptr);
930 + }
931 +
932 + ptr = (unsigned int *)sp->dma_regs;
933 + for(i=0; i< (sizeof(DMA)/ sizeof(unsigned int)); i++, ptr++) {
934 + printk("DMA: %08x = %08x\n", (int)ptr, *ptr);
935 + }
936 +
937 + ptr = (unsigned int *)sp->int_regs;
938 + for(i=0; i< (sizeof(INTERRUPT)/ sizeof(unsigned int)); i++, ptr++){
939 + printk("INT: %08x = %08x\n", (int)ptr, *ptr);
940 + }
941 +
942 + for (i = 0; i < AR2313_DESCR_ENTRIES; i++) {
943 + ar2313_descr_t *td = &sp->tx_ring[i];
944 + printk("Tx desc %2d: %08x %08x %08x %08x\n", i,
945 + td->status, td->devcs, td->addr, td->descr);
946 + }
947 +}
948 +#endif
949 +
950 +#ifdef TX_TIMEOUT
951 +static void
952 +ar2313_tx_timeout(struct net_device *dev)
953 +{
954 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
955 + unsigned long flags;
956 +
957 +#if DEBUG_TX
958 + printk("Tx timeout\n");
959 +#endif
960 + spin_lock_irqsave(&sp->lock, flags);
961 + ar2313_restart(dev);
962 + spin_unlock_irqrestore(&sp->lock, flags);
963 +}
964 +#endif
965 +
966 +#if DEBUG_MC
967 +static void
968 +printMcList(struct net_device *dev)
969 +{
970 + struct dev_mc_list *list = dev->mc_list;
971 + int num=0, i;
972 + while(list){
973 + printk("%d MC ADDR ", num);
974 + for(i=0;i<list->dmi_addrlen;i++) {
975 + printk(":%02x", list->dmi_addr[i]);
976 + }
977 + list = list->next;
978 + printk("\n");
979 + }
980 +}
981 +#endif
982 +
983 +/*
984 + * Set or clear the multicast filter for this adaptor.
985 + * THIS IS ABSOLUTE CRAP, disabled
986 + */
987 +static void
988 +ar2313_multicast_list(struct net_device *dev)
989 +{
990 + /*
991 + * Always listen to broadcasts and
992 + * treat IFF bits independently
993 + */
994 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
995 + unsigned int recognise;
996 +
997 + recognise = sp->eth_regs->mac_control;
998 +
999 + if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
1000 + recognise |= MAC_CONTROL_PR;
1001 + } else {
1002 + recognise &= ~MAC_CONTROL_PR;
1003 + }
1004 +
1005 + if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
1006 +#if DEBUG_MC
1007 + printMcList(dev);
1008 + printk("%s: all MULTICAST mc_count %d\n", __FUNCTION__, dev->mc_count);
1009 +#endif
1010 + recognise |= MAC_CONTROL_PM;/* all multicast */
1011 + } else if (dev->mc_count > 0) {
1012 +#if DEBUG_MC
1013 + printMcList(dev);
1014 + printk("%s: mc_count %d\n", __FUNCTION__, dev->mc_count);
1015 +#endif
1016 + recognise |= MAC_CONTROL_PM; /* for the time being */
1017 + }
1018 +#if DEBUG_MC
1019 + printk("%s: setting %08x to %08x\n", __FUNCTION__, (int)sp->eth_regs, recognise);
1020 +#endif
1021 +
1022 + sp->eth_regs->mac_control = recognise;
1023 +}
1024 +
1025 +static void rx_tasklet_cleanup(struct net_device *dev)
1026 +{
1027 + struct ar2313_private *sp = dev->priv;
1028 +
1029 + /*
1030 + * Tasklet may be scheduled. Need to get it removed from the list
1031 + * since we're about to free the struct.
1032 + */
1033 +
1034 + sp->unloading = 1;
1035 + tasklet_enable(&sp->rx_tasklet);
1036 + tasklet_kill(&sp->rx_tasklet);
1037 +}
1038 +
1039 +static void __exit ar2313_module_cleanup(void)
1040 +{
1041 + rx_tasklet_cleanup(root_dev);
1042 + ar2313_init_cleanup(root_dev);
1043 + unregister_netdev(root_dev);
1044 + kfree(root_dev);
1045 + release_region(PHYSADDR(ETHERNET_BASE), ETHERNET_SIZE*ETHERNET_MACS);
1046 +}
1047 +
1048 +
1049 +/*
1050 + * Restart the AR2313 ethernet controller.
1051 + */
1052 +static int ar2313_restart(struct net_device *dev)
1053 +{
1054 + /* disable interrupts */
1055 + disable_irq(dev->irq);
1056 +
1057 + /* stop mac */
1058 + ar2313_halt(dev);
1059 +
1060 + /* initialize */
1061 + ar2313_init(dev);
1062 +
1063 + /* enable interrupts */
1064 + enable_irq(dev->irq);
1065 +
1066 + return 0;
1067 +}
1068 +
1069 +extern unsigned long mips_machtype;
1070 +
1071 +int __init ar2313_module_init(void)
1072 +{
1073 + int status=-1;
1074 + switch (mips_machtype){
1075 + case MACH_ARUBA_AP60:
1076 + case MACH_ARUBA_AP65:
1077 + case MACH_ARUBA_AP40:
1078 + root_dev = NULL;
1079 + status = ar2313_probe();
1080 + break;
1081 + }
1082 + return status;
1083 +}
1084 +
1085 +
1086 +module_init(ar2313_module_init);
1087 +module_exit(ar2313_module_cleanup);
1088 +
1089 +
1090 +static void ar2313_free_descriptors(struct net_device *dev)
1091 +{
1092 + struct ar2313_private *sp = dev->priv;
1093 + if (sp->rx_ring != NULL) {
1094 + kfree((void*)KSEG0ADDR(sp->rx_ring));
1095 + sp->rx_ring = NULL;
1096 + sp->tx_ring = NULL;
1097 + }
1098 +}
1099 +
1100 +
1101 +static int ar2313_allocate_descriptors(struct net_device *dev)
1102 +{
1103 + struct ar2313_private *sp = dev->priv;
1104 + int size;
1105 + int j;
1106 + ar2313_descr_t *space;
1107 +
1108 + if(sp->rx_ring != NULL){
1109 + printk("%s: already done.\n", __FUNCTION__);
1110 + return 0;
1111 + }
1112 +
1113 + size = (sizeof(ar2313_descr_t) * (AR2313_DESCR_ENTRIES * AR2313_QUEUES));
1114 + space = kmalloc(size, GFP_KERNEL);
1115 + if (space == NULL)
1116 + return 1;
1117 +
1118 + /* invalidate caches */
1119 + dma_cache_inv((unsigned int)space, size);
1120 +
1121 + /* now convert pointer to KSEG1 */
1122 + space = (ar2313_descr_t *)KSEG1ADDR(space);
1123 +
1124 + memset((void *)space, 0, size);
1125 +
1126 + sp->rx_ring = space;
1127 + space += AR2313_DESCR_ENTRIES;
1128 +
1129 + sp->tx_ring = space;
1130 + space += AR2313_DESCR_ENTRIES;
1131 +
1132 + /* Initialize the transmit Descriptors */
1133 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
1134 + ar2313_descr_t *td = &sp->tx_ring[j];
1135 + td->status = 0;
1136 + td->devcs = DMA_TX1_CHAINED;
1137 + td->addr = 0;
1138 + td->descr = K1_TO_PHYS(&sp->tx_ring[(j+1) & (AR2313_DESCR_ENTRIES-1)]);
1139 + }
1140 +
1141 + return 0;
1142 +}
1143 +
1144 +
1145 +/*
1146 + * Generic cleanup handling data allocated during init. Used when the
1147 + * module is unloaded or if an error occurs during initialization
1148 + */
1149 +static void ar2313_init_cleanup(struct net_device *dev)
1150 +{
1151 + struct ar2313_private *sp = dev->priv;
1152 + struct sk_buff *skb;
1153 + int j;
1154 +
1155 + ar2313_free_descriptors(dev);
1156 +
1157 + if (sp->eth_regs) iounmap((void*)sp->eth_regs);
1158 + if (sp->dma_regs) iounmap((void*)sp->dma_regs);
1159 +
1160 + if (sp->rx_skb) {
1161 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
1162 + skb = sp->rx_skb[j];
1163 + if (skb) {
1164 + sp->rx_skb[j] = NULL;
1165 + dev_kfree_skb(skb);
1166 + }
1167 + }
1168 + kfree(sp->rx_skb);
1169 + sp->rx_skb = NULL;
1170 + }
1171 +
1172 + if (sp->tx_skb) {
1173 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
1174 + skb = sp->tx_skb[j];
1175 + if (skb) {
1176 + sp->tx_skb[j] = NULL;
1177 + dev_kfree_skb(skb);
1178 + }
1179 + }
1180 + kfree(sp->tx_skb);
1181 + sp->tx_skb = NULL;
1182 + }
1183 +}
1184 +
1185 +static int ar2313_setup_timer(struct net_device *dev)
1186 +{
1187 + struct ar2313_private *sp = dev->priv;
1188 +
1189 + init_timer(&sp->link_timer);
1190 +
1191 + sp->link_timer.function = ar2313_link_timer_fn;
1192 + sp->link_timer.data = (int) dev;
1193 + sp->link_timer.expires = jiffies + HZ;
1194 +
1195 + add_timer(&sp->link_timer);
1196 + return 0;
1197 +
1198 +}
1199 +
1200 +static void ar2313_link_timer_fn(unsigned long data)
1201 +{
1202 + struct net_device *dev = (struct net_device *) data;
1203 + struct ar2313_private *sp = dev->priv;
1204 +
1205 + // see if the link status changed
1206 + // This was needed to make sure we set the PHY to the
1207 + // autonegotiated value of half or full duplex.
1208 + ar2313_check_link(dev);
1209 +
1210 + // Loop faster when we don't have link.
1211 + // This was needed to speed up the AP bootstrap time.
1212 + if(sp->link == 0) {
1213 + mod_timer(&sp->link_timer, jiffies + HZ/2);
1214 + } else {
1215 + mod_timer(&sp->link_timer, jiffies + LINK_TIMER);
1216 + }
1217 +}
1218 +
1219 +static void ar2313_check_link(struct net_device *dev)
1220 +{
1221 + struct ar2313_private *sp = dev->priv;
1222 + u16 phyData;
1223 +
1224 + phyData = armiiread(sp->phy, MII_BMSR);
1225 + if (sp->phyData != phyData) {
1226 + if (phyData & BMSR_LSTATUS) {
1227 + /* link is present, read link partner ability to determine duplexity */
1228 + int duplex = 0;
1229 + u16 reg;
1230 +
1231 + sp->link = 1;
1232 + reg = armiiread(sp->phy, MII_BMCR);
1233 + if (reg & BMCR_ANENABLE) {
1234 + /* auto neg enabled */
1235 + reg = armiiread(sp->phy, MII_LPA);
1236 + duplex = (reg & (LPA_100FULL|LPA_10FULL))? 1:0;
1237 + } else {
1238 + /* no auto neg, just read duplex config */
1239 + duplex = (reg & BMCR_FULLDPLX)? 1:0;
1240 + }
1241 +
1242 + printk(KERN_INFO "%s: Configuring MAC for %s duplex\n", dev->name,
1243 + (duplex)? "full":"half");
1244 +
1245 + if (duplex) {
1246 + /* full duplex */
1247 + sp->eth_regs->mac_control = ((sp->eth_regs->mac_control | MAC_CONTROL_F) &
1248 + ~MAC_CONTROL_DRO);
1249 + } else {
1250 + /* half duplex */
1251 + sp->eth_regs->mac_control = ((sp->eth_regs->mac_control | MAC_CONTROL_DRO) &
1252 + ~MAC_CONTROL_F);
1253 + }
1254 + } else {
1255 + /* no link */
1256 + sp->link = 0;
1257 + }
1258 + sp->phyData = phyData;
1259 + }
1260 +}
1261 +
1262 +static int
1263 +ar2313_reset_reg(struct net_device *dev)
1264 +{
1265 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
1266 + unsigned int ethsal, ethsah;
1267 + unsigned int flags;
1268 +
1269 + *sp->int_regs |= ar_int_mac_mask;
1270 + mdelay(10);
1271 + *sp->int_regs &= ~ar_int_mac_mask;
1272 + mdelay(10);
1273 + *sp->int_regs |= ar_int_phy_mask;
1274 + mdelay(10);
1275 + *sp->int_regs &= ~ar_int_phy_mask;
1276 + mdelay(10);
1277 +
1278 + sp->dma_regs->bus_mode = (DMA_BUS_MODE_SWR);
1279 + mdelay(10);
1280 + sp->dma_regs->bus_mode = ((32 << DMA_BUS_MODE_PBL_SHIFT) | DMA_BUS_MODE_BLE);
1281 +
1282 + /* enable interrupts */
1283 + sp->dma_regs->intr_ena = (DMA_STATUS_AIS |
1284 + DMA_STATUS_NIS |
1285 + DMA_STATUS_RI |
1286 + DMA_STATUS_TI |
1287 + DMA_STATUS_FBE);
1288 + sp->dma_regs->xmt_base = K1_TO_PHYS(sp->tx_ring);
1289 + sp->dma_regs->rcv_base = K1_TO_PHYS(sp->rx_ring);
1290 + sp->dma_regs->control = (DMA_CONTROL_SR | DMA_CONTROL_ST | DMA_CONTROL_SF);
1291 +
1292 + sp->eth_regs->flow_control = (FLOW_CONTROL_FCE);
1293 + sp->eth_regs->vlan_tag = (0x8100);
1294 +
1295 + /* Enable Ethernet Interface */
1296 + flags = (MAC_CONTROL_TE | /* transmit enable */
1297 + MAC_CONTROL_PM | /* pass mcast */
1298 + MAC_CONTROL_F | /* full duplex */
1299 + MAC_CONTROL_HBD); /* heart beat disabled */
1300 +
1301 + if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
1302 + flags |= MAC_CONTROL_PR;
1303 + }
1304 + sp->eth_regs->mac_control = flags;
1305 +
1306 + /* Set all Ethernet station address registers to their initial values */
1307 + ethsah = ((((u_int)(dev->dev_addr[5]) << 8) & (u_int)0x0000FF00) |
1308 + (((u_int)(dev->dev_addr[4]) << 0) & (u_int)0x000000FF));
1309 +
1310 + ethsal = ((((u_int)(dev->dev_addr[3]) << 24) & (u_int)0xFF000000) |
1311 + (((u_int)(dev->dev_addr[2]) << 16) & (u_int)0x00FF0000) |
1312 + (((u_int)(dev->dev_addr[1]) << 8) & (u_int)0x0000FF00) |
1313 + (((u_int)(dev->dev_addr[0]) << 0) & (u_int)0x000000FF) );
1314 +
1315 + sp->eth_regs->mac_addr[0] = ethsah;
1316 + sp->eth_regs->mac_addr[1] = ethsal;
1317 +
1318 + mdelay(10);
1319 +
1320 + return(0);
1321 +}
1322 +
1323 +
1324 +static int ar2313_init(struct net_device *dev)
1325 +{
1326 + struct ar2313_private *sp = dev->priv;
1327 + int ecode=0;
1328 +
1329 + /*
1330 + * Allocate descriptors
1331 + */
1332 + if (ar2313_allocate_descriptors(dev)) {
1333 + printk("%s: %s: ar2313_allocate_descriptors failed\n",
1334 + dev->name, __FUNCTION__);
1335 + ecode = -EAGAIN;
1336 + goto init_error;
1337 + }
1338 +
1339 + /*
1340 + * Get the memory for the skb rings.
1341 + */
1342 + if(sp->rx_skb == NULL) {
1343 + sp->rx_skb = kmalloc(sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES, GFP_KERNEL);
1344 + if (!(sp->rx_skb)) {
1345 + printk("%s: %s: rx_skb kmalloc failed\n",
1346 + dev->name, __FUNCTION__);
1347 + ecode = -EAGAIN;
1348 + goto init_error;
1349 + }
1350 + }
1351 + memset(sp->rx_skb, 0, sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES);
1352 +
1353 + if(sp->tx_skb == NULL) {
1354 + sp->tx_skb = kmalloc(sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES, GFP_KERNEL);
1355 + if (!(sp->tx_skb)) {
1356 + printk("%s: %s: tx_skb kmalloc failed\n",
1357 + dev->name, __FUNCTION__);
1358 + ecode = -EAGAIN;
1359 + goto init_error;
1360 + }
1361 + }
1362 + memset(sp->tx_skb, 0, sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES);
1363 +
1364 + /*
1365 + * Set tx_csm before we start receiving interrupts, otherwise
1366 + * the interrupt handler might think it is supposed to process
1367 + * tx ints before we are up and running, which may cause a null
1368 + * pointer access in the int handler.
1369 + */
1370 + sp->rx_skbprd = 0;
1371 + sp->cur_rx = 0;
1372 + sp->tx_prd = 0;
1373 + sp->tx_csm = 0;
1374 +
1375 + /*
1376 + * Zero the stats before starting the interface
1377 + */
1378 + memset(&sp->stats, 0, sizeof(sp->stats));
1379 +
1380 + /*
1381 + * We load the ring here as there seem to be no way to tell the
1382 + * firmware to wipe the ring without re-initializing it.
1383 + */
1384 + ar2313_load_rx_ring(dev, RX_RING_SIZE);
1385 +
1386 + /*
1387 + * Init hardware
1388 + */
1389 + ar2313_reset_reg(dev);
1390 +
1391 + /*
1392 + * Get the IRQ
1393 + */
1394 + ecode = request_irq(dev->irq, &ar2313_interrupt, SA_SHIRQ | SA_INTERRUPT, dev->name, dev);
1395 + if (ecode) {
1396 + printk(KERN_WARNING "%s: %s: Requested IRQ %d is busy\n",
1397 + dev->name, __FUNCTION__, dev->irq);
1398 + goto init_error;
1399 + }
1400 +
1401 +#if 0
1402 + // commented out, for now
1403 +
1404 + if(mips_machtype == MACH_ARUBA_SAMSUNG) {
1405 + int i;
1406 + /* configure Marvell 88E6060 */
1407 + /* reset chip */
1408 + armiiwrite(0x1f, 0xa, 0xa130);
1409 + do {
1410 + udelay(1000);
1411 + i = armiiread(sp->phy, 0xa);
1412 + } while (i & 0x8000);
1413 +
1414 + /* configure MAC address */
1415 + armiiwrite(sp->phy, 0x1, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
1416 + armiiwrite(sp->phy, 0x2, dev->dev_addr[2] << 8 | dev->dev_addr[3]);
1417 + armiiwrite(sp->phy, 0x3, dev->dev_addr[4] << 8 | dev->dev_addr[5]);
1418 +
1419 + /* set ports to forwarding */
1420 + armiiwrite(0x18, 0x4, 0x3);
1421 + armiiwrite(0x1c, 0x4, 0x3);
1422 + armiiwrite(0x1d, 0x4, 0x3);
1423 + }
1424 +#endif
1425 +
1426 + tasklet_enable(&sp->rx_tasklet);
1427 +
1428 + return 0;
1429 +
1430 + init_error:
1431 + ar2313_init_cleanup(dev);
1432 + return ecode;
1433 +}
1434 +
1435 +/*
1436 + * Load the rx ring.
1437 + *
1438 + * Loading rings is safe without holding the spin lock since this is
1439 + * done only before the device is enabled, thus no interrupts are
1440 + * generated and none are handled by the interrupt handler/tasklet handler.
1441 + */
1442 +static void ar2313_load_rx_ring(struct net_device *dev, int nr_bufs)
1443 +{
1444 +
1445 + struct ar2313_private *sp = ((struct net_device *)dev)->priv;
1446 + short i, idx;
1447 +
1448 + idx = sp->rx_skbprd;
1449 +
1450 + for (i = 0; i < nr_bufs; i++) {
1451 + struct sk_buff *skb;
1452 + ar2313_descr_t *rd;
1453 +
1454 + if (sp->rx_skb[idx]) {
1455 +#if DEBUG_RX
1456 + printk(KERN_INFO "ar2313 rx refill full\n");
1457 +#endif /* DEBUG */
1458 + break;
1459 + }
1460 +
1461 + // partha: create additional room for the second GRE fragment
1462 + skb = alloc_skb(AR2313_BUFSIZE+128, GFP_ATOMIC);
1463 + if (!skb) {
1464 + printk("\n\n\n\n %s: No memory in system\n\n\n\n", __FUNCTION__);
1465 + break;
1466 + }
1467 + // partha: create additional room in the front for tx pkt capture
1468 + skb_reserve(skb, 32);
1469 +
1470 + /*
1471 + * Make sure IP header starts on a fresh cache line.
1472 + */
1473 + skb->dev = dev;
1474 + skb_reserve(skb, RX_OFFSET);
1475 + sp->rx_skb[idx] = skb;
1476 +
1477 + rd = (ar2313_descr_t *) &sp->rx_ring[idx];
1478 +
1479 + /* initialize dma descriptor */
1480 + rd->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
1481 + DMA_RX1_CHAINED);
1482 + rd->addr = virt_to_phys(skb->data);
1483 + rd->descr = virt_to_phys(&sp->rx_ring[(idx+1) & (AR2313_DESCR_ENTRIES-1)]);
1484 + rd->status = DMA_RX_OWN;
1485 +
1486 + idx = DSC_NEXT(idx);
1487 + }
1488 +
1489 + if (!i) {
1490 +#if DEBUG_ERR
1491 + printk(KERN_INFO "Out of memory when allocating standard receive buffers\n");
1492 +#endif /* DEBUG */
1493 + } else {
1494 + sp->rx_skbprd = idx;
1495 + }
1496 +
1497 + return;
1498 +}
1499 +
1500 +#define AR2313_MAX_PKTS_PER_CALL 64
1501 +
1502 +static int ar2313_rx_int(struct net_device *dev)
1503 +{
1504 + struct ar2313_private *sp = dev->priv;
1505 + struct sk_buff *skb, *skb_new;
1506 + ar2313_descr_t *rxdesc;
1507 + unsigned int status;
1508 + u32 idx;
1509 + int pkts = 0;
1510 + int rval;
1511 +
1512 + idx = sp->cur_rx;
1513 +
1514 + /* process at most the entire ring and then wait for another interrupt */
1515 + while(1) {
1516 +
1517 + rxdesc = &sp->rx_ring[idx];
1518 + status = rxdesc->status;
1519 + if (status & DMA_RX_OWN) {
1520 + /* DMA engine owns descriptor or descr not yet filled in */
1521 + rval = 0;
1522 + break;
1523 + }
1524 +
1525 + if (++pkts > AR2313_MAX_PKTS_PER_CALL) {
1526 + rval = 1;
1527 + break;
1528 + }
1529 +
1530 +#if DEBUG_RX
1531 + printk("index %d\n", idx);
1532 + printk("RX status %08x\n", rxdesc->status);
1533 + printk("RX devcs %08x\n", rxdesc->devcs );
1534 + printk("RX addr %08x\n", rxdesc->addr );
1535 + printk("RX descr %08x\n", rxdesc->descr );
1536 +#endif
1537 +
1538 + if ((status & (DMA_RX_ERROR|DMA_RX_ERR_LENGTH)) &&
1539 + (!(status & DMA_RX_LONG))){
1540 +#if DEBUG_RX
1541 + printk("%s: rx ERROR %08x\n", __FUNCTION__, status);
1542 +#endif
1543 + sp->stats.rx_errors++;
1544 + sp->stats.rx_dropped++;
1545 +
1546 + /* add statistics counters */
1547 + if (status & DMA_RX_ERR_CRC) sp->stats.rx_crc_errors++;
1548 + if (status & DMA_RX_ERR_COL) sp->stats.rx_over_errors++;
1549 + if (status & DMA_RX_ERR_LENGTH)
1550 + sp->stats.rx_length_errors++;
1551 + if (status & DMA_RX_ERR_RUNT) sp->stats.rx_over_errors++;
1552 + if (status & DMA_RX_ERR_DESC) sp->stats.rx_over_errors++;
1553 +
1554 + } else {
1555 + /* alloc new buffer. */
1556 + skb_new = dev_alloc_skb(AR2313_BUFSIZE + RX_OFFSET + 128);
1557 + if (skb_new != NULL) {
1558 +
1559 + skb = sp->rx_skb[idx];
1560 + /* set skb */
1561 + skb_put(skb, ((status >> DMA_RX_LEN_SHIFT) & 0x3fff) - CRC_LEN);
1562 +
1563 +#ifdef CONFIG_MERLOT
1564 + if ((dev->am_pkt_handler == NULL) ||
1565 + (dev->am_pkt_handler(skb, dev) == 0)) {
1566 +#endif
1567 + sp->stats.rx_bytes += skb->len;
1568 + skb->protocol = eth_type_trans(skb, dev);
1569 + /* pass the packet to upper layers */
1570 +
1571 +#ifdef CONFIG_MERLOT
1572 + if (dev->asap_netif_rx)
1573 + dev->asap_netif_rx(skb);
1574 + else
1575 +#endif
1576 + netif_rx(skb);
1577 +#ifdef CONFIG_MERLOT
1578 + }
1579 +#endif
1580 + skb_new->dev = dev;
1581 + /* 16 bit align */
1582 + skb_reserve(skb_new, RX_OFFSET+32);
1583 + /* reset descriptor's curr_addr */
1584 + rxdesc->addr = virt_to_phys(skb_new->data);
1585 +
1586 + sp->stats.rx_packets++;
1587 + sp->rx_skb[idx] = skb_new;
1588 +
1589 + } else {
1590 + sp->stats.rx_dropped++;
1591 + }
1592 + }
1593 +
1594 + rxdesc->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
1595 + DMA_RX1_CHAINED);
1596 + rxdesc->status = DMA_RX_OWN;
1597 +
1598 + idx = DSC_NEXT(idx);
1599 + }
1600 +
1601 + sp->cur_rx = idx;
1602 +
1603 + return rval;
1604 +}
1605 +
1606 +
1607 +static void ar2313_tx_int(struct net_device *dev)
1608 +{
1609 + struct ar2313_private *sp = dev->priv;
1610 + u32 idx;
1611 + struct sk_buff *skb;
1612 + ar2313_descr_t *txdesc;
1613 + unsigned int status=0;
1614 +
1615 + idx = sp->tx_csm;
1616 +
1617 + while (idx != sp->tx_prd) {
1618 +
1619 + txdesc = &sp->tx_ring[idx];
1620 +
1621 +#if DEBUG_TX
1622 + printk("%s: TXINT: csm=%d idx=%d prd=%d status=%x devcs=%x addr=%08x descr=%x\n",
1623 + dev->name, sp->tx_csm, idx, sp->tx_prd,
1624 + txdesc->status, txdesc->devcs, txdesc->addr, txdesc->descr);
1625 +#endif /* DEBUG */
1626 +
1627 + if ((status = txdesc->status) & DMA_TX_OWN) {
1628 + /* ar2313 dma still owns descr */
1629 + break;
1630 + }
1631 + /* done with this descriptor */
1632 + txdesc->status = 0;
1633 +
1634 + if (status & DMA_TX_ERROR){
1635 + sp->stats.tx_errors++;
1636 + sp->stats.tx_dropped++;
1637 + if(status & DMA_TX_ERR_UNDER)
1638 + sp->stats.tx_fifo_errors++;
1639 + if(status & DMA_TX_ERR_HB)
1640 + sp->stats.tx_heartbeat_errors++;
1641 + if(status & (DMA_TX_ERR_LOSS |
1642 + DMA_TX_ERR_LINK))
1643 + sp->stats.tx_carrier_errors++;
1644 + if (status & (DMA_TX_ERR_LATE|
1645 + DMA_TX_ERR_COL |
1646 + DMA_TX_ERR_JABBER |
1647 + DMA_TX_ERR_DEFER))
1648 + sp->stats.tx_aborted_errors++;
1649 + } else {
1650 + /* transmit OK */
1651 + sp->stats.tx_packets++;
1652 + }
1653 +
1654 + skb = sp->tx_skb[idx];
1655 + sp->tx_skb[idx] = NULL;
1656 + idx = DSC_NEXT(idx);
1657 + sp->stats.tx_bytes += skb->len;
1658 + dev_kfree_skb_irq(skb);
1659 + }
1660 +
1661 + sp->tx_csm = idx;
1662 +
1663 + return;
1664 +}
1665 +
1666 +
1667 +static void
1668 +rx_tasklet_func(unsigned long data)
1669 +{
1670 + struct net_device *dev = (struct net_device *) data;
1671 + struct ar2313_private *sp = dev->priv;
1672 +
1673 + if (sp->unloading) {
1674 + return;
1675 + }
1676 +
1677 + if (ar2313_rx_int(dev)) {
1678 + tasklet_hi_schedule(&sp->rx_tasklet);
1679 + }
1680 + else {
1681 + unsigned long flags;
1682 + spin_lock_irqsave(&sp->lock, flags);
1683 + sp->dma_regs->intr_ena |= DMA_STATUS_RI;
1684 + spin_unlock_irqrestore(&sp->lock, flags);
1685 + }
1686 +}
1687 +
1688 +static void
1689 +rx_schedule(struct net_device *dev)
1690 +{
1691 + struct ar2313_private *sp = dev->priv;
1692 +
1693 + sp->dma_regs->intr_ena &= ~DMA_STATUS_RI;
1694 +
1695 + tasklet_hi_schedule(&sp->rx_tasklet);
1696 +}
1697 +
1698 +static irqreturn_t ar2313_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
1699 +{
1700 + struct net_device *dev = (struct net_device *)dev_id;
1701 + struct ar2313_private *sp = dev->priv;
1702 + unsigned int status, enabled;
1703 +
1704 + /* clear interrupt */
1705 + /*
1706 + * Don't clear RI bit if currently disabled.
1707 + */
1708 + status = sp->dma_regs->status;
1709 + enabled = sp->dma_regs->intr_ena;
1710 + sp->dma_regs->status = status & enabled;
1711 +
1712 + if (status & DMA_STATUS_NIS) {
1713 + /* normal status */
1714 + /*
1715 + * Don't schedule rx processing if interrupt
1716 + * is already disabled.
1717 + */
1718 + if (status & enabled & DMA_STATUS_RI) {
1719 + /* receive interrupt */
1720 + rx_schedule(dev);
1721 + }
1722 + if (status & DMA_STATUS_TI) {
1723 + /* transmit interrupt */
1724 + ar2313_tx_int(dev);
1725 + }
1726 + }
1727 +
1728 + if (status & DMA_STATUS_AIS) {
1729 +#if DEBUG_INT
1730 + printk("%s: AIS set %08x & %x\n", __FUNCTION__,
1731 + status, (DMA_STATUS_FBE | DMA_STATUS_TPS));
1732 +#endif
1733 + /* abnormal status */
1734 + if (status & (DMA_STATUS_FBE | DMA_STATUS_TPS)) {
1735 + ar2313_restart(dev);
1736 + }
1737 + }
1738 + return IRQ_RETVAL(0);
1739 +}
1740 +
1741 +
1742 +static int ar2313_open(struct net_device *dev)
1743 +{
1744 + struct ar2313_private *sp;
1745 +
1746 + sp = dev->priv;
1747 +
1748 + dev->mtu = 1500;
1749 + netif_start_queue(dev);
1750 +
1751 + sp->eth_regs->mac_control |= MAC_CONTROL_RE;
1752 +
1753 + AR2313_MOD_INC_USE_COUNT;
1754 +
1755 + return 0;
1756 +}
1757 +
1758 +static void ar2313_halt(struct net_device *dev)
1759 +{
1760 + struct ar2313_private *sp = dev->priv;
1761 + int j;
1762 +
1763 + tasklet_disable(&sp->rx_tasklet);
1764 +
1765 + /* kill the MAC */
1766 + sp->eth_regs->mac_control &= ~(MAC_CONTROL_RE | /* disable Receives */
1767 + MAC_CONTROL_TE); /* disable Transmits */
1768 + /* stop dma */
1769 + sp->dma_regs->control = 0;
1770 + sp->dma_regs->bus_mode = DMA_BUS_MODE_SWR;
1771 +
1772 + /* place phy and MAC in reset */
1773 + *sp->int_regs |= (ar_int_mac_mask | ar_int_phy_mask);
1774 +
1775 + /* free buffers on tx ring */
1776 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
1777 + struct sk_buff *skb;
1778 + ar2313_descr_t *txdesc;
1779 +
1780 + txdesc = &sp->tx_ring[j];
1781 + txdesc->descr = 0;
1782 +
1783 + skb = sp->tx_skb[j];
1784 + if (skb) {
1785 + dev_kfree_skb(skb);
1786 + sp->tx_skb[j] = NULL;
1787 + }
1788 + }
1789 +}
1790 +
1791 +/*
1792 + * close should do nothing. Here's why. It's called when
1793 + * 'ifconfig bond0 down' is run. If it calls free_irq then
1794 + * the irq is gone forever ! When bond0 is made 'up' again,
1795 + * the ar2313_open () does not call request_irq (). Worse,
1796 + * the call to ar2313_halt() generates a WDOG reset due to
1797 + * the write to 'sp->int_regs' and the box reboots.
1798 + * Commenting this out is good since it allows the
1799 + * system to resume when bond0 is made up again.
1800 + */
1801 +static int ar2313_close(struct net_device *dev)
1802 +{
1803 +#if 0
1804 + /*
1805 + * Disable interrupts
1806 + */
1807 + disable_irq(dev->irq);
1808 +
1809 + /*
1810 + * Without (or before) releasing irq and stopping hardware, this
1811 + * is an absolute non-sense, by the way. It will be reset instantly
1812 + * by the first irq.
1813 + */
1814 + netif_stop_queue(dev);
1815 +
1816 + /* stop the MAC and DMA engines */
1817 + ar2313_halt(dev);
1818 +
1819 + /* release the interrupt */
1820 + free_irq(dev->irq, dev);
1821 +
1822 +#endif
1823 + AR2313_MOD_DEC_USE_COUNT;
1824 + return 0;
1825 +}
1826 +
1827 +static int ar2313_start_xmit(struct sk_buff *skb, struct net_device *dev)
1828 +{
1829 + struct ar2313_private *sp = dev->priv;
1830 + ar2313_descr_t *td;
1831 + u32 idx;
1832 +
1833 + idx = sp->tx_prd;
1834 + td = &sp->tx_ring[idx];
1835 +
1836 + if (td->status & DMA_TX_OWN) {
1837 +#if DEBUG_TX
1838 + printk("%s: No space left to Tx\n", __FUNCTION__);
1839 +#endif
1840 + /* free skbuf and lie to the caller that we sent it out */
1841 + sp->stats.tx_dropped++;
1842 + dev_kfree_skb(skb);
1843 +
1844 + /* restart transmitter in case locked */
1845 + sp->dma_regs->xmt_poll = 0;
1846 + return 0;
1847 + }
1848 +
1849 + /* Setup the transmit descriptor. */
1850 + td->devcs = ((skb->len << DMA_TX1_BSIZE_SHIFT) |
1851 + (DMA_TX1_LS|DMA_TX1_IC|DMA_TX1_CHAINED));
1852 + td->addr = virt_to_phys(skb->data);
1853 + td->status = DMA_TX_OWN;
1854 +
1855 + /* kick transmitter last */
1856 + sp->dma_regs->xmt_poll = 0;
1857 +
1858 +#if DEBUG_TX
1859 + printk("index %d\n", idx);
1860 + printk("TX status %08x\n", td->status);
1861 + printk("TX devcs %08x\n", td->devcs );
1862 + printk("TX addr %08x\n", td->addr );
1863 + printk("TX descr %08x\n", td->descr );
1864 +#endif
1865 +
1866 + sp->tx_skb[idx] = skb;
1867 + idx = DSC_NEXT(idx);
1868 + sp->tx_prd = idx;
1869 +
1870 + //dev->trans_start = jiffies;
1871 +
1872 + return 0;
1873 +}
1874 +
1875 +static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
1876 +{
1877 + struct ar2313_private *np = dev->priv;
1878 + u32 tmp;
1879 +
1880 + ecmd->supported =
1881 + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
1882 + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1883 + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
1884 +
1885 + ecmd->port = PORT_TP;
1886 + /* only supports internal transceiver */
1887 + ecmd->transceiver = XCVR_INTERNAL;
1888 + /* not sure what this is for */
1889 + ecmd->phy_address = 1;
1890 +
1891 + ecmd->advertising = ADVERTISED_MII;
1892 + tmp = armiiread(np->phy, MII_ADVERTISE);
1893 + if (tmp & ADVERTISE_10HALF)
1894 + ecmd->advertising |= ADVERTISED_10baseT_Half;
1895 + if (tmp & ADVERTISE_10FULL)
1896 + ecmd->advertising |= ADVERTISED_10baseT_Full;
1897 + if (tmp & ADVERTISE_100HALF)
1898 + ecmd->advertising |= ADVERTISED_100baseT_Half;
1899 + if (tmp & ADVERTISE_100FULL)
1900 + ecmd->advertising |= ADVERTISED_100baseT_Full;
1901 +
1902 + tmp = armiiread(np->phy, MII_BMCR);
1903 + if (tmp & BMCR_ANENABLE) {
1904 + ecmd->advertising |= ADVERTISED_Autoneg;
1905 + ecmd->autoneg = AUTONEG_ENABLE;
1906 + } else {
1907 + ecmd->autoneg = AUTONEG_DISABLE;
1908 + }
1909 +
1910 + if (ecmd->autoneg == AUTONEG_ENABLE) {
1911 + tmp = armiiread(np->phy, MII_LPA);
1912 + if (tmp & (LPA_100FULL|LPA_10FULL)) {
1913 + ecmd->duplex = DUPLEX_FULL;
1914 + } else {
1915 + ecmd->duplex = DUPLEX_HALF;
1916 + }
1917 + if (tmp & (LPA_100FULL|LPA_100HALF)) {
1918 + ecmd->speed = SPEED_100;
1919 + } else {
1920 + ecmd->speed = SPEED_10;
1921 + }
1922 + } else {
1923 + if (tmp & BMCR_FULLDPLX) {
1924 + ecmd->duplex = DUPLEX_FULL;
1925 + } else {
1926 + ecmd->duplex = DUPLEX_HALF;
1927 + }
1928 + if (tmp & BMCR_SPEED100) {
1929 + ecmd->speed = SPEED_100;
1930 + } else {
1931 + ecmd->speed = SPEED_10;
1932 + }
1933 + }
1934 +
1935 + /* ignore maxtxpkt, maxrxpkt for now */
1936 +
1937 + return 0;
1938 +}
1939 +
1940 +static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
1941 +{
1942 + struct ar2313_private *np = dev->priv;
1943 + u32 tmp;
1944 +
1945 + if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
1946 + return -EINVAL;
1947 + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1948 + return -EINVAL;
1949 + if (ecmd->port != PORT_TP)
1950 + return -EINVAL;
1951 + if (ecmd->transceiver != XCVR_INTERNAL)
1952 + return -EINVAL;
1953 + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1954 + return -EINVAL;
1955 + /* ignore phy_address, maxtxpkt, maxrxpkt for now */
1956 +
1957 + /* WHEW! now lets bang some bits */
1958 +
1959 + tmp = armiiread(np->phy, MII_BMCR);
1960 + if (ecmd->autoneg == AUTONEG_ENABLE) {
1961 + /* turn on autonegotiation */
1962 + tmp |= BMCR_ANENABLE;
1963 + printk("%s: Enabling auto-neg\n", dev->name);
1964 + } else {
1965 + /* turn off auto negotiation, set speed and duplexity */
1966 + tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
1967 + if (ecmd->speed == SPEED_100)
1968 + tmp |= BMCR_SPEED100;
1969 + if (ecmd->duplex == DUPLEX_FULL)
1970 + tmp |= BMCR_FULLDPLX;
1971 + printk("%s: Hard coding %d/%s\n", dev->name,
1972 + (ecmd->speed == SPEED_100)? 100:10,
1973 + (ecmd->duplex == DUPLEX_FULL)? "full":"half");
1974 + }
1975 + armiiwrite(np->phy, MII_BMCR, tmp);
1976 + np->phyData = 0;
1977 + return 0;
1978 +}
1979 +
1980 +static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1981 +{
1982 + struct ar2313_private *np = dev->priv;
1983 + u32 cmd;
1984 +
1985 + if (get_user(cmd, (u32 *)useraddr))
1986 + return -EFAULT;
1987 +
1988 + switch (cmd) {
1989 + /* get settings */
1990 + case ETHTOOL_GSET: {
1991 + struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1992 + spin_lock_irq(&np->lock);
1993 + netdev_get_ecmd(dev, &ecmd);
1994 + spin_unlock_irq(&np->lock);
1995 + if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
1996 + return -EFAULT;
1997 + return 0;
1998 + }
1999 + /* set settings */
2000 + case ETHTOOL_SSET: {
2001 + struct ethtool_cmd ecmd;
2002 + int r;
2003 + if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
2004 + return -EFAULT;
2005 + spin_lock_irq(&np->lock);
2006 + r = netdev_set_ecmd(dev, &ecmd);
2007 + spin_unlock_irq(&np->lock);
2008 + return r;
2009 + }
2010 + /* restart autonegotiation */
2011 + case ETHTOOL_NWAY_RST: {
2012 + int tmp;
2013 + int r = -EINVAL;
2014 + /* if autoneg is off, it's an error */
2015 + tmp = armiiread(np->phy, MII_BMCR);
2016 + if (tmp & BMCR_ANENABLE) {
2017 + tmp |= (BMCR_ANRESTART);
2018 + armiiwrite(np->phy, MII_BMCR, tmp);
2019 + r = 0;
2020 + }
2021 + return r;
2022 + }
2023 + /* get link status */
2024 + case ETHTOOL_GLINK: {
2025 + struct ethtool_value edata = {ETHTOOL_GLINK};
2026 + edata.data = (armiiread(np->phy, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
2027 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
2028 + return -EFAULT;
2029 + return 0;
2030 + }
2031 + }
2032 +
2033 + return -EOPNOTSUPP;
2034 +}
2035 +
2036 +static int ar2313_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2037 +{
2038 + struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
2039 +
2040 + switch (cmd) {
2041 + case SIOCDEVPRIVATE: {
2042 + struct ar2313_cmd scmd;
2043 +
2044 + if (copy_from_user(&scmd, ifr->ifr_data, sizeof(scmd)))
2045 + return -EFAULT;
2046 +
2047 +#if DEBUG
2048 + printk("%s: ioctl devprivate c=%d a=%x l=%d m=%d d=%x,%x\n",
2049 + dev->name, scmd.cmd,
2050 + scmd.address, scmd.length,
2051 + scmd.mailbox, scmd.data[0], scmd.data[1]);
2052 +#endif /* DEBUG */
2053 +
2054 + switch (scmd.cmd) {
2055 + case AR2313_READ_DATA:
2056 + if(scmd.length==4){
2057 + scmd.data[0] = *((u32*)scmd.address);
2058 + } else if(scmd.length==2) {
2059 + scmd.data[0] = *((u16*)scmd.address);
2060 + } else if (scmd.length==1) {
2061 + scmd.data[0] = *((u8*)scmd.address);
2062 + } else {
2063 + return -EOPNOTSUPP;
2064 + }
2065 + if(copy_to_user(ifr->ifr_data, &scmd, sizeof(scmd)))
2066 + return -EFAULT;
2067 + break;
2068 +
2069 + case AR2313_WRITE_DATA:
2070 + if(scmd.length==4){
2071 + *((u32*)scmd.address) = scmd.data[0];
2072 + } else if(scmd.length==2) {
2073 + *((u16*)scmd.address) = scmd.data[0];
2074 + } else if (scmd.length==1) {
2075 + *((u8*)scmd.address) = scmd.data[0];
2076 + } else {
2077 + return -EOPNOTSUPP;
2078 + }
2079 + break;
2080 +
2081 + case AR2313_GET_VERSION:
2082 + // SAMEER: sprintf((char*) &scmd, "%s", ARUBA_VERSION);
2083 + if(copy_to_user(ifr->ifr_data, &scmd, sizeof(scmd)))
2084 + return -EFAULT;
2085 + break;
2086 +
2087 + default:
2088 + return -EOPNOTSUPP;
2089 + }
2090 + return 0;
2091 + }
2092 +
2093 + case SIOCETHTOOL:
2094 + return netdev_ethtool_ioctl(dev, (void *) ifr->ifr_data);
2095 +
2096 + case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2097 + data->phy_id = 1;
2098 + /* Fall Through */
2099 +
2100 + case SIOCGMIIREG: /* Read MII PHY register. */
2101 + case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */
2102 + data->val_out = armiiread(data->phy_id & 0x1f,
2103 + data->reg_num & 0x1f);
2104 + return 0;
2105 + case SIOCSMIIREG: /* Write MII PHY register. */
2106 + case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */
2107 + if (!capable(CAP_NET_ADMIN))
2108 + return -EPERM;
2109 + armiiwrite(data->phy_id & 0x1f,
2110 + data->reg_num & 0x1f, data->val_in);
2111 + return 0;
2112 +
2113 + case SIOCSIFHWADDR:
2114 + if (copy_from_user(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
2115 + return -EFAULT;
2116 + return 0;
2117 +
2118 + case SIOCGIFHWADDR:
2119 + if (copy_to_user(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
2120 + return -EFAULT;
2121 + return 0;
2122 +
2123 + default:
2124 + break;
2125 + }
2126 +
2127 + return -EOPNOTSUPP;
2128 +}
2129 +
2130 +static struct net_device_stats *ar2313_get_stats(struct net_device *dev)
2131 +{
2132 + struct ar2313_private *sp = dev->priv;
2133 + return &sp->stats;
2134 +}
2135 +
2136 +static short
2137 +armiiread(short phy, short reg)
2138 +{
2139 + volatile ETHERNET_STRUCT * ethernet;
2140 +
2141 + ethernet = (volatile ETHERNET_STRUCT *)ETHERNET_BASE; /* always MAC 0 */
2142 + ethernet->mii_addr = ((reg << MII_ADDR_REG_SHIFT) |
2143 + (phy << MII_ADDR_PHY_SHIFT));
2144 + while (ethernet->mii_addr & MII_ADDR_BUSY);
2145 + return (ethernet->mii_data >> MII_DATA_SHIFT);
2146 +}
2147 +
2148 +static void
2149 +armiiwrite(short phy, short reg, short data)
2150 +{
2151 + volatile ETHERNET_STRUCT * ethernet;
2152 +
2153 + ethernet = (volatile ETHERNET_STRUCT *)ETHERNET_BASE; /* always MAC 0 */
2154 + while (ethernet->mii_addr & MII_ADDR_BUSY);
2155 + ethernet->mii_data = data << MII_DATA_SHIFT;
2156 + ethernet->mii_addr = ((reg << MII_ADDR_REG_SHIFT) |
2157 + (phy << MII_ADDR_PHY_SHIFT) |
2158 + MII_ADDR_WRITE);
2159 +}
2160 +
This page took 0.129068 seconds and 5 git commands to generate.