[openwrt.git] / target / linux / ixp4xx-2.6 / patches / 100-npe_driver.patch
1 diff -Nur linux-2.6.17/Documentation/networking/ixp4xx/IxNpeMicrocode.h linux-2.6.17-owrt/Documentation/networking/ixp4xx/IxNpeMicrocode.h
2 --- linux-2.6.17/Documentation/networking/ixp4xx/IxNpeMicrocode.h 1970-01-01 01:00:00.000000000 +0100
3 +++ linux-2.6.17-owrt/Documentation/networking/ixp4xx/IxNpeMicrocode.h 2006-10-27 12:48:52.000000000 +0200
4 @@ -0,0 +1,149 @@
5 +/*
6 + * IxNpeMicrocode.h - Header file for compiling the Intel microcode C file
7 + *
8 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
9 + *
10 + * This file is released under the GPLv2
11 + *
12 + *
13 + * compile with
14 + *
15 + * gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode
16 + *
17 + * Executing the resulting binary on your build host creates the
18 + * "NPE-[ABC].xxxxxxxx" files containing the selected microcode.
19 + * The options -le and -be control the output format of the microcode;
20 + * the default is -be, independent of the host endianness.
21 + *
22 + * The download functions in the driver are smart enough to discover
23 + * and correct firmware with the wrong endianness.
24 + *
25 + * Fetch IxNpeMicrocode.c from the Intel Access Library.
26 + * It will include this header.
27 + *
28 + * Select images for each NPE from the following
29 + * (C++ comments are used for easy uncommenting ...)
30 + */
31 +
32 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
33 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
34 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
35 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_TSLOT_SWITCH
36 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
37 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
38 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL
39 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_2_PORT
40 +// #define IX_NPEDL_NPEIMAGE_NPEA_DMA
41 +// #define IX_NPEDL_NPEIMAGE_NPEA_ATM_MPHY_12_PORT
42 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_MPHY_1_PORT
43 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_SPHY_1_PORT
44 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0
45 +// #define IX_NPEDL_NPEIMAGE_NPEA_WEP
46 +
47 +
48 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
49 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
50 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
51 +// #define IX_NPEDL_NPEIMAGE_NPEB_DMA
52 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
53 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
54 +#define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL
55 +
56 +
57 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
58 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
59 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
60 +// #define IX_NPEDL_NPEIMAGE_NPEC_DMA
61 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_SPAN
62 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_FIREWALL
63 +#define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_CCM_ETH
64 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_ETH_LEARN_FILTER_SPAN_FIREWALL
65 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
66 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
67 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL
68 +
69 +
70 +#include <stdio.h>
71 +#include <unistd.h>
72 +#include <stdlib.h>
73 +#include <netinet/in.h>
74 +#include <sys/types.h>
75 +#include <sys/stat.h>
76 +#include <fcntl.h>
77 +#include <errno.h>
78 +#include <endian.h>
79 +#include <byteswap.h>
80 +#include <string.h>
81 +
82 +#if __BYTE_ORDER == __LITTLE_ENDIAN
83 +#define to_le32(x) (x)
84 +#define to_be32(x) bswap_32(x)
85 +#else
86 +#define to_be32(x) (x)
87 +#define to_le32(x) bswap_32(x)
88 +#endif
89 +
90 +struct dl_image {
91 + unsigned magic;
92 + unsigned id;
93 + unsigned size;
94 + unsigned data[0];
95 +};
96 +
97 +const unsigned IxNpeMicrocode_array[];
98 +
99 +int main(int argc, char *argv[])
100 +{
101 + struct dl_image *image = (struct dl_image *)IxNpeMicrocode_array;
102 + int imgsiz, i, fd, cnt;
103 + const unsigned *arrayptr = IxNpeMicrocode_array;
104 + const char *names[] = { "IXP425", "IXP465", "unknown" };
105 + int bigendian = 1;
106 +
107 + if (argc > 1) {
108 + if (!strcmp(argv[1], "-le"))
109 + bigendian = 0;
110 + else if (!strcmp(argv[1], "-be"))
111 + bigendian = 1;
112 + else {
113 + printf("Usage: %s <-le|-be>\n", argv[0]);
114 + return EXIT_FAILURE;
115 + }
116 + }
117 + printf("Output format is %s endian\n", bigendian ? "big" : "little");
118 +
119 + for (image = (struct dl_image *)arrayptr, cnt=0;
120 + (image->id != 0xfeedf00d) && (image->magic == 0xfeedf00d);
121 + image = (struct dl_image *)(arrayptr), cnt++)
122 + {
123 + unsigned char field[4];
124 + imgsiz = image->size + 3;
125 + *(unsigned*)field = to_be32(image->id);
126 + char filename[40], slnk[10];
127 +
128 + sprintf(filename, "NPE-%c.%08x", (field[0] & 0xf) + 'A',
129 + image->id);
130 + sprintf(slnk, "NPE-%c", (field[0] & 0xf) + 'A');
131 + printf("Writing image: %s.NPE_%c Func: %2x Rev: %02x.%02x "
132 + "Size: %5d to: '%s'\n",
133 + names[field[0] >> 4], (field[0] & 0xf) + 'A',
134 + field[1], field[2], field[3], imgsiz*4, filename);
135 + fd = open(filename, O_CREAT | O_RDWR | O_TRUNC, 0644);
136 + if (fd >= 0) {
137 + for (i=0; i<imgsiz; i++) {
138 + *(unsigned*)field = bigendian ?
139 + to_be32(arrayptr[i]) :
140 + to_le32(arrayptr[i]);
141 + write(fd, field, sizeof(field));
142 + }
143 + close(fd);
144 + unlink(slnk);
145 + symlink(filename, slnk);
146 + } else {
147 + perror(filename);
148 + }
149 + arrayptr += imgsiz;
150 + }
151 + close(fd);
152 + return 0;
153 +}
154 diff -Nur linux-2.6.17/Documentation/networking/ixp4xx/README linux-2.6.17-owrt/Documentation/networking/ixp4xx/README
155 --- linux-2.6.17/Documentation/networking/ixp4xx/README 1970-01-01 01:00:00.000000000 +0100
156 +++ linux-2.6.17-owrt/Documentation/networking/ixp4xx/README 2006-10-27 12:48:52.000000000 +0200
157 @@ -0,0 +1,72 @@
158 +Information about the networking driver using the IXP4XX CPU-internal NPEs
159 +and queue manager.
160 +
161 +If this driver is used, the IAL (Intel Access Library) must not be loaded.
162 +However, the IAL may be loaded if these modules are unloaded:
163 + ixp4xx_npe.ko, ixp4xx_qmgr.ko, ixp4xx_mac.ko
164 +
165 +This also means that HW crypto acceleration does NOT work when using this
166 +driver, until I have finished my crypto driver for NPE-C.
167 +
168 +
169 +Adapting to your custom board:
170 +------------------------------
171 +Use "arch/arm/mach-ixp4xx/ixdp425-setup.c" as a template:
172 +
173 +In "static struct mac_plat_info", adapt the entry "phy_id" to your needs
174 +(ask your hardware designer about the PHY ID).
175 +If in doubt, try the values from the ixdp425 board.
176 +
177 +The order of "&mac0" and "&mac1" in the board's platform device list
178 +determines which of them becomes eth0 and eth1.
179 +
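For illustration, a minimal sketch of what the platform data for a custom board might look like, modelled on the ixdp425 additions later in this patch (the phy_id value 4 is a placeholder for your board's PHY address; the EthB base and queue ids are the ones the stock setup uses):

    /* sketch only -- phy_id (and possibly the queue ids) are board-specific */
    static struct resource res_mac0 = {
            .start = IXP4XX_EthB_BASE_PHYS,
            .end   = IXP4XX_EthB_BASE_PHYS + 0x1ff,
            .flags = IORESOURCE_MEM,
    };

    static struct mac_plat_info plat_mac0 = {
            .npe_id = 1,    /* MAC attached to NPE-B */
            .phy_id = 4,    /* example: PHY at MDIO address 4 */
            .eth_id = 0,
            .rxq_id = 27,
            .txq_id = 24,
    };

    static struct platform_device mac0 = {
            .name               = "ixp4xx_mac",
            .id                 = 0,
            .dev.platform_data  = &plat_mac0,
            .num_resources      = 1,
            .resource           = &res_mac0,
    };

Register it together with the board's other devices, e.g. by adding "&mac0" to the array passed to platform_add_devices() in your board setup, as the ixdp425 code below does.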
180 +
181 +The Microcode:
182 +---------------
183 +
184 +The download functions below are endianness-independent.
185 +If the image comes in the wrong endianness, it is swapped automatically.
186 +
187 +Solution 1)
188 + Enable "CONFIG_HOTPLUG" and "CONFIG_FW_LOADER", and build
189 + IXP4XX_NPE as a module.
190 + The default hotplug script will load the Firmware from
191 + /usr/lib/hotplug/firmware/NPE-[ABC]
192 + see Documentation/firmware_class/hotplug-script
193 +
194 + You should make sure that $ACTION is "add" and $SUBSYSTEM is "firmware"
195 + to avoid unnecessary calls:
196 + test $ACTION = "remove" -o $SUBSYSTEM != "firmware" && exit
197 +
198 +Solution 2)
199 + Create a char-dev: "mknod /dev/ixp4xx_ucode c 10 184".
200 + If you are using "udev" or busybox "mdev", they will do this
201 + for you automatically during module load.
202 + cat the microcode into it:
203 + cat /usr/lib/hotplug/firmware/NPE-* > /dev/ixp4xx_ucode
204 + This also works if the driver is linked into the kernel.
205 +
206 +Having a mix of both (e.g. solution 1 for NPE-B and solution 2 for NPE-C)
207 +is perfectly ok and works.
208 +
209 +The state of the NPEs can be seen and changed at:
210 +/sys/bus/platform/devices/ixp4xx_npe.X/state
211 +
212 +
213 +Obtaining the Microcode:
214 +------------------------
215 +1) IxNpeMicrocode.h in this directory:
216 + Download IPL_IXP400NPELIBRARYWITHCRYPTO-2_1.ZIP from Intel.
217 + It contains the microcode file IxNpeMicrocode.c.
218 + Read the license!
219 + Read the top of IxNpeMicrocode.h for more details.
220 + Compile it with "gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode" on your host.
221 + The resulting images can be moved to "/usr/lib/hotplug/firmware".
222 + The endianness of the written microcode can be controlled by the
223 + switches -le and -be. The default is big-endian.
224 +
225 +2) mc_grab.c in this directory:
226 + Compile and execute it either on the host or on the target
227 + to grab the microcode from a binary image like the RedBoot bootloader.
228 + (big-endian images only)
229 +
230 diff -Nur linux-2.6.17/Documentation/networking/ixp4xx/mc_grab.c linux-2.6.17-owrt/Documentation/networking/ixp4xx/mc_grab.c
231 --- linux-2.6.17/Documentation/networking/ixp4xx/mc_grab.c 1970-01-01 01:00:00.000000000 +0100
232 +++ linux-2.6.17-owrt/Documentation/networking/ixp4xx/mc_grab.c 2006-10-27 12:48:52.000000000 +0200
233 @@ -0,0 +1,97 @@
234 +/*
235 + * mc_grab.c - grabs IXP4XX microcode from a binary data stream,
236 + * e.g. the RedBoot bootloader.
237 + *
238 + * usage: mc_grab 1010200 2010200 < /dev/mtd/0 > /dev/misc/npe
239 + *
240 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
241 + *
242 + * This file is released under the GPLv2
243 + */
244 +
245 +
246 +#include <stdlib.h>
247 +#include <stdio.h>
248 +#include <unistd.h>
249 +#include <netinet/in.h>
250 +#include <sys/types.h>
251 +#include <sys/stat.h>
252 +#include <fcntl.h>
253 +#include <errno.h>
254 +#include <string.h>
255 +
256 +#define MAX_IMG 6
257 +
258 +static void print_mc_info(unsigned id, int siz)
259 +{
260 + unsigned char buf[sizeof(unsigned)];
261 + *(unsigned*)buf = id;
262 + unsigned idx;
263 + const char *names[] = { "IXP425", "IXP465", "unknown" };
264 +
265 + idx = (buf[0] >> 4) < 2 ? (buf[0] >> 4) : 2;
266 +
267 + fprintf(stderr, "Device: %s:NPE_%c Func: %2x Rev: %02x.%02x "
268 + "Size: %5d bytes ID:%08x\n", names[idx], (buf[0] & 0xf)+'A',
269 + buf[1], buf[2], buf[3], siz*4, ntohl(id));
270 +}
271 +
272 +int main(int argc, char *argv[])
273 +{
274 + int i,j;
275 + unsigned char buf[sizeof(unsigned)];
276 + unsigned magic = htonl(0xfeedf00d);
277 + unsigned id, my_ids[MAX_IMG+1], siz, sizbe;
278 + int ret=1, verbose=0;
279 +
280 + for (i=0, j=0; i<argc-1 && j<MAX_IMG; i++) {
281 + if (!strcmp(argv[i+1], "-v"))
282 + verbose = 1;
283 + else
284 + my_ids[j++] = htonl(strtoul(argv[i+1], NULL, 16));
285 + }
286 + my_ids[j] = 0;
287 + if (my_ids[0] == 0 && !verbose) {
288 + fprintf(stderr, "Usage: %s <-v> [ID1] [ID2] [IDn]\n", argv[0]);
289 + return 1;
290 + }
291 +
292 + while ((ret=read(0, buf, sizeof(unsigned))) == sizeof(unsigned)) {
293 + if (*(unsigned*)buf != magic)
294 + continue;
295 + if ((ret=read(0, buf, sizeof(unsigned))) != sizeof(unsigned) )
296 + break;
297 + id = *(unsigned*)buf;
298 +
299 + if (read(0, buf, sizeof(siz)) != sizeof(siz) )
300 + break;
301 + sizbe = *(unsigned*)buf;
302 + siz = ntohl(sizbe);
303 +
304 + if (verbose)
305 + print_mc_info(id, siz);
306 +
307 + for(i=0; my_ids[i]; i++)
308 + if (id == my_ids[i])
309 + break;
310 + if (!my_ids[i])
311 + continue;
312 +
313 + if (!verbose)
314 + print_mc_info(id, siz);
315 +
316 + write(1, &magic, sizeof(magic));
317 + write(1, &id, sizeof(id));
318 + write(1, &sizbe, sizeof(sizbe));
319 + for (i=0; i<siz; i++) {
320 + if (read(0, buf, sizeof(unsigned)) != sizeof(unsigned))
321 + break;
322 + write(1, buf, sizeof(unsigned));
323 + }
324 + if (i != siz)
325 + break;
326 + }
327 + if (ret)
328 + fprintf(stderr, "Error reading Microcode\n");
329 + return ret;
330 +}
331 diff -Nur linux-2.6.17/arch/arm/mach-ixp4xx/common.c linux-2.6.17-owrt/arch/arm/mach-ixp4xx/common.c
332 --- linux-2.6.17/arch/arm/mach-ixp4xx/common.c 2006-06-18 03:49:35.000000000 +0200
333 +++ linux-2.6.17-owrt/arch/arm/mach-ixp4xx/common.c 2006-10-27 12:50:32.000000000 +0200
334 @@ -341,6 +341,97 @@
335 &ixp46x_i2c_controller
336 };
337
338 +static struct npe_plat_data npea = {
339 + .name = "NPE-A",
340 + .data_size = 0x800,
341 + .inst_size = 0x1000,
342 + .id = 0,
343 +};
344 +
345 +static struct npe_plat_data npeb = {
346 + .name = "NPE-B",
347 + .data_size = 0x800,
348 + .inst_size = 0x800,
349 + .id = 1,
350 +};
351 +
352 +static struct npe_plat_data npec = {
353 + .name = "NPE-C",
354 + .data_size = 0x800,
355 + .inst_size = 0x800,
356 + .id = 2,
357 +};
358 +
359 +static struct resource res_npea = {
360 + .start = IXP4XX_NPEA_BASE_PHYS,
361 + .end = IXP4XX_NPEA_BASE_PHYS + 0xfff,
362 + .flags = IORESOURCE_MEM,
363 +};
364 +
365 +static struct resource res_npeb = {
366 + .start = IXP4XX_NPEB_BASE_PHYS,
367 + .end = IXP4XX_NPEB_BASE_PHYS + 0xfff,
368 + .flags = IORESOURCE_MEM,
369 +};
370 +
371 +static struct resource res_npec = {
372 + .start = IXP4XX_NPEC_BASE_PHYS,
373 + .end = IXP4XX_NPEC_BASE_PHYS + 0xfff,
374 + .flags = IORESOURCE_MEM,
375 +};
376 +
377 +static struct platform_device dev_npea = {
378 + .name = "ixp4xx_npe",
379 + .id = 0,
380 + .dev.platform_data = &npea,
381 + .num_resources = 1,
382 + .resource = &res_npea,
383 +};
384 +
385 +static struct platform_device dev_npeb = {
386 + .name = "ixp4xx_npe",
387 + .id = 1,
388 + .dev.platform_data = &npeb,
389 + .num_resources = 1,
390 + .resource = &res_npeb,
391 +};
392 +
393 +static struct platform_device dev_npec = {
394 + .name = "ixp4xx_npe",
395 + .id = 2,
396 + .dev.platform_data = &npec,
397 + .num_resources = 1,
398 + .resource = &res_npec,
399 +};
400 +
401 +/* QMGR */
402 +static struct resource res_qmgr[] = {
403 +{
404 + .start = IXP4XX_QMGR_BASE_PHYS,
405 + .end = IXP4XX_QMGR_BASE_PHYS + IXP4XX_QMGR_REGION_SIZE -1,
406 + .flags = IORESOURCE_MEM,
407 +}, {
408 + .start = IRQ_IXP4XX_QM1,
409 + .flags = IORESOURCE_IRQ,
410 +} };
411 +
412 +static struct platform_device qmgr = {
413 + .name = "ixp4xx_qmgr",
414 + .id = 0,
415 + .dev = {
416 + .coherent_dma_mask = DMA_31BIT_MASK,
417 + },
418 + .num_resources = ARRAY_SIZE(res_qmgr),
419 + .resource = res_qmgr,
420 +};
421 +
422 +static struct platform_device *npes_qmgr[] __initdata = {
423 + &qmgr,
424 + &dev_npea,
425 + &dev_npeb,
426 + &dev_npec,
427 +};
428 +
429 unsigned long ixp4xx_exp_bus_size;
430 EXPORT_SYMBOL(ixp4xx_exp_bus_size);
431
432 @@ -360,7 +451,10 @@
433 break;
434 }
435 }
436 + npeb.inst_size = 0x1000;
437 + npec.inst_size = 0x1000;
438 }
439 + platform_add_devices(npes_qmgr, ARRAY_SIZE(npes_qmgr));
440
441 printk("IXP4xx: Using %luMiB expansion bus window size\n",
442 ixp4xx_exp_bus_size >> 20);
443 diff -Nur linux-2.6.17/arch/arm/mach-ixp4xx/ixdp425-setup.c linux-2.6.17-owrt/arch/arm/mach-ixp4xx/ixdp425-setup.c
444 --- linux-2.6.17/arch/arm/mach-ixp4xx/ixdp425-setup.c 2006-06-18 03:49:35.000000000 +0200
445 +++ linux-2.6.17-owrt/arch/arm/mach-ixp4xx/ixdp425-setup.c 2006-10-27 12:48:54.000000000 +0200
446 @@ -101,10 +101,57 @@
447 .resource = ixdp425_uart_resources
448 };
449
450 +/* MACs */
451 +static struct resource res_mac0 = {
452 + .start = IXP4XX_EthB_BASE_PHYS,
453 + .end = IXP4XX_EthB_BASE_PHYS + 0x1ff,
454 + .flags = IORESOURCE_MEM,
455 +};
456 +
457 +static struct resource res_mac1 = {
458 + .start = IXP4XX_EthC_BASE_PHYS,
459 + .end = IXP4XX_EthC_BASE_PHYS + 0x1ff,
460 + .flags = IORESOURCE_MEM,
461 +};
462 +
463 +static struct mac_plat_info plat_mac0 = {
464 + .npe_id = 1,
465 + .phy_id = 0,
466 + .eth_id = 0,
467 + .rxq_id = 27,
468 + .txq_id = 24,
469 +};
470 +
471 +static struct mac_plat_info plat_mac1 = {
472 + .npe_id = 2,
473 + .phy_id = 1,
474 + .eth_id = 1,
475 + .rxq_id = 28,
476 + .txq_id = 25,
477 +};
478 +
479 +static struct platform_device mac0 = {
480 + .name = "ixp4xx_mac",
481 + .id = 0,
482 + .dev.platform_data = &plat_mac0,
483 + .num_resources = 1,
484 + .resource = &res_mac0,
485 +};
486 +
487 +static struct platform_device mac1 = {
488 + .name = "ixp4xx_mac",
489 + .id = 1,
490 + .dev.platform_data = &plat_mac1,
491 + .num_resources = 1,
492 + .resource = &res_mac1,
493 +};
494 +
495 static struct platform_device *ixdp425_devices[] __initdata = {
496 &ixdp425_i2c_controller,
497 &ixdp425_flash,
498 - &ixdp425_uart
499 + &ixdp425_uart,
500 + &mac0,
501 + &mac1,
502 };
503
504 static void __init ixdp425_init(void)
505 diff -Nur linux-2.6.17/drivers/net/Kconfig linux-2.6.17-owrt/drivers/net/Kconfig
506 --- linux-2.6.17/drivers/net/Kconfig 2006-06-18 03:49:35.000000000 +0200
507 +++ linux-2.6.17-owrt/drivers/net/Kconfig 2006-10-27 12:48:54.000000000 +0200
508 @@ -187,6 +187,8 @@
509
510 source "drivers/net/arm/Kconfig"
511
512 +source "drivers/net/ixp4xx/Kconfig"
513 +
514 config MACE
515 tristate "MACE (Power Mac ethernet) support"
516 depends on NET_ETHERNET && PPC_PMAC && PPC32
517 diff -Nur linux-2.6.17/drivers/net/Makefile linux-2.6.17-owrt/drivers/net/Makefile
518 --- linux-2.6.17/drivers/net/Makefile 2006-06-18 03:49:35.000000000 +0200
519 +++ linux-2.6.17-owrt/drivers/net/Makefile 2006-10-27 12:48:54.000000000 +0200
520 @@ -208,6 +208,7 @@
521 obj-$(CONFIG_IRDA) += irda/
522 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
523 obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
524 +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx/
525
526 obj-$(CONFIG_NETCONSOLE) += netconsole.o
527
528 diff -Nur linux-2.6.17/drivers/net/ixp4xx/Kconfig linux-2.6.17-owrt/drivers/net/ixp4xx/Kconfig
529 --- linux-2.6.17/drivers/net/ixp4xx/Kconfig 1970-01-01 01:00:00.000000000 +0100
530 +++ linux-2.6.17-owrt/drivers/net/ixp4xx/Kconfig 2006-10-27 12:48:54.000000000 +0200
531 @@ -0,0 +1,40 @@
532 +config IXP4XX_QMGR
533 + tristate "IXP4xx Queue Manager support"
534 + depends on ARCH_IXP4XX
535 + depends on NET_ETHERNET
536 + help
537 + The IXP4XX queue manager is a configurable hardware ring buffer.
538 + It is used by the NPEs to exchange data from and to the CPU.
539 + You can either use this OR the Intel Access Library (IAL)
540 +
541 +config IXP4XX_NPE
542 + tristate "IXP4xx NPE support"
543 + depends on ARCH_IXP4XX
544 + depends on NET_ETHERNET
545 + help
546 + The IXP4XX NPE driver supports the 3 CPU co-processors called
547 + "Network Processing Engines" (NPE). It adds support for downloading
548 + the microcode (firmware) via hotplug or a character special device.
549 + More about this at: Documentation/networking/ixp4xx/README.
550 + You can either use this OR the Intel Access Library (IAL)
551 +
552 +config IXP4XX_FW_LOAD
553 + bool "Use Firmware hotplug for Microcode download"
554 + depends on IXP4XX_NPE
555 + select HOTPLUG
556 + select FW_LOADER
557 + help
558 + The default hotplug script will load the Firmware from
559 + /usr/lib/hotplug/firmware/NPE-[ABC]
560 + see Documentation/firmware_class/hotplug-script
561 +
562 +config IXP4XX_MAC
563 + tristate "IXP4xx MAC support"
564 + depends on IXP4XX_NPE
565 + depends on IXP4XX_QMGR
566 + depends on NET_ETHERNET
567 + select MII
568 + help
569 + The IXP4XX MAC driver supports the MACs on the IXP4XX CPUs.
570 + There are 2 on ixp425 and up to 5 on ixdp465.
571 + You can either use this OR the Intel Access Library (IAL)
572 diff -Nur linux-2.6.17/drivers/net/ixp4xx/Makefile linux-2.6.17-owrt/drivers/net/ixp4xx/Makefile
573 --- linux-2.6.17/drivers/net/ixp4xx/Makefile 1970-01-01 01:00:00.000000000 +0100
574 +++ linux-2.6.17-owrt/drivers/net/ixp4xx/Makefile 2006-10-27 12:48:54.000000000 +0200
575 @@ -0,0 +1,6 @@
576 +obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx_qmgr.o
577 +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx_npe.o
578 +obj-$(CONFIG_IXP4XX_MAC) += ixp4xx_mac.o
579 +
580 +ixp4xx_npe-objs := ucode_dl.o npe_mh.o
581 +ixp4xx_mac-objs := mac_driver.o qmgr_eth.o phy.o
582 diff -Nur linux-2.6.17/drivers/net/ixp4xx/ixp4xx_qmgr.c linux-2.6.17-owrt/drivers/net/ixp4xx/ixp4xx_qmgr.c
583 --- linux-2.6.17/drivers/net/ixp4xx/ixp4xx_qmgr.c 1970-01-01 01:00:00.000000000 +0100
584 +++ linux-2.6.17-owrt/drivers/net/ixp4xx/ixp4xx_qmgr.c 2006-10-27 12:48:54.000000000 +0200
585 @@ -0,0 +1,390 @@
586 +/*
587 + * ixp4xx_qmgr.c - reimplementation of the queue configuration interface.
588 + *
589 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
590 + *
591 + * This file is released under the GPLv2
592 + */
593 +
594 +#include <linux/kernel.h>
595 +#include <linux/module.h>
596 +#include <linux/platform_device.h>
597 +#include <linux/fs.h>
598 +#include <linux/init.h>
599 +#include <linux/slab.h>
600 +#include <linux/dmapool.h>
601 +#include <linux/interrupt.h>
602 +#include <linux/err.h>
603 +#include <asm/uaccess.h>
604 +#include <asm/io.h>
605 +
606 +#include <linux/ixp_qmgr.h>
607 +#include <linux/ixp_npe.h>
608 +
609 +#define IXQMGR_VERSION "IXP4XX Q Manager 0.2.0"
610 +
611 +static struct device *qmgr_dev = NULL;
612 +
613 +int queue_len(struct qm_queue *queue)
614 +{
615 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
616 + int diff, offs;
617 + u32 val;
618 +
619 + offs = queue->id/8 + QUE_LOW_STAT0;
620 + val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
621 +
622 + diff = (val - (val >> 7)) & 0x7f;
623 + if (!diff) {
624 + /* diff == 0 means either empty or full, must look at STAT0 */
625 + if ((*(qmgr->addr + offs) >> ((queue->id % 8)*4)) & 0x04)
626 + diff = queue->len;
627 + }
628 + return diff;
629 +}
630 +
631 +static int request_pool(struct device *dev, int count)
632 +{
633 + int i;
634 + struct npe_cont *cont;
635 + struct qm_qmgr *qmgr = dev_get_drvdata(dev);
636 + dma_addr_t handle;
637 +
638 + for (i=0; i<count; i++) {
639 + cont = dma_pool_alloc(qmgr->dmapool, GFP_KERNEL, &handle);
640 + if (!cont) {
641 + return -ENOMEM;
642 + }
643 + cont->phys = handle;
644 + cont->virt = cont;
645 + write_lock(&qmgr->lock);
646 + cont->next = qmgr->pool;
647 + qmgr->pool = cont;
648 + write_unlock(&qmgr->lock);
649 + }
650 + return 0;
651 +}
652 +
653 +static int free_pool(struct device *dev, int count)
654 +{
655 + int i;
656 + struct npe_cont *cont;
657 + struct qm_qmgr *qmgr = dev_get_drvdata(dev);
658 +
659 + for (i=0; i<count; i++) {
660 + write_lock(&qmgr->lock);
661 + cont = qmgr->pool;
662 + if (!cont) {
663 + write_unlock(&qmgr->lock);
664 + return -1;
665 + }
666 + qmgr->pool = cont->next;
667 + write_unlock(&qmgr->lock);
668 + dma_pool_free(qmgr->dmapool, cont, cont->phys);
669 + }
670 + return 0;
671 +}
672 +
673 +static int get_free_qspace(struct qm_qmgr *qmgr, int len)
674 +{
675 + int words = (qmgr->res->end - qmgr->res->start + 1) / 4 -
676 + IX_QMGR_SRAM_SPACE;
677 + int i,q;
678 +
679 + for (i=0; i<words; i+=len) {
680 + for (q=0; q<MAX_QUEUES; q++) {
681 + struct qm_queue *qu = qmgr->queues[q];
682 + if (!qu)
683 + continue;
684 + if ((qu->addr + qu->len > i) && (qu->addr < i + len))
685 + break;
686 + }
687 + if (q == MAX_QUEUES) {
688 + /* we have a free address */
689 + return i;
690 + }
691 + }
692 + return -1;
693 +}
694 +
695 +static inline int log2(int x)
696 +{
697 + int r=0;
698 + while(x>>=1)
699 + r++;
700 + return r;
701 +}
702 +
703 +/*
704 + * 32bit Config registers at IX_QMGR_QUECONFIG_BASE_OFFSET[Qid]
705 + * 0 - 6 WRPTR Word offset to baseaddr (index 0 .. BSIZE-1)
706 + * 7 -13 RDPTR ''
707 + * 14 -21 BADDR baseaddr = (offset to IX_QMGR_QUEBUFFER_SPACE_OFFSET) >> 6
708 + * 22 -23 ESIZE entrySizeInWords (always 00 because entrySizeInWords==1)
709 + * 24 -25 BSIZE qSizeInWords 00=16,01=32,10=64,11=128
710 + * 26 -28 NE nearly empty
711 + * 29 -31 NF nearly full
712 + */
713 +static int conf_q_regs(struct qm_queue *queue)
714 +{
715 + int bsize = log2(queue->len/16);
716 + int baddr = queue->addr + IX_QMGR_QCFG_SIZE;
717 +
718 + /* +2, because baddr is in words and not in bytes */
719 + queue_write_cfg_reg(queue, (bsize << 24) | (baddr<<(14-6+2)) );
720 +
721 + return 0;
722 +}
723 +
724 +void queue_set_watermarks(struct qm_queue *queue, unsigned ne, unsigned nf)
725 +{
726 + u32 val;
727 + /* calculate the register values
728 + * 0->0, 1->1, 2->2, 4->3, 8->4 16->5...*/
729 + ne = log2(ne<<1) & 0x7;
730 + nf = log2(nf<<1) & 0x7;
731 +
732 + /* Mask out old watermarks */
733 + val = queue_read_cfg_reg(queue) & ~0xfc000000;
734 + queue_write_cfg_reg(queue, val | (ne << 26) | (nf << 29));
735 +}
736 +
737 +int queue_set_irq_src(struct qm_queue *queue, int flag)
738 +{
739 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
740 + u32 reg;
741 + int offs, bitoffs;
742 +
743 + /* Q 0-7 are in REG0, 8-15 are in REG1, etc. They occupy 4 bits/Q */
744 + offs = queue->id/8 + INT0_SRC_SELREG0;
745 + bitoffs = (queue->id % 8)*4;
746 +
747 + reg = *(qmgr->addr + offs) & ~(0xf << bitoffs);
748 + *(qmgr->addr + offs) = reg | (flag << bitoffs);
749 +
750 + return 0;
751 +}
752 +
753 +static irqreturn_t irq_qm1(int irq, void *dev_id)
754 +{
755 + struct qm_qmgr *qmgr = dev_id;
756 + int offs, reg;
757 + struct qm_queue *queue;
758 +
759 + reg = *(qmgr->addr + QUE_INT_REG0);
760 + while(reg) {
761 + /*
762 + * count leading zeros. "offs" gets
763 + * the amount of leading 0 in "reg"
764 + */
765 + asm ("clz %0, %1;" : "=r"(offs) : "r"(reg));
766 + offs = 31 - offs;
767 + reg &= ~(1 << offs);
768 + queue = qmgr->queues[offs];
769 + if (likely(queue)) {
770 + if (likely(queue->irq_cb)) {
771 + queue->irq_cb(queue);
772 + } else {
773 + printk(KERN_ERR "Missing callback for Q %d\n",
774 + offs);
775 + }
776 + } else {
777 + printk(KERN_ERR "IRQ for unregistered Q %d\n", offs);
778 + }
779 + }
780 + return IRQ_HANDLED;
781 +}
782 +
783 +struct qm_queue *request_queue(int qid, int len)
784 +{
785 + int ram;
786 + struct qm_qmgr *qmgr;
787 + struct qm_queue *queue;
788 +
789 + if (!qmgr_dev)
790 + return ERR_PTR(-ENODEV);
791 +
792 + if ((qid < 0) || (qid > MAX_QUEUES))
793 + return ERR_PTR(-ERANGE);
794 +
795 + switch (len) {
796 + case 16:
797 + case 32:
798 + case 64:
799 + case 128: break;
800 + default : return ERR_PTR(-EINVAL);
801 + }
802 +
803 + qmgr = dev_get_drvdata(qmgr_dev);
804 +
805 + if (qmgr->queues[qid]) {
806 + /* not an error, just in use already */
807 + return NULL;
808 + }
809 + if ((ram = get_free_qspace(qmgr, len)) < 0) {
810 + printk(KERN_ERR "No free SRAM space for this queue\n");
811 + return ERR_PTR(-ENOMEM);
812 + }
813 + if (!(queue = kzalloc(sizeof(struct qm_queue), GFP_KERNEL)))
814 + return ERR_PTR(-ENOMEM);
815 +
816 + if (!try_module_get(THIS_MODULE)) {
817 + kfree(queue);
818 + return ERR_PTR(-ENODEV);
819 + }
820 +
821 + queue->addr = ram;
822 + queue->len = len;
823 + queue->id = qid;
824 + queue->dev = get_device(qmgr_dev);
825 + queue->acc_reg = qmgr->addr + (4 * qid);
826 + qmgr->queues[qid] = queue;
827 + if (request_pool(qmgr_dev, len)) {
828 + printk(KERN_ERR "Failed to request DMA pool of Q %d\n", qid);
829 + }
830 +
831 + conf_q_regs(queue);
832 + return queue;
833 +}
834 +
835 +void release_queue(struct qm_queue *queue)
836 +{
837 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
838 +
839 + BUG_ON(qmgr->queues[queue->id] != queue);
840 + qmgr->queues[queue->id] = NULL;
841 +
842 + if (free_pool(queue->dev, queue->len)) {
843 + printk(KERN_ERR "Failed to release DMA pool of Q %d\n",
844 + queue->id);
845 + }
846 + queue_disable_irq(queue);
847 + queue_write_cfg_reg(queue, 0);
848 +
849 + module_put(THIS_MODULE);
850 + put_device(queue->dev);
851 + kfree(queue);
852 +}
853 +
854 +static int qmgr_probe(struct platform_device *pdev)
855 +{
856 + struct resource *res;
857 + struct qm_qmgr *qmgr;
858 + int size, ret=0, i;
859 +
860 + if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
861 + return -EIO;
862 +
863 + if ((i = platform_get_irq(pdev, 0)) < 0)
864 + return -EIO;
865 +
866 + if (!(qmgr = kzalloc(sizeof(struct qm_qmgr), GFP_KERNEL)))
867 + return -ENOMEM;
868 +
869 + qmgr->irq = i;
870 + size = res->end - res->start +1;
871 + qmgr->res = request_mem_region(res->start, size, "ixp_qmgr");
872 + if (!qmgr->res) {
873 + ret = -EBUSY;
874 + goto out_free;
875 + }
876 +
877 + qmgr->addr = ioremap(res->start, size);
878 + if (!qmgr->addr) {
879 + ret = -ENOMEM;
880 + goto out_rel;
881 + }
882 +
883 + /* Reset Q registers */
884 + for (i=0; i<4; i++)
885 + *(qmgr->addr + QUE_LOW_STAT0 +i) = 0x33333333;
886 + for (i=0; i<10; i++)
887 + *(qmgr->addr + QUE_UO_STAT0 +i) = 0x0;
888 + for (i=0; i<4; i++)
889 + *(qmgr->addr + INT0_SRC_SELREG0 +i) = 0x0;
890 + for (i=0; i<2; i++) {
891 + *(qmgr->addr + QUE_IE_REG0 +i) = 0x00;
892 + *(qmgr->addr + QUE_INT_REG0 +i) = 0xffffffff;
893 + }
894 + for (i=0; i<64; i++) {
895 + *(qmgr->addr + IX_QMGR_QCFG_BASE + i) = 0x0;
896 + }
897 +
898 + ret = request_irq(qmgr->irq, irq_qm1, SA_SHIRQ | SA_INTERRUPT,
899 + "qmgr", qmgr);
900 + if (ret) {
901 + printk(KERN_ERR "Failed to request IRQ(%d)\n", qmgr->irq);
902 + ret = -EIO;
903 + goto out_rel;
904 + }
905 +
906 + rwlock_init(&qmgr->lock);
907 + qmgr->dmapool = dma_pool_create("qmgr", &pdev->dev,
908 + sizeof(struct npe_cont), 32, 0);
909 + platform_set_drvdata(pdev, qmgr);
910 +
911 + qmgr_dev = &pdev->dev;
912 +
913 + printk(KERN_INFO IXQMGR_VERSION " initialized.\n");
914 +
915 + return 0;
916 +
917 +out_rel:
918 + release_resource(qmgr->res);
919 +out_free:
920 + kfree(qmgr);
921 + return ret;
922 +}
923 +
924 +static int qmgr_remove(struct platform_device *pdev)
925 +{
926 + struct qm_qmgr *qmgr = platform_get_drvdata(pdev);
927 + int i;
928 +
929 + for (i=0; i<MAX_QUEUES; i++) {
930 + if (qmgr->queues[i]) {
931 + printk(KERN_ERR "WARNING Unreleased Q: %d\n", i);
932 + release_queue(qmgr->queues[i]);
933 + }
934 + }
935 +
936 + synchronize_irq (qmgr->irq);
937 + free_irq(qmgr->irq, qmgr);
938 +
939 + dma_pool_destroy(qmgr->dmapool);
940 + iounmap(qmgr->addr);
941 + release_resource(qmgr->res);
942 + platform_set_drvdata(pdev, NULL);
943 + qmgr_dev = NULL;
944 + kfree(qmgr);
945 + return 0;
946 +}
947 +
948 +static struct platform_driver ixp4xx_qmgr = {
949 + .driver.name = "ixp4xx_qmgr",
950 + .probe = qmgr_probe,
951 + .remove = qmgr_remove,
952 +};
953 +
954 +
955 +static int __init init_qmgr(void)
956 +{
957 + return platform_driver_register(&ixp4xx_qmgr);
958 +}
959 +
960 +static void __exit finish_qmgr(void)
961 +{
962 + platform_driver_unregister(&ixp4xx_qmgr);
963 +}
964 +
965 +module_init(init_qmgr);
966 +module_exit(finish_qmgr);
967 +
968 +MODULE_LICENSE("GPL");
969 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
970 +
971 +EXPORT_SYMBOL(request_queue);
972 +EXPORT_SYMBOL(release_queue);
973 +EXPORT_SYMBOL(queue_set_irq_src);
974 +EXPORT_SYMBOL(queue_set_watermarks);
975 +EXPORT_SYMBOL(queue_len);
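For illustration, a minimal sketch of how a client module might use the queue interface exported above, modelled on init_mac() in mac_driver.c further down in this patch (the queue id 26, the watermark value and the my_* names are made up for the example; queue_enable_irq(), queue_disable_irq() and queue_ack_irq() are assumed to come from <linux/ixp_qmgr.h>, as used by the MAC driver):

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/err.h>
    #include <linux/ixp_qmgr.h>

    static struct qm_queue *my_q;

    /* called whenever the queue crosses the configured watermark */
    static void my_irq_cb(struct qm_queue *queue)
    {
            queue_ack_irq(queue);
            /* ... dequeue and process entries here ... */
    }

    static int __init my_client_init(void)
    {
            my_q = request_queue(26, 128);  /* queue id and length are examples */
            if (IS_ERR(my_q))
                    return PTR_ERR(my_q);
            if (!my_q)                      /* NULL means the queue is already in use */
                    return -EBUSY;
            my_q->irq_cb = my_irq_cb;
            queue_set_watermarks(my_q, 0, 64);    /* interrupt when half full */
            queue_set_irq_src(my_q, Q_IRQ_ID_NF); /* "nearly full" as IRQ source */
            queue_enable_irq(my_q);
            return 0;
    }

    static void __exit my_client_exit(void)
    {
            queue_disable_irq(my_q);
            release_queue(my_q);
    }

    module_init(my_client_init);
    module_exit(my_client_exit);
    MODULE_LICENSE("GPL");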
976 diff -Nur linux-2.6.17/drivers/net/ixp4xx/mac.h linux-2.6.17-owrt/drivers/net/ixp4xx/mac.h
977 --- linux-2.6.17/drivers/net/ixp4xx/mac.h 1970-01-01 01:00:00.000000000 +0100
978 +++ linux-2.6.17-owrt/drivers/net/ixp4xx/mac.h 2006-10-27 12:48:54.000000000 +0200
979 @@ -0,0 +1,221 @@
980 +/*
981 + * Copyright (C) 2002-2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
982 + *
983 + * This file is released under the GPLv2
984 + */
985 +
986 +#include <linux/resource.h>
987 +#include <linux/netdevice.h>
988 +#include <linux/io.h>
989 +#include <linux/mii.h>
990 +#include <linux/workqueue.h>
991 +#include <asm/hardware.h>
992 +#include <linux/ixp_qmgr.h>
993 +
994 +
995 +/* 32 bit offsets to be added to u32 *pointers */
996 +#define MAC_TX_CNTRL1 0x00 // 0x000
997 +#define MAC_TX_CNTRL2 0x01 // 0x004
998 +#define MAC_RX_CNTRL1 0x04 // 0x010
999 +#define MAC_RX_CNTRL2 0x05 // 0x014
1000 +#define MAC_RANDOM_SEED 0x08 // 0x020
1001 +#define MAC_THRESH_P_EMPTY 0x0c // 0x030
1002 +#define MAC_THRESH_P_FULL 0x0e // 0x038
1003 +#define MAC_BUF_SIZE_TX 0x10 // 0x040
1004 +#define MAC_TX_DEFER 0x14 // 0x050
1005 +#define MAC_RX_DEFER 0x15 // 0x054
1006 +#define MAC_TX_TWO_DEFER_1 0x18 // 0x060
1007 +#define MAC_TX_TWO_DEFER_2 0x19 // 0x064
1008 +#define MAC_SLOT_TIME 0x1c // 0x070
1009 +#define MAC_MDIO_CMD 0x20 // 0x080 4 registers 0x20 - 0x23
1010 +#define MAC_MDIO_STS 0x24 // 0x090 4 registers 0x24 - 0x27
1011 +#define MAC_ADDR_MASK 0x28 // 0x0A0 6 registers 0x28 - 0x2d
1012 +#define MAC_ADDR 0x30 // 0x0C0 6 registers 0x30 - 0x35
1013 +#define MAC_INT_CLK_THRESH 0x38 // 0x0E0 1 register
1014 +#define MAC_UNI_ADDR 0x3c // 0x0F0 6 registers 0x3c - 0x41
1015 +#define MAC_CORE_CNTRL 0x7f // 0x1fC
1016 +
1017 +/* TX Control Register 1*/
1018 +
1019 +#define TX_CNTRL1_TX_EN BIT(0)
1020 +#define TX_CNTRL1_DUPLEX BIT(1)
1021 +#define TX_CNTRL1_RETRY BIT(2)
1022 +#define TX_CNTRL1_PAD_EN BIT(3)
1023 +#define TX_CNTRL1_FCS_EN BIT(4)
1024 +#define TX_CNTRL1_2DEFER BIT(5)
1025 +#define TX_CNTRL1_RMII BIT(6)
1026 +
1027 +/* TX Control Register 2 */
1028 +#define TX_CNTRL2_RETRIES_MASK 0xf
1029 +
1030 +/* RX Control Register 1 */
1031 +#define RX_CNTRL1_RX_EN BIT(0)
1032 +#define RX_CNTRL1_PADSTRIP_EN BIT(1)
1033 +#define RX_CNTRL1_CRC_EN BIT(2)
1034 +#define RX_CNTRL1_PAUSE_EN BIT(3)
1035 +#define RX_CNTRL1_LOOP_EN BIT(4)
1036 +#define RX_CNTRL1_ADDR_FLTR_EN BIT(5)
1037 +#define RX_CNTRL1_RX_RUNT_EN BIT(6)
1038 +#define RX_CNTRL1_BCAST_DIS BIT(7)
1039 +
1040 +/* RX Control Register 2 */
1041 +#define RX_CNTRL2_DEFER_EN BIT(0)
1042 +
1043 +/* Core Control Register */
1044 +#define CORE_RESET BIT(0)
1045 +#define CORE_RX_FIFO_FLUSH BIT(1)
1046 +#define CORE_TX_FIFO_FLUSH BIT(2)
1047 +#define CORE_SEND_JAM BIT(3)
1048 +#define CORE_MDC_EN BIT(4)
1049 +
1050 +/* Definitions for MII access routines*/
1051 +
1052 +#define MII_REG_SHL 16
1053 +#define MII_ADDR_SHL 21
1054 +
1055 +#define MII_GO BIT(31)
1056 +#define MII_WRITE BIT(26)
1057 +#define MII_READ_FAIL BIT(31)
1058 +
1059 +#define MII_TIMEOUT_10TH_SECS 5
1060 +#define MII_10TH_SEC_IN_MILLIS 100
1061 +
1062 +/*
1063 + *
1064 + * Default values
1065 + *
1066 + */
1067 +
1068 +
1069 +#define MAC_TX_CNTRL1_DEFAULT (\
1070 + TX_CNTRL1_TX_EN | \
1071 + TX_CNTRL1_RETRY | \
1072 + TX_CNTRL1_FCS_EN | \
1073 + TX_CNTRL1_2DEFER | \
1074 + TX_CNTRL1_PAD_EN )
1075 +
1076 +#define MAC_TX_MAX_RETRIES_DEFAULT 0x0f
1077 +
1078 +#define MAC_RX_CNTRL1_DEFAULT ( \
1079 + RX_CNTRL1_PADSTRIP_EN | \
1080 + RX_CNTRL1_CRC_EN | \
1081 + RX_CNTRL1_RX_EN )
1082 +
1083 +#define MAC_RX_CNTRL2_DEFAULT 0x0
1084 +#define MAC_TX_CNTRL2_DEFAULT TX_CNTRL2_RETRIES_MASK
1085 +
1086 +/* Thresholds determined by NPE firmware FS */
1087 +#define MAC_THRESH_P_EMPTY_DEFAULT 0x12
1088 +#define MAC_THRESH_P_FULL_DEFAULT 0x30
1089 +
1090 +/* Number of bytes that must be in the tx fifo before
1091 + * transmission commences */
1092 +#define MAC_BUF_SIZE_TX_DEFAULT 0x8
1093 +
1094 +/* One-part deferral values */
1095 +#define MAC_TX_DEFER_DEFAULT 0x15
1096 +#define MAC_RX_DEFER_DEFAULT 0x16
1097 +
1098 +/* Two-part deferral values... */
1099 +#define MAC_TX_TWO_DEFER_1_DEFAULT 0x08
1100 +#define MAC_TX_TWO_DEFER_2_DEFAULT 0x07
1101 +
1102 +/* This value applies to MII */
1103 +#define MAC_SLOT_TIME_DEFAULT 0x80
1104 +
1105 +/* This value applies to RMII */
1106 +#define MAC_SLOT_TIME_RMII_DEFAULT 0xFF
1107 +
1108 +#define MAC_ADDR_MASK_DEFAULT 0xFF
1109 +
1110 +#define MAC_INT_CLK_THRESH_DEFAULT 0x1
1111 +/* The following is a value chosen at random */
1112 +#define RANDOM_SEED_DEFAULT 0x8
1113 +
1114 +/* By default we must configure the MAC to generate the MDC clock*/
1115 +#define CORE_DEFAULT (CORE_MDC_EN)
1116 +
1117 +/* End of Intel provided register information */
1118 +
1119 +extern int
1120 +mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg);
1121 +extern void
1122 +mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val);
1123 +extern void init_mdio(struct net_device *dev, int phy_id);
1124 +
1125 +struct mac_info {
1126 + u32 __iomem *addr;
1127 + struct resource *res;
1128 + struct device *npe_dev;
1129 + struct qm_qmgr *qmgr;
1130 + struct qm_queue *rxq;
1131 + struct qm_queue *txq;
1132 + u32 irqflags;
1133 + struct net_device_stats stat;
1134 + struct mii_if_info mii;
1135 + struct work_struct mdio_thread;
1136 + int rxq_pkt;
1137 + int unloading;
1138 + struct mac_plat_info *plat;
1139 +};
1140 +
1141 +static inline void mac_write_reg(struct mac_info *mac, int offset, u32 val)
1142 +{
1143 + *(mac->addr + offset) = val;
1144 +}
1145 +static inline u32 mac_read_reg(struct mac_info *mac, int offset)
1146 +{
1147 + return *(mac->addr + offset);
1148 +}
1149 +static inline void mac_set_regbit(struct mac_info *mac, int offset, u32 bit)
1150 +{
1151 + mac_write_reg(mac, offset, mac_read_reg(mac, offset) | bit);
1152 +}
1153 +static inline void mac_reset_regbit(struct mac_info *mac, int offset, u32 bit)
1154 +{
1155 + mac_write_reg(mac, offset, mac_read_reg(mac, offset) & ~bit);
1156 +}
1157 +
1158 +static inline void mac_mdio_cmd_write(struct mac_info *mac, u32 cmd)
1159 +{
1160 + int i;
1161 + for(i=0; i<4; i++) {
1162 + mac_write_reg(mac, MAC_MDIO_CMD + i, cmd & 0xff);
1163 + cmd >>=8;
1164 + }
1165 +}
1166 +
1167 +#define mac_mdio_cmd_read(mac) mac_mdio_read((mac), MAC_MDIO_CMD)
1168 +#define mac_mdio_status_read(mac) mac_mdio_read((mac), MAC_MDIO_STS)
1169 +static inline u32 mac_mdio_read(struct mac_info *mac, int offset)
1170 +{
1171 + int i;
1172 + u32 data = 0;
1173 + for(i=0; i<4; i++) {
1174 + data |= (mac_read_reg(mac, offset + i) & 0xff) << (i*8);
1175 + }
1176 + return data;
1177 +}
1178 +
1179 +static inline u32 mdio_cmd(int phy_addr, int phy_reg)
1180 +{
1181 + return phy_addr << MII_ADDR_SHL |
1182 + phy_reg << MII_REG_SHL |
1183 + MII_GO;
1184 +}
1185 +
1186 +#define MAC_REG_LIST { \
1187 + MAC_TX_CNTRL1, MAC_TX_CNTRL2, \
1188 + MAC_RX_CNTRL1, MAC_RX_CNTRL2, \
1189 + MAC_RANDOM_SEED, MAC_THRESH_P_EMPTY, MAC_THRESH_P_FULL, \
1190 + MAC_BUF_SIZE_TX, MAC_TX_DEFER, MAC_RX_DEFER, \
1191 + MAC_TX_TWO_DEFER_1, MAC_TX_TWO_DEFER_2, MAC_SLOT_TIME, \
1192 + MAC_ADDR_MASK +0, MAC_ADDR_MASK +1, MAC_ADDR_MASK +2, \
1193 + MAC_ADDR_MASK +3, MAC_ADDR_MASK +4, MAC_ADDR_MASK +5, \
1194 + MAC_ADDR +0, MAC_ADDR +1, MAC_ADDR +2, \
1195 + MAC_ADDR +3, MAC_ADDR +4, MAC_ADDR +5, \
1196 + MAC_INT_CLK_THRESH, \
1197 + MAC_UNI_ADDR +0, MAC_UNI_ADDR +1, MAC_UNI_ADDR +2, \
1198 + MAC_UNI_ADDR +3, MAC_UNI_ADDR +4, MAC_UNI_ADDR +5, \
1199 + MAC_CORE_CNTRL \
1200 +}
1201 diff -Nur linux-2.6.17/drivers/net/ixp4xx/mac_driver.c linux-2.6.17-owrt/drivers/net/ixp4xx/mac_driver.c
1202 --- linux-2.6.17/drivers/net/ixp4xx/mac_driver.c 1970-01-01 01:00:00.000000000 +0100
1203 +++ linux-2.6.17-owrt/drivers/net/ixp4xx/mac_driver.c 2006-10-27 12:48:54.000000000 +0200
1204 @@ -0,0 +1,578 @@
1205 +/*
1206 + * mac_driver.c - provide a network interface for each MAC
1207 + *
1208 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1209 + *
1210 + * This file is released under the GPLv2
1211 + */
1212 +
1213 +#include <linux/kernel.h>
1214 +#include <linux/module.h>
1215 +#include <linux/platform_device.h>
1216 +#include <linux/netdevice.h>
1217 +#include <linux/etherdevice.h>
1218 +#include <linux/ethtool.h>
1219 +#include <linux/slab.h>
1220 +#include <linux/delay.h>
1221 +#include <linux/err.h>
1222 +#include <asm/io.h>
1223 +#include <asm/irq.h>
1224 +
1225 +
1226 +#include <linux/ixp_qmgr.h>
1227 +#include <linux/ixp_npe.h>
1228 +#include "mac.h"
1229 +
1230 +#define MDIO_INTERVAL (3*HZ)
1231 +#define RX_QUEUE_PREFILL 64
1232 +
1233 +#define IXMAC_NAME "ixp4xx_mac"
1234 +#define IXMAC_VERSION "0.2.1"
1235 +
1236 +#define MAC_DEFAULT_REG(mac, name) \
1237 + mac_write_reg(mac, MAC_ ## name, MAC_ ## name ## _DEFAULT)
1238 +
1239 +#define RX_DONE_QID 4
1240 +#define TX_DONE_QID 31
1241 +
1242 +extern int queue_send_skb(struct qm_queue *queue, struct sk_buff *skb);
1243 +extern int queue_fill_skb(struct qm_queue *queue, struct net_device *dev);
1244 +extern int queue_drain(struct qm_queue *queue);
1245 +extern struct sk_buff *queue_return_skb(struct qm_queue *queue);
1246 +
1247 +
1248 +/* Since the NPEs use one return Q for sent frames, we need a
1249 + * device-independent return Q. We call it tx_doneq.
1250 + * It will be initialized during module load and uninitialized
1251 + * during module unload. Evil hack, but there is no choice :-(
1252 + */
1253 +
1254 +static struct qm_queue *tx_doneq = NULL;
1255 +static struct qm_queue *rx_doneq = NULL;
1256 +
1257 +static void mac_init(struct mac_info *mac)
1258 +{
1259 + MAC_DEFAULT_REG(mac, TX_CNTRL2);
1260 + MAC_DEFAULT_REG(mac, THRESH_P_EMPTY);
1261 + MAC_DEFAULT_REG(mac, THRESH_P_FULL);
1262 + MAC_DEFAULT_REG(mac, TX_DEFER);
1263 + MAC_DEFAULT_REG(mac, TX_TWO_DEFER_1);
1264 + MAC_DEFAULT_REG(mac, TX_TWO_DEFER_2);
1265 + MAC_DEFAULT_REG(mac, SLOT_TIME);
1266 + MAC_DEFAULT_REG(mac, INT_CLK_THRESH);
1267 + MAC_DEFAULT_REG(mac, BUF_SIZE_TX);
1268 + MAC_DEFAULT_REG(mac, TX_CNTRL1);
1269 + MAC_DEFAULT_REG(mac, RX_CNTRL1);
1270 +}
1271 +
1272 +static void mac_set_uniaddr(struct net_device *dev)
1273 +{
1274 + int i;
1275 + struct mac_info *mac = netdev_priv(dev);
1276 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
1277 +
1278 + /* check for multicast */
1279 + if (dev->dev_addr[0] & 1)
1280 + return;
1281 +
1282 + npe_mh_setportaddr(npe, mac->plat, dev->dev_addr);
1283 + npe_mh_disable_firewall(npe, mac->plat);
1284 + for (i=0; i<dev->addr_len; i++)
1285 + mac_write_reg(mac, MAC_UNI_ADDR + i, dev->dev_addr[i]);
1286 +}
1287 +
1288 +static void update_duplex_mode(struct net_device *dev)
1289 +{
1290 + struct mac_info *mac = netdev_priv(dev);
1291 + printk("Duplex mode %s =%d\n", dev->name, mac->mii.full_duplex);
1292 + if (mac->mii.full_duplex) {
1293 + mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
1294 + } else {
1295 + mac_set_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
1296 + }
1297 +}
1298 +
1299 +static int media_check(struct net_device *dev, int init)
1300 +{
1301 + struct mac_info *mac = netdev_priv(dev);
1302 +
1303 + if (mii_check_media(&mac->mii, 1, init)) {
1304 + update_duplex_mode(dev);
1305 + return 1;
1306 + }
1307 + return 0;
1308 +}
1309 +
1310 +static void irqcb_recv(struct qm_queue *queue)
1311 +{
1312 + struct net_device *dev;
1313 + struct mac_info *mac;
1314 + struct sk_buff *skb;
1315 +
1316 + queue_ack_irq(queue);
1317 + skb = queue_return_skb(queue);
1318 + while (skb) {
1319 + int rc;
1320 + dev = skb->dev;
1321 + mac = netdev_priv(dev);
1322 + skb->protocol = eth_type_trans(skb, dev);
1323 + dev->last_rx = jiffies;
1324 + rc = netif_rx(skb);
1325 + if (rc == NET_RX_DROP) {
1326 + mac->stat.rx_dropped++;
1327 + } else {
1328 + mac->stat.rx_packets++;
1329 + mac->stat.rx_bytes += skb->len;
1330 + }
1331 +
1332 + if (!mac->unloading)
1333 + queue_fill_skb(mac->rxq, dev);
1334 + else
1335 + mac->rxq_pkt--;
1336 +
1337 + skb = queue_return_skb(queue);
1338 + }
1339 +}
1340 +
1341 +void irqcb_txdone(struct qm_queue *queue)
1342 +{
1343 + queue_ack_irq(queue);
1344 + while (queue_drain(queue));
1345 +}
1346 +
1347 +static void ixmac_set_rx_mode (struct net_device *dev)
1348 +{
1349 + struct mac_info *mac = netdev_priv(dev);
1350 + struct dev_mc_list *mclist;
1351 + u8 aset[dev->addr_len], aclear[dev->addr_len];
1352 + int i,j;
1353 +
1354 + if (dev->flags & IFF_PROMISC) {
1355 + mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
1356 + } else {
1357 + mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
1358 +
1359 + mclist = dev->mc_list;
1360 + memset(aset, 0xff, dev->addr_len);
1361 + memset(aclear, 0x00, dev->addr_len);
1362 + for (i = 0; mclist && i < dev->mc_count; i++) {
1363 + for (j=0; j< dev->addr_len; j++) {
1364 + aset[j] &= mclist->dmi_addr[j];
1365 + aclear[j] |= mclist->dmi_addr[j];
1366 + }
1367 + mclist = mclist->next;
1368 + }
1369 + for (j=0; j< dev->addr_len; j++) {
1370 + aclear[j] = aset[j] | ~aclear[j];
1371 + }
1372 + for (i=0; i<dev->addr_len; i++) {
1373 + mac_write_reg(mac, MAC_ADDR + i, aset[i]);
1374 + mac_write_reg(mac, MAC_ADDR_MASK + i, aclear[i]);
1375 + }
1376 + }
1377 +}
1378 +
1379 +static int ixmac_open (struct net_device *dev)
1380 +{
1381 + struct mac_info *mac = netdev_priv(dev);
1382 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
1383 + int i;
1384 +
1385 + /* first check if Microcode was downloaded into this NPE */
1386 + if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
1387 + printk(KERN_ERR "Missing microcode for %s\n", npe->plat->name);
1388 + return -EIO;
1389 + }
1390 +
1391 + for (i=0; i<RX_QUEUE_PREFILL; i++) {
1392 + queue_fill_skb(mac->rxq, dev);
1393 + }
1394 + mac->rxq_pkt += RX_QUEUE_PREFILL;
1395 +
1396 + mac_init(mac);
1397 + npe_mh_set_rxqid(npe, mac->plat, RX_DONE_QID);
1398 + mac_set_uniaddr(dev);
1399 +
1400 + media_check(dev, 1);
1401 +
1402 + ixmac_set_rx_mode(dev);
1403 +
1404 + netif_start_queue(dev);
1405 + schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
1406 + return 0;
1407 +}
1408 +
1409 +static int ixmac_start_xmit (struct sk_buff *skb, struct net_device *dev)
1410 +{
1411 + struct mac_info *mac = netdev_priv(dev);
1412 +
1413 + if (queue_send_skb(mac->txq, skb)) {
1414 + mac->stat.tx_packets++;
1415 + mac->stat.tx_bytes += skb->len;
1416 + } else {
1417 + mac->stat.tx_errors++;
1418 + dev_kfree_skb(skb);
1419 + }
1420 +
1421 + dev->trans_start = jiffies;
1422 + return 0;
1423 +}
1424 +
1425 +static int ixmac_close (struct net_device *dev)
1426 +{
1427 + struct mac_info *mac = netdev_priv(dev);
1428 +
1429 + netif_stop_queue (dev);
1430 +
1431 + if (mac->mdio_thread.pending)
1432 + cancel_rearming_delayed_work(&mac->mdio_thread);
1433 +
1434 +
1435 + /* After doing all our business, the rxfreeq must
1436 + * carry as many packets as we gave it during setup.
1437 + * Here we calculate the missing packets.
1438 + */
1439 + mac->rxq_pkt -= queue_len(mac->rxq);
1440 +
1441 + while (queue_drain(mac->txq));
1442 + while (queue_drain(mac->rxq));
1443 +
1444 + return 0;
1445 +}
1446 +
1447 +static int ixmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1448 +{
1449 + struct mac_info *mac = netdev_priv(dev);
1450 + int rc, duplex_changed;
1451 +
1452 + if (!netif_running(dev))
1453 + return -EINVAL;
1454 +
1455 +
1456 + if (!try_module_get(THIS_MODULE))
1457 + return -ENODEV;
1458 + rc = generic_mii_ioctl(&mac->mii, if_mii(rq), cmd, &duplex_changed);
1459 + module_put(THIS_MODULE);
1460 + if (duplex_changed)
1461 + update_duplex_mode(dev);
1462 + return rc;
1463 +}
1464 +
1465 +static struct net_device_stats *ixmac_stats (struct net_device *dev)
1466 +{
1467 + struct mac_info *mac = netdev_priv(dev);
1468 + return &mac->stat;
1469 +}
1470 +
1471 +static void ixmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1472 +{
1473 + struct mac_info *mac = netdev_priv(dev);
1474 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
1475 +
1476 + strcpy(info->driver, IXMAC_NAME);
1477 + strcpy(info->version, IXMAC_VERSION);
1478 + if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
1479 + snprintf(info->fw_version, 32, "%d.%d func [%d]",
1480 + npe->img_info[2], npe->img_info[3], npe->img_info[1]);
1481 + }
1482 + strncpy(info->bus_info, npe->plat->name, ETHTOOL_BUSINFO_LEN);
1483 +}
1484 +
1485 +static int ixmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1486 +{
1487 + struct mac_info *mac = netdev_priv(dev);
1488 + mii_ethtool_gset(&mac->mii, cmd);
1489 + return 0;
1490 +}
1491 +
1492 +static int ixmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1493 +{
1494 + struct mac_info *mac = netdev_priv(dev);
1495 + int rc;
1496 + rc = mii_ethtool_sset(&mac->mii, cmd);
1497 + return rc;
1498 +}
1499 +
1500 +static int ixmac_nway_reset(struct net_device *dev)
1501 +{
1502 + struct mac_info *mac = netdev_priv(dev);
1503 + return mii_nway_restart(&mac->mii);
1504 +}
1505 +
1506 +static u32 ixmac_get_link(struct net_device *dev)
1507 +{
1508 + struct mac_info *mac = netdev_priv(dev);
1509 + return mii_link_ok(&mac->mii);
1510 +}
1511 +
1512 +static const int mac_reg_list[] = MAC_REG_LIST;
1513 +
1514 +static int ixmac_get_regs_len(struct net_device *dev)
1515 +{
1516 + return ARRAY_SIZE(mac_reg_list);
1517 +}
1518 +
1519 +static void
1520 +ixmac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
1521 +{
1522 + int i;
1523 + struct mac_info *mac = netdev_priv(dev);
1524 + u8 *buf = regbuf;
1525 +
1526 + for (i=0; i<regs->len; i++) {
1527 + buf[i] = mac_read_reg(mac, mac_reg_list[i]);
1528 + }
1529 +}
1530 +
1531 +static struct ethtool_ops ixmac_ethtool_ops = {
1532 + .get_drvinfo = ixmac_get_drvinfo,
1533 + .get_settings = ixmac_get_settings,
1534 + .set_settings = ixmac_set_settings,
1535 + .nway_reset = ixmac_nway_reset,
1536 + .get_link = ixmac_get_link,
1537 + .get_regs_len = ixmac_get_regs_len,
1538 + .get_regs = ixmac_get_regs,
1539 + .get_perm_addr = ethtool_op_get_perm_addr,
1540 +};
1541 +static void mac_mdio_thread (void *_data)
1542 +{
1543 + struct net_device *dev = _data;
1544 + struct mac_info *mac = netdev_priv(dev);
1545 +
1546 + media_check(dev, 0);
1547 + schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
1548 +}
1549 +
1550 +static int mac_probe(struct platform_device *pdev)
1551 +{
1552 + struct resource *res;
1553 + struct mac_info *mac;
1554 + struct net_device* dev;
1555 + struct npe_info *npe;
1556 + struct mac_plat_info *plat = pdev->dev.platform_data;
1557 + int size, ret;
1558 +
1559 + if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0))) {
1560 + return -EIO;
1561 + }
1562 + if (!(dev = alloc_etherdev (sizeof(struct mac_info)))) {
1563 + return -ENOMEM;
1564 + }
1565 + SET_MODULE_OWNER(dev);
1566 + SET_NETDEV_DEV(dev, &pdev->dev);
1567 + mac = netdev_priv(dev);
1568 +
1569 + size = res->end - res->start +1;
1570 + mac->res = request_mem_region(res->start, size, IXMAC_NAME);
1571 + if (!mac->res) {
1572 + ret = -EBUSY;
1573 + goto out_free;
1574 + }
1575 +
1576 + mac->addr = ioremap(res->start, size);
1577 + if (!mac->addr) {
1578 + ret = -ENOMEM;
1579 + goto out_rel;
1580 + }
1581 +
1582 + dev->open = ixmac_open;
1583 + dev->hard_start_xmit = ixmac_start_xmit;
1584 + dev->stop = ixmac_close;
1585 + dev->get_stats = ixmac_stats;
1586 + dev->do_ioctl = ixmac_ioctl;
1587 + dev->set_multicast_list = ixmac_set_rx_mode;
1588 + dev->ethtool_ops = &ixmac_ethtool_ops;
1589 +
1590 + mac->npe_dev = get_npe_by_id(plat->npe_id);
1591 + if (!mac->npe_dev) {
1592 + ret = -EIO;
1593 + goto out_unmap;
1594 + }
1595 + if (!try_module_get(mac->npe_dev->driver->owner)) {
1596 + put_device(mac->npe_dev);
1597 + ret = -EIO;
1598 + goto out_unmap;
1599 + }
1600 +
1601 + npe = dev_get_drvdata(mac->npe_dev);
1602 +
1603 + mac->rxq = request_queue(plat->rxq_id, 128);
1604 + if (IS_ERR(mac->rxq)) {
1605 + printk(KERN_ERR "Error requesting Q: %d\n", plat->rxq_id);
1606 + ret = -EBUSY;
1607 + goto out_putmod;
1608 + }
1609 + mac->txq = request_queue(plat->txq_id, 128);
1610 + if (IS_ERR(mac->txq)) {
1611 + printk(KERN_ERR "Error requesting Q: %d\n", plat->txq_id);
1612 + release_queue(mac->rxq);
1613 + ret = -EBUSY;
1614 + goto out_putmod;
1615 + }
1616 +
1617 + mac->qmgr = dev_get_drvdata(mac->rxq->dev);
1618 + if (register_netdev (dev)) {
1619 + release_queue(mac->rxq);
1620 + release_queue(mac->txq);
1621 + ret = -EIO;
1622 + goto out_putmod;
1623 + }
1624 +
1625 + mac->plat = plat;
1626 + platform_set_drvdata(pdev, dev);
1627 +
1628 + mac_write_reg(mac, MAC_CORE_CNTRL, CORE_RESET);
1629 + udelay(500);
1630 + mac_write_reg(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
1631 +
1632 + init_mdio(dev, plat->phy_id);
1633 +
1634 + INIT_WORK(&mac->mdio_thread, mac_mdio_thread, dev);
1635 +
1636 + /* The place of the MAC address is very system dependent.
1637 + * Here we use a random one to be replaced by one of the
1638 + * following commands:
1639 + * "ip link set address 02:03:04:04:04:01 dev eth0"
1640 + * "ifconfig eth0 hw ether 02:03:04:04:04:07"
1641 + */
1642 + random_ether_addr(dev->dev_addr);
1643 + dev->dev_addr[5] = plat->phy_id;
1644 +
1645 + printk(KERN_INFO IXMAC_NAME " driver " IXMAC_VERSION
1646 + ": %s on %s with PHY[%d] initialized\n",
1647 + dev->name, npe->plat->name, plat->phy_id);
1648 +
1649 + return 0;
1650 +
1651 +out_putmod:
1652 + module_put(mac->npe_dev->driver->owner);
1653 +out_unmap:
1654 + iounmap(mac->addr);
1655 +out_rel:
1656 + release_resource(mac->res);
1657 +out_free:
1658 + kfree(mac);
1659 + return ret;
1660 +}
1661 +
1662 +static int mac_remove(struct platform_device *pdev)
1663 +{
1664 + struct net_device* dev = platform_get_drvdata(pdev);
1665 + struct mac_info *mac = netdev_priv(dev);
1666 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
1667 + int loop = 0;
1668 + struct sk_buff *skb;
1669 +
1670 + ixmac_close(dev);
1671 +
1672 + mac->unloading = 1;
1673 +
1674 + /* Now there are some skbs held by the NPE.
1675 + * We switch the MAC into loopback mode and send a pseudo packet
1676 + * that will be returned by the NPE in its last SKB.
1677 + * We will also try to isolate the PHY to keep the packets internal.
1678 + */
1679 +
1680 + if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
1681 + mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
1682 + mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN);
1683 +
1684 + npe_mh_npe_loopback_mode(npe, mac->plat, 1);
1685 + mdelay(200);
1686 +
1687 + while (mac->rxq_pkt && loop++ < 2000 ) {
1688 + skb = dev_alloc_skb(128);
1689 + skb_put(skb, 64);
1690 + /* actually the packets should never leave the system,
1691 + * but if they do, they shall contain 0s instead of
1692 + * interesting random data....
1693 + */
1694 + memset(skb->data, 0, skb->len);
1695 + queue_send_skb(mac->txq, skb);
1696 +
1697 + mdelay(1);
1698 + }
1699 +
1700 + npe_mh_npe_loopback_mode(npe, mac->plat, 0);
1701 + }
1702 + /* Flush the MAC TX FIFO to drain the bogus packets */
1703 + mac_set_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
1704 + mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_RX_EN);
1705 + mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_TX_EN);
1706 + mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN);
1707 + mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
1708 + mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
1709 +
1710 + unregister_netdev(dev);
1711 +
1712 + while (queue_drain(mac->txq));
1713 + release_queue(mac->txq);
1714 + while (queue_drain(mac->rxq));
1715 + release_queue(mac->rxq);
1716 +
1717 + module_put(mac->npe_dev->driver->owner);
1718 + put_device(mac->npe_dev);
1719 +
1720 + iounmap(mac->addr);
1721 + release_resource(mac->res);
1722 + platform_set_drvdata(pdev, NULL);
1723 + free_netdev(dev);
1724 + return 0;
1725 +}
1726 +
1727 +static struct platform_driver ixp4xx_mac = {
1728 + .driver.name = IXMAC_NAME,
1729 + .probe = mac_probe,
1730 + .remove = mac_remove,
1731 +};
1732 +
1733 +static int __init init_mac(void)
1734 +{
1735 + /* The TX done Queue handles skbs sent out by the NPE */
1736 + tx_doneq = request_queue(TX_DONE_QID, 128);
1737 + if (IS_ERR(tx_doneq)) {
1738 + printk(KERN_ERR "Error requesting Q: %d\n", TX_DONE_QID);
1739 + return -EBUSY;
1740 + }
1741 + tx_doneq->irq_cb = irqcb_txdone;
1742 + /* drain the TX queue if it is half full */
1743 + queue_set_watermarks(tx_doneq, 0, 64);
1744 + queue_set_irq_src(tx_doneq, Q_IRQ_ID_NF);
1745 + queue_enable_irq(tx_doneq);
1746 +
1747 + /* RX Queue handles SKBs with a valid frame */
1748 + rx_doneq = request_queue(RX_DONE_QID, 128);
1749 + if (IS_ERR(rx_doneq)) {
1750 + printk(KERN_ERR "Error requesting Q: %d\n", RX_DONE_QID);
1751 + return -EBUSY;
1752 + }
1753 + irqcb_recv(rx_doneq);
1754 + rx_doneq->irq_cb = irqcb_recv;
1755 + queue_set_watermarks(rx_doneq, 0, 0);
1756 + queue_set_irq_src(rx_doneq, Q_IRQ_ID_NOT_E);
1757 + queue_enable_irq(rx_doneq);
1758 +
1759 + return platform_driver_register(&ixp4xx_mac);
1760 +}
1761 +
1762 +static void __exit finish_mac(void)
1763 +{
1764 + platform_driver_unregister(&ixp4xx_mac);
1765 + if (tx_doneq) {
1766 + queue_disable_irq(tx_doneq);
1767 + while (queue_drain(tx_doneq));
1768 + release_queue(tx_doneq);
1769 + }
1770 + if (rx_doneq) {
1771 + queue_disable_irq(rx_doneq);
1772 + while (queue_drain(rx_doneq));
1773 + release_queue(rx_doneq);
1774 + }
1775 +}
1776 +
1777 +module_init(init_mac);
1778 +module_exit(finish_mac);
1779 +
1780 +MODULE_LICENSE("GPL");
1781 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1782 +
1783 diff -Nur linux-2.6.17/drivers/net/ixp4xx/npe_mh.c linux-2.6.17-owrt/drivers/net/ixp4xx/npe_mh.c
1784 --- linux-2.6.17/drivers/net/ixp4xx/npe_mh.c 1970-01-01 01:00:00.000000000 +0100
1785 +++ linux-2.6.17-owrt/drivers/net/ixp4xx/npe_mh.c 2006-10-27 12:48:54.000000000 +0200
1786 @@ -0,0 +1,137 @@
1787 +/*
1788 + * npe_mh.c - NPE message handler.
1789 + *
1790 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1791 + *
1792 + * This file is released under the GPLv2
1793 + */
1794 +
1795 +#include <linux/ixp_npe.h>
1796 +#include <linux/slab.h>
1797 +
1798 +#define MAX_RETRY 200
1799 +
1800 +struct npe_mh_msg {
1801 + union {
1802 +		u8 byte[8]; /* Very descriptive name, I know ... */
1803 + u32 data[2];
1804 + } u;
1805 +};
1806 +
1807 +/*
1808 + * The code in this function needs to be reworked.
1809 + * It works, but it is not rock solid.
1810 + */
1811 +static int send_message(struct npe_info *npe, struct npe_mh_msg *msg)
1812 +{
1813 + int i,j;
1814 + u32 send[2], recv[2];
1815 +
1816 + for (i=0; i<2; i++)
1817 + send[i] = be32_to_cpu(msg->u.data[i]);
1818 +
1819 + if ((npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
1820 + IX_NPEMH_NPE_STAT_IFNE))
1821 + return -1;
1822 +
1823 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[0]);
1824 + for(i=0; i<MAX_RETRY; i++) {
1825 + /* if the IFNF status bit is unset then the inFIFO is full */
1826 + if (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
1827 + IX_NPEMH_NPE_STAT_IFNF)
1828 + break;
1829 + }
1830 + if (i>=MAX_RETRY)
1831 + return -1;
1832 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[1]);
1833 + i=0;
1834 + while (!(npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
1835 + IX_NPEMH_NPE_STAT_OFNE)) {
1836 + if (i++>MAX_RETRY) {
1837 + printk("Waiting for Output FIFO NotEmpty failed\n");
1838 + return -1;
1839 + }
1840 + }
1841 + //printk("Output FIFO Not Empty. Loops: %d\n", i);
1842 + j=0;
1843 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
1844 + IX_NPEMH_NPE_STAT_OFNE) {
1845 + recv[j&1] = npe_reg_read(npe,IX_NPEDL_REG_OFFSET_FIFO);
1846 + j++;
1847 + }
1848 + if ((recv[0] != send[0]) || (recv[1] != send[1])) {
1849 + printk("Unexpected answer: Send %08x:%08x Ret %08x:%08x\n",
1850 + send[0], send[1], recv[0], recv[1]);
1851 + }
1852 + return 0;
1853 +}
1854 +
1855 +#define CMD 0
1856 +#define PORT 1
1857 +#define MAC 2
1858 +
1859 +#define IX_ETHNPE_EDB_SETPORTADDRESS 0x01
1860 +#define IX_ETHNPE_FW_SETFIREWALLMODE 0x0E
1861 +#define IX_ETHNPE_VLAN_SETRXQOSENTRY 0x0B
1862 +#define IX_ETHNPE_SETLOOPBACK_MODE 0x12
1863 +
1864 +#define logical_id(mp) (((mp)->npe_id << 4) | ((mp)->port_id & 0xf))
1865 +
1866 +int npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp,
1867 + u8 *macaddr)
1868 +{
1869 + struct npe_mh_msg msg;
1870 +
1871 + msg.u.byte[CMD] = IX_ETHNPE_EDB_SETPORTADDRESS;
1872 + msg.u.byte[PORT] = mp->eth_id;
1873 + memcpy(msg.u.byte + MAC, macaddr, 6);
1874 +
1875 + return send_message(npe, &msg);
1876 +}
1877 +
1878 +int npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp)
1879 +{
1880 + struct npe_mh_msg msg;
1881 +
1882 + memset(&msg, 0, sizeof(msg));
1883 + msg.u.byte[CMD] = IX_ETHNPE_FW_SETFIREWALLMODE;
1884 + msg.u.byte[PORT] = logical_id(mp);
1885 +
1886 + return send_message(npe, &msg);
1887 +}
1888 +
1889 +int npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp,
1890 + int enable)
1891 +{
1892 + struct npe_mh_msg msg;
1893 +
1894 + memset(&msg, 0, sizeof(msg));
1895 + msg.u.byte[CMD] = IX_ETHNPE_SETLOOPBACK_MODE;
1896 + msg.u.byte[PORT] = logical_id(mp);
1897 + msg.u.byte[3] = enable ? 1 : 0;
1898 +
1899 + return send_message(npe, &msg);
1900 +}
1901 +
1902 +int npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid)
1903 +{
1904 + struct npe_mh_msg msg;
1905 + int i, ret;
1906 +
1907 + memset(&msg, 0, sizeof(msg));
1908 + msg.u.byte[CMD] = IX_ETHNPE_VLAN_SETRXQOSENTRY;
1909 + msg.u.byte[PORT] = logical_id(mp);
1910 + msg.u.byte[5] = qid | 0x80;
1911 + msg.u.byte[7] = qid<<4;
1912 + for(i=0; i<8; i++) {
1913 + msg.u.byte[3] = i;
1914 + if ((ret = send_message(npe, &msg)))
1915 + return ret;
1916 + }
1917 + return 0;
1918 +}
1919 +
1920 +EXPORT_SYMBOL(npe_mh_setportaddr);
1921 +EXPORT_SYMBOL(npe_mh_disable_firewall);
1922 +EXPORT_SYMBOL(npe_mh_set_rxqid);
1923 +EXPORT_SYMBOL(npe_mh_npe_loopback_mode);
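As a reading aid for send_message() above: every request is an 8-byte record whose first byte is the command and whose second byte is a port identifier, pushed into the NPE inFIFO as two big-endian 32-bit words. The following stand-alone sketch (not part of the patch) rebuilds the IX_ETHNPE_SETLOOPBACK_MODE message the same way npe_mh_npe_loopback_mode() does and prints the two FIFO words; the npe/port/enable values are arbitrary examples.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IX_ETHNPE_SETLOOPBACK_MODE	0x12

int main(void)
{
	uint8_t byte[8];
	uint32_t word[2];
	int npe_id = 1, port_id = 0, enable = 1;	/* example values */
	int i;

	memset(byte, 0, sizeof(byte));
	byte[0] = IX_ETHNPE_SETLOOPBACK_MODE;		/* CMD */
	byte[1] = (npe_id << 4) | (port_id & 0xf);	/* PORT = logical_id() */
	byte[3] = enable ? 1 : 0;

	/* send_message() always feeds the FIFO most-significant byte first */
	for (i = 0; i < 2; i++)
		word[i] = ((uint32_t)byte[4 * i] << 24) |
			  ((uint32_t)byte[4 * i + 1] << 16) |
			  ((uint32_t)byte[4 * i + 2] << 8) |
			  (uint32_t)byte[4 * i + 3];

	printf("inFIFO words: %08" PRIx32 " %08" PRIx32 "\n", word[0], word[1]);
	return 0;
}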
1924 diff -Nur linux-2.6.17/drivers/net/ixp4xx/phy.c linux-2.6.17-owrt/drivers/net/ixp4xx/phy.c
1925 --- linux-2.6.17/drivers/net/ixp4xx/phy.c 1970-01-01 01:00:00.000000000 +0100
1926 +++ linux-2.6.17-owrt/drivers/net/ixp4xx/phy.c 2006-10-27 12:48:54.000000000 +0200
1927 @@ -0,0 +1,113 @@
1928 +/*
1929 + * phy.c - MDIO functions and mii initialisation
1930 + *
1931 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1932 + *
1933 + * This file is released under the GPLv2
1934 + */
1935 +
1936 +
1937 +#include <linux/mutex.h>
1938 +#include "mac.h"
1939 +
1940 +#define MAX_PHYS (1<<5)
1941 +
1942 +/*
1943 + * We must always use the same MAC for accessing the MDIO bus;
1944 + * we cannot use each MAC to talk to its own PHY :-(
1945 + */
1946 +
1947 +static struct net_device *phy_dev = NULL;
1948 +static struct mutex mtx;
1949 +
1950 +/* here we remember if the PHY is alive, to avoid log dumping */
1951 +static int phy_works[MAX_PHYS];
1952 +
1953 +int mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg)
1954 +{
1955 + struct mac_info *mac;
1956 + u32 cmd, reg;
1957 + int cnt = 0;
1958 +
1959 + if (!phy_dev)
1960 + return 0;
1961 +
1962 + mac = netdev_priv(phy_dev);
1963 + cmd = mdio_cmd(phy_addr, phy_reg);
1964 + mutex_lock_interruptible(&mtx);
1965 + mac_mdio_cmd_write(mac, cmd);
1966 + while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
1967 + if (++cnt >= 100) {
1968 + printk("%s: PHY[%d] access failed\n",
1969 + dev->name, phy_addr);
1970 + break;
1971 + }
1972 + schedule();
1973 + }
1974 + reg = mac_mdio_status_read(mac);
1975 + mutex_unlock(&mtx);
1976 + if (reg & MII_READ_FAIL) {
1977 + if (phy_works[phy_addr]) {
1978 + printk("%s: PHY[%d] unresponsive\n",
1979 + dev->name, phy_addr);
1980 + }
1981 + reg = 0;
1982 + phy_works[phy_addr] = 0;
1983 + } else {
1984 + if ( !phy_works[phy_addr]) {
1985 + printk("%s: PHY[%d] responsive again\n",
1986 + dev->name, phy_addr);
1987 + }
1988 + phy_works[phy_addr] = 1;
1989 + }
1990 + return reg & 0xffff;
1991 +}
1992 +
1993 +void
1994 +mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val)
1995 +{
1996 + struct mac_info *mac;
1997 + u32 cmd;
1998 + int cnt=0;
1999 +
2000 + if (!phy_dev)
2001 + return;
2002 +
2003 + mac = netdev_priv(phy_dev);
2004 + cmd = mdio_cmd(phy_addr, phy_reg) | MII_WRITE | val;
2005 +
2006 + mutex_lock_interruptible(&mtx);
2007 + mac_mdio_cmd_write(mac, cmd);
2008 + while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
2009 + if (++cnt >= 100) {
2010 + printk("%s: PHY[%d] access failed\n",
2011 + dev->name, phy_addr);
2012 + break;
2013 + }
2014 + schedule();
2015 + }
2016 + mutex_unlock(&mtx);
2017 +}
2018 +
2019 +void init_mdio(struct net_device *dev, int phy_id)
2020 +{
2021 + struct mac_info *mac = netdev_priv(dev);
2022 + int i;
2023 +
2024 + /* All phy operations should use the same MAC
2025 + * (my experience)
2026 + */
2027 + if (mac->plat->eth_id == 0) {
2028 + mutex_init(&mtx);
2029 + phy_dev = dev;
2030 + for (i=0; i<MAX_PHYS; i++)
2031 + phy_works[i] = 1;
2032 + }
2033 + mac->mii.dev = dev;
2034 + mac->mii.phy_id = phy_id;
2035 + mac->mii.phy_id_mask = MAX_PHYS - 1;
2036 + mac->mii.reg_num_mask = 0x1f;
2037 + mac->mii.mdio_read = mdio_read_register;
2038 + mac->mii.mdio_write = mdio_write_register;
2039 +}
2040 +
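Because init_mdio() above fills a standard struct mii_if_info, the generic helpers from <linux/mii.h> can sit directly on top of mdio_read_register()/mdio_write_register(). A hedged sketch of how the MAC driver could poll link state through that interface follows; the function name and the call site are assumptions, not taken from the patch.

#include <linux/mii.h>
#include <linux/netdevice.h>
#include "mac.h"

/* Hedged sketch, not part of the patch: report link state via the
 * mii_if_info that init_mdio() set up. */
static void mac_check_link(struct net_device *dev)
{
	struct mac_info *mac = netdev_priv(dev);

	/* ok_to_print=1, init_media=0: log speed/duplex changes only */
	mii_check_media(&mac->mii, 1, 0);

	if (mii_link_ok(&mac->mii))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}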
2041 diff -Nur linux-2.6.17/drivers/net/ixp4xx/qmgr_eth.c linux-2.6.17-owrt/drivers/net/ixp4xx/qmgr_eth.c
2042 --- linux-2.6.17/drivers/net/ixp4xx/qmgr_eth.c 1970-01-01 01:00:00.000000000 +0100
2043 +++ linux-2.6.17-owrt/drivers/net/ixp4xx/qmgr_eth.c 2006-10-27 12:48:54.000000000 +0200
2044 @@ -0,0 +1,127 @@
2045 +/*
2046 + * qmgr_eth.c - Glue between the qmgr and the MAC driver. Linked into the
2047 + * MAC module to keep qmgr.ko generic
2048 + *
2049 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
2050 + *
2051 + * This file is released under the GPLv2
2052 + */
2053 +
2054 +#include <linux/skbuff.h>
2055 +#include <linux/dma-mapping.h>
2056 +#include <linux/netdevice.h>
2057 +#include <linux/ixp_qmgr.h>
2058 +
2059 +#define SKB_SIZE 1688
2060 +
2061 +int queue_send_skb(struct qm_queue *queue, struct sk_buff *skb)
2062 +{
2063 + struct npe_cont *cont;
2064 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
2065 +
2066 + cont = qmgr_get_cont(qmgr);
2067 + if (!cont)
2068 + return 0;
2069 +
2070 + cont->h.skb = skb;
2071 +#ifndef __ARMEB__
2072 + /* swap the payload of the SKB */
2073 + {
2074 + u32 *p = (u32*)((unsigned)skb->data & ~0x3);
2075 + u32 *e = (u32*)(((unsigned)skb->data + skb->len + 3) & ~0x3);
2076 + while (p < e)
2077 + *p = cpu_to_be32(*p), ++p;
2078 + }
2079 +#endif
2080 + /* fill the NPE information record */
2081 + cont->ctl.eth.next = 0;
2082 + cont->ctl.eth.buf_len = skb->end - skb->head;
2083 + cont->ctl.eth.pkt_len = skb->len;
2084 + cont->ctl.eth.phys_addr =
2085 + dma_map_single(queue->dev, skb->data, skb->len, DMA_TO_DEVICE);
2086 +
2087 + queue_put_entry(queue, cont->phys);
2088 +
2089 + if (queue_stat(queue) == 2) { /* overflow */
2090 + return 0;
2091 + }
2092 + return 1;
2093 +}
2094 +
2095 +int queue_fill_skb(struct qm_queue *queue, struct net_device *dev)
2096 +{
2097 + struct npe_cont *cont;
2098 + struct sk_buff *skb;
2099 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
2100 + int len;
2101 +
2102 + cont = qmgr_get_cont(qmgr);
2103 + if (!cont)
2104 + return 0;
2105 + skb = dev_alloc_skb(SKB_SIZE);
2106 + if (!skb) {
2107 + qmgr_return_cont(qmgr, cont);
2108 + return 0;
2109 + }
2110 + len = skb->end - skb->data;
2111 + skb->dev = dev;
2112 + cont->h.skb = skb;
2113 + cont->ctl.eth.next = 0;
2114 + cont->ctl.eth.buf_len = len;
2115 + cont->ctl.eth.pkt_len = 0;
2116 + cont->ctl.eth.phys_addr =
2117 + dma_map_single(queue->dev, skb->data, len, DMA_FROM_DEVICE);
2118 +
2119 + queue_put_entry(queue, cont->phys);
2120 +
2121 +	/* TODO: check queue length?
2122 +	 * The current use guarantees that these queues will never overflow.
2123 + */
2124 + return 1;
2125 +}
2126 +
2127 +int queue_drain(struct qm_queue *queue)
2128 +{
2129 + u32 phys = *queue->acc_reg & ~0xf;
2130 + struct npe_cont *cont;
2131 +
2132 + if (!phys)
2133 + return 0;
2134 + cont = dma_to_virt(queue->dev, phys);
2135 + cont = cont->virt;
2136 + dev_kfree_skb_any(cont->h.skb);
2137 + qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
2138 + return 1;
2139 +}
2140 +
2141 +struct sk_buff *queue_return_skb(struct qm_queue *queue)
2142 +{
2143 + u32 phys = *queue->acc_reg & ~0xf;
2144 + struct sk_buff *skb;
2145 + struct npe_cont *cont;
2146 + int len, buflen;
2147 +
2148 + if (!phys)
2149 + return NULL;
2150 +
2151 + cont = dma_to_virt(queue->dev, phys);
2152 + cont = cont->virt;
2153 + skb = cont->h.skb;
2154 + buflen = cont->ctl.eth.buf_len;
2155 + len = cont->ctl.eth.pkt_len;
2156 + dma_unmap_single(queue->dev, cont->ctl.eth.phys_addr,
2157 + buflen, DMA_FROM_DEVICE);
2158 + qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
2159 + skb_put(skb, len);
2160 +#ifndef __ARMEB__
2161 + /* swap the payload of the SKB */
2162 + {
2163 + u32 *p = (u32*)((unsigned)skb->data & ~0x3);
2164 + u32 *e = (u32*)(((unsigned)skb->data + skb->len + 3) & ~0x3);
2165 + while (p < e)
2166 + *p = cpu_to_be32(*p), ++p;
2167 + }
2168 +#endif
2169 + return skb;
2170 +}
2171 +
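The #ifndef __ARMEB__ blocks in queue_send_skb() and queue_return_skb() compensate for the NPE treating frame data as big-endian 32-bit words: on a little-endian kernel every word of the payload is byte-swapped before transmit and swapped back after receive, over a range rounded out to 32-bit boundaries. A stand-alone sketch (not part of the patch) of that round trip:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl() stands in for cpu_to_be32() */

static void swap_payload(uint8_t *data, size_t len)
{
	/* round the range out to 32-bit boundaries, like the driver does */
	uint32_t *p = (uint32_t *)((uintptr_t)data & ~(uintptr_t)0x3);
	uint32_t *e = (uint32_t *)(((uintptr_t)data + len + 3) & ~(uintptr_t)0x3);

	while (p < e) {
		*p = htonl(*p);
		p++;
	}
}

int main(void)
{
	union { uint32_t w[2]; uint8_t b[8]; } buf =
		{ .b = { 1, 2, 3, 4, 5, 6, 7, 8 } };

	swap_payload(buf.b, sizeof(buf.b));	/* before queue_send_skb()  */
	swap_payload(buf.b, sizeof(buf.b));	/* after queue_return_skb() */
	printf("round trip intact: %d\n", buf.b[0] == 1 && buf.b[7] == 8);
	return 0;
}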
2172 diff -Nur linux-2.6.17/drivers/net/ixp4xx/ucode_dl.c linux-2.6.17-owrt/drivers/net/ixp4xx/ucode_dl.c
2173 --- linux-2.6.17/drivers/net/ixp4xx/ucode_dl.c 1970-01-01 01:00:00.000000000 +0100
2174 +++ linux-2.6.17-owrt/drivers/net/ixp4xx/ucode_dl.c 2006-10-27 12:48:54.000000000 +0200
2175 @@ -0,0 +1,466 @@
2176 +/*
2177 + * ucode_dl.c - provide an NPE device and a char-dev for microcode download
2178 + *
2179 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
2180 + *
2181 + * This file is released under the GPLv2
2182 + */
2183 +
2184 +#include <linux/kernel.h>
2185 +#include <linux/module.h>
2186 +#include <linux/miscdevice.h>
2187 +#include <linux/platform_device.h>
2188 +#include <linux/fs.h>
2189 +#include <linux/init.h>
2190 +#include <linux/slab.h>
2191 +#include <linux/firmware.h>
2192 +#include <linux/dma-mapping.h>
2193 +#include <linux/byteorder/swab.h>
2194 +#include <asm/uaccess.h>
2195 +#include <asm/io.h>
2196 +
2197 +#include <linux/ixp_npe.h>
2198 +
2199 +#define IXNPE_VERSION "IXP4XX NPE driver Version 0.2.0"
2200 +
2201 +#define DL_MAGIC 0xfeedf00d
2202 +#define DL_MAGIC_SWAP 0x0df0edfe
2203 +
2204 +#define EOF_BLOCK 0xf
2205 +#define IMG_SIZE(image) (((image)->size * sizeof(u32)) + \
2206 + sizeof(struct dl_image))
2207 +
2208 +#define BT_INSTR 0
2209 +#define BT_DATA 1
2210 +
2211 +enum blk_type {
2212 + instruction,
2213 + data,
2214 +};
2215 +
2216 +struct dl_block {
2217 + u32 type;
2218 + u32 offset;
2219 +};
2220 +
2221 +struct dl_image {
2222 + u32 magic;
2223 + u32 id;
2224 + u32 size;
2225 + union {
2226 + u32 data[0];
2227 + struct dl_block block[0];
2228 + } u;
2229 +};
2230 +
2231 +struct dl_codeblock {
2232 + u32 npe_addr;
2233 + u32 size;
2234 + u32 data[0];
2235 +};
2236 +
2237 +static struct platform_driver ixp4xx_npe_driver;
2238 +
2239 +static void npe_stop(struct npe_info *npe)
2240 +{
2241 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STOP);
2242 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
2243 +}
2244 +static void npe_reset_active(struct npe_info *npe, u32 reg)
2245 +{
2246 + u32 regval;
2247 +
2248 + regval = npe_read_ecs_reg(npe, reg);
2249 + regval &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
2250 + npe_write_ecs_reg(npe, reg, regval);
2251 +}
2252 +
2253 +static void npe_start(struct npe_info *npe)
2254 +{
2255 + npe_reset_active(npe, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
2256 + npe_reset_active(npe, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
2257 + npe_reset_active(npe, IX_NPEDL_ECS_DBG_CTXT_REG_0);
2258 +
2259 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
2260 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_START);
2261 +}
2262 +
2263 +static int
2264 +download_block(struct npe_info *npe, struct dl_codeblock *cb, unsigned type)
2265 +{
2266 + int i;
2267 + int cmd;
2268 +
2269 + switch (type) {
2270 + case BT_DATA:
2271 + cmd = IX_NPEDL_EXCTL_CMD_WR_DATA_MEM;
2272 + if (cb->npe_addr + cb->size > npe->plat->data_size) {
2273 + printk(KERN_INFO "Data size too large: %d+%d > %d\n",
2274 + cb->npe_addr, cb->size, npe->plat->data_size);
2275 + return -EIO;
2276 + }
2277 + break;
2278 + case BT_INSTR:
2279 + cmd = IX_NPEDL_EXCTL_CMD_WR_INS_MEM;
2280 + if (cb->npe_addr + cb->size > npe->plat->inst_size) {
2281 + printk(KERN_INFO "Instr size too large: %d+%d > %d\n",
2282 + cb->npe_addr, cb->size, npe->plat->inst_size);
2283 + return -EIO;
2284 + }
2285 + break;
2286 + default:
2287 + printk(KERN_INFO "Unknown CMD: %d\n", type);
2288 + return -EIO;
2289 + }
2290 +
2291 + for (i=0; i < cb->size; i++) {
2292 + npe_write_cmd(npe, cb->npe_addr + i, cb->data[i], cmd);
2293 + }
2294 +
2295 + return 0;
2296 +}
2297 +
2298 +static int match_by_npeid(struct device *dev, void *id)
2299 +{
2300 + struct npe_info *npe = dev_get_drvdata(dev);
2301 + if (!npe->plat)
2302 + return 0;
2303 + return (npe->plat->id == *(int*)id);
2304 +}
2305 +
2306 +struct device *get_npe_by_id(int id)
2307 +{
2308 + return driver_find_device(&ixp4xx_npe_driver.driver, NULL,
2309 + &id, match_by_npeid);
2310 +}
2311 +
2312 +static int store_npe_image(struct dl_image *image, struct device *dev)
2313 +{
2314 + struct dl_block *blk;
2315 + struct dl_codeblock *cb;
2316 + struct npe_info *npe;
2317 + int ret=0;
2318 +
2319 + if (!dev) {
2320 + dev = get_npe_by_id( (image->id >> 24) & 0xf);
2321 + put_device(dev);
2322 + }
2323 + if (!dev)
2324 + return -ENODEV;
2325 +
2326 + npe = dev_get_drvdata(dev);
2327 +
2328 + if ( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
2329 + printk(KERN_INFO "Cowardly refusing to reload an Image "
2330 + "into the running %s\n", npe->plat->name);
2331 + return 0; /* indicate success anyway... */
2332 + }
2333 + npe_stop(npe);
2334 +
2335 + for (blk = image->u.block; blk->type != EOF_BLOCK; blk++) {
2336 + if (blk->offset > image->size) {
2337 + printk(KERN_INFO "Block offset out of range\n");
2338 + return -EIO;
2339 + }
2340 + cb = (struct dl_codeblock*)&image->u.data[blk->offset];
2341 + if (blk->offset + cb->size + 2 > image->size) {
2342 + printk(KERN_INFO "Codeblock size out of range\n");
2343 + return -EIO;
2344 + }
2345 + if ((ret = download_block(npe, cb, blk->type)))
2346 + return ret;
2347 + }
2348 + *(u32*)npe->img_info = cpu_to_be32(image->id);
2349 + npe_start(npe);
2350 +
2351 + printk(KERN_INFO "Image loaded to %s Func:%x, Rel: %x:%x, Status: %x\n",
2352 + npe->plat->name, npe->img_info[1], npe->img_info[2],
2353 + npe->img_info[3], npe_status(npe));
2354 + return 0;
2355 +}
2356 +
2357 +static int ucode_open(struct inode *inode, struct file *file)
2358 +{
2359 + file->private_data = kmalloc(sizeof(struct dl_image), GFP_KERNEL);
2360 + if (!file->private_data)
2361 + return -ENOMEM;
2362 + return 0;
2363 +}
2364 +
2365 +static int ucode_close(struct inode *inode, struct file *file)
2366 +{
2367 + kfree(file->private_data);
2368 + return 0;
2369 +}
2370 +
2371 +static ssize_t ucode_write(struct file *file, const char __user *buf,
2372 + size_t count, loff_t *ppos)
2373 +{
2374 + union {
2375 + char *data;
2376 + struct dl_image *image;
2377 + } u;
2378 + const char __user *cbuf = buf;
2379 +
2380 + u.data = file->private_data;
2381 +
2382 + while (count) {
2383 + int len;
2384 + if (*ppos < sizeof(struct dl_image)) {
2385 + len = sizeof(struct dl_image) - *ppos;
2386 + len = len > count ? count : len;
2387 + if (copy_from_user(u.data + *ppos, cbuf, len))
2388 + return -EFAULT;
2389 + count -= len;
2390 + *ppos += len;
2391 + cbuf += len;
2392 + continue;
2393 + } else if (*ppos == sizeof(struct dl_image)) {
2394 + void *data;
2395 + if (u.image->magic == DL_MAGIC_SWAP) {
2396 + printk(KERN_INFO "swapped image found\n");
2397 + u.image->id = swab32(u.image->id);
2398 + u.image->size = swab32(u.image->size);
2399 + } else if (u.image->magic != DL_MAGIC) {
2400 + printk(KERN_INFO "Bad magic:%x\n",
2401 + u.image->magic);
2402 + return -EFAULT;
2403 + }
2404 + len = IMG_SIZE(u.image);
2405 + data = kmalloc(len, GFP_KERNEL);
2406 + if (!data)
2407 + return -ENOMEM;
2408 + memcpy(data, u.data, *ppos);
2409 + kfree(u.data);
2410 + u.data = (char*)data;
2411 + file->private_data = data;
2412 + }
2413 + len = IMG_SIZE(u.image) - *ppos;
2414 + len = len > count ? count : len;
2415 + if (copy_from_user(u.data + *ppos, cbuf, len))
2416 + return -EFAULT;
2417 + count -= len;
2418 + *ppos += len;
2419 + cbuf += len;
2420 + if (*ppos == IMG_SIZE(u.image)) {
2421 + int ret, i;
2422 + *ppos = 0;
2423 + if (u.image->magic == DL_MAGIC_SWAP) {
2424 + for (i=0; i<u.image->size; i++) {
2425 + u.image->u.data[i] =
2426 + swab32(u.image->u.data[i]);
2427 + }
2428 + u.image->magic = swab32(u.image->magic);
2429 + }
2430 + ret = store_npe_image(u.image, NULL);
2431 + if (ret) {
2432 + printk(KERN_INFO "Error in NPE image: %x\n",
2433 + u.image->id);
2434 + return ret;
2435 + }
2436 + }
2437 + }
2438 + return (cbuf-buf);
2439 +}
2440 +
2441 +static void npe_firmware_probe(struct device *dev)
2442 +{
2443 +#if (defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)) \
2444 + && defined(MODULE)
2445 + const struct firmware *fw_entry;
2446 + struct npe_info *npe = dev_get_drvdata(dev);
2447 + struct dl_image *image;
2448 + int ret = -1, i;
2449 +
2450 + if (request_firmware(&fw_entry, npe->plat->name, dev) != 0) {
2451 + return;
2452 + }
2453 + image = (struct dl_image*)fw_entry->data;
2454 + /* Sanity checks */
2455 + if (fw_entry->size < sizeof(struct dl_image)) {
2456 + printk(KERN_ERR "Firmware error: too small\n");
2457 + goto out;
2458 + }
2459 + if (image->magic == DL_MAGIC_SWAP) {
2460 + printk(KERN_INFO "swapped image found\n");
2461 + image->id = swab32(image->id);
2462 + image->size = swab32(image->size);
2463 + } else if (image->magic != DL_MAGIC) {
2464 + printk(KERN_ERR "Bad magic:%x\n", image->magic);
2465 + goto out;
2466 + }
2467 + if (IMG_SIZE(image) != fw_entry->size) {
2468 + printk(KERN_ERR "Firmware error: bad size\n");
2469 + goto out;
2470 + }
2471 + if (((image->id >> 24) & 0xf) != npe->plat->id) {
2472 +		printk(KERN_ERR "NPE id mismatch\n");
2473 + goto out;
2474 + }
2475 + if (image->magic == DL_MAGIC_SWAP) {
2476 + for (i=0; i<image->size; i++) {
2477 + image->u.data[i] = swab32(image->u.data[i]);
2478 + }
2479 + image->magic = swab32(image->magic);
2480 + }
2481 +
2482 + ret = store_npe_image(image, dev);
2483 +out:
2484 + if (ret) {
2485 + printk(KERN_ERR "Error downloading Firmware for %s\n",
2486 + npe->plat->name);
2487 + }
2488 + release_firmware(fw_entry);
2489 +#endif
2490 +}
2491 +
2492 +static void disable_npe_irq(struct npe_info *npe)
2493 +{
2494 + u32 reg;
2495 + reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL);
2496 + reg &= ~(IX_NPEMH_NPE_CTL_OFE | IX_NPEMH_NPE_CTL_IFE);
2497 + reg |= IX_NPEMH_NPE_CTL_OFEWE | IX_NPEMH_NPE_CTL_IFEWE;
2498 +}
2499 +
2500 +static ssize_t show_npe_state(struct device *dev, struct device_attribute *attr,
2501 + char *buf)
2502 +{
2503 + struct npe_info *npe = dev_get_drvdata(dev);
2504 +
2505 + strcpy(buf, npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN ?
2506 + "start\n" : "stop\n");
2507 + return strlen(buf);
2508 +}
2509 +
2510 +static ssize_t set_npe_state(struct device *dev, struct device_attribute *attr,
2511 + const char *buf, size_t count)
2512 +{
2513 + struct npe_info *npe = dev_get_drvdata(dev);
2514 +
2515 + if (!strncmp(buf, "start", 5)) {
2516 + printk("NPE start\n");
2517 + npe_start(npe);
2518 + }
2519 + if (!strncmp(buf, "stop", 4)) {
2520 + printk("NPE stop\n");
2521 + npe_stop(npe);
2522 + }
2523 + return count;
2524 +}
2525 +
2526 +static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_npe_state, set_npe_state);
2527 +
2528 +static int npe_probe(struct platform_device *pdev)
2529 +{
2530 + struct resource *res;
2531 + struct npe_info *npe;
2532 + struct npe_plat_data *plat = pdev->dev.platform_data;
2533 + int size, ret=0;
2534 +
2535 + if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
2536 + return -EIO;
2537 +
2538 + if (!(npe = kzalloc(sizeof(struct npe_info), GFP_KERNEL)))
2539 + return -ENOMEM;
2540 +
2541 + size = res->end - res->start +1;
2542 + npe->res = request_mem_region(res->start, size, plat->name);
2543 + if (!npe->res) {
2544 + ret = -EBUSY;
2545 + printk(KERN_ERR "Failed to get memregion(%x, %x)\n",
2546 + res->start, size);
2547 + goto out_free;
2548 + }
2549 +
2550 + npe->addr = ioremap(res->start, size);
2551 + if (!npe->addr) {
2552 + ret = -ENOMEM;
2553 + printk(KERN_ERR "Failed to ioremap(%x, %x)\n",
2554 + res->start, size);
2555 + goto out_rel;
2556 + }
2557 +
2558 + pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
2559 +
2560 + platform_set_drvdata(pdev, npe);
2561 +
2562 + device_create_file(&pdev->dev, &dev_attr_state);
2563 +
2564 + npe->plat = plat;
2565 + disable_npe_irq(npe);
2566 + if (! (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN))
2567 + npe_firmware_probe(&pdev->dev);
2568 +
2569 + return 0;
2570 +
2571 +out_rel:
2572 + release_resource(npe->res);
2573 +out_free:
2574 + kfree(npe);
2575 + return ret;
2576 +}
2577 +
2578 +static struct file_operations ucode_dl_fops = {
2579 + .owner = THIS_MODULE,
2580 + .write = ucode_write,
2581 + .open = ucode_open,
2582 + .release = ucode_close,
2583 +};
2584 +
2585 +static struct miscdevice ucode_dl_dev = {
2586 + .minor = MICROCODE_MINOR,
2587 + .name = "ixp4xx_ucode",
2588 + .fops = &ucode_dl_fops,
2589 +};
2590 +
2591 +static int npe_remove(struct platform_device *pdev)
2592 +{
2593 + struct npe_info *npe = platform_get_drvdata(pdev);
2594 +
2595 + device_remove_file(&pdev->dev, &dev_attr_state);
2596 +
2597 + iounmap(npe->addr);
2598 + release_resource(npe->res);
2599 + kfree(npe);
2600 + return 0;
2601 +}
2602 +
2603 +static struct platform_driver ixp4xx_npe_driver = {
2604 + .driver = {
2605 + .name = "ixp4xx_npe",
2606 + .owner = THIS_MODULE,
2607 + },
2608 + .probe = npe_probe,
2609 + .remove = npe_remove,
2610 +};
2611 +
2612 +static int __init init_npedriver(void)
2613 +{
2614 + int ret;
2615 + if ((ret = misc_register(&ucode_dl_dev))){
2616 + printk(KERN_ERR "Failed to register misc device %d\n",
2617 + MICROCODE_MINOR);
2618 + return ret;
2619 + }
2620 + if ((ret = platform_driver_register(&ixp4xx_npe_driver)))
2621 + misc_deregister(&ucode_dl_dev);
2622 + else
2623 + printk(KERN_INFO IXNPE_VERSION " initialized\n");
2624 +
2625 + return ret;
2626 +
2627 +}
2628 +
2629 +static void __exit finish_npedriver(void)
2630 +{
2631 + misc_deregister(&ucode_dl_dev);
2632 + platform_driver_unregister(&ixp4xx_npe_driver);
2633 +}
2634 +
2635 +module_init(init_npedriver);
2636 +module_exit(finish_npedriver);
2637 +
2638 +MODULE_LICENSE("GPL");
2639 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
2640 +
2641 +EXPORT_SYMBOL(get_npe_by_id);
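From user space the download path above is just a character device: ucode_open() allocates a parse buffer, ucode_write() collects the dl_image header, verifies the magic (byte-swapping the image if needed) and hands the complete image to store_npe_image(). A hedged user-space sketch of feeding an image file to that device follows; the device node path and the image file name are assumptions, not defined by the patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *img = argc > 1 ? argv[1] : "npe-b.img";	/* assumed name */
	char buf[4096];
	ssize_t n;
	int in, out;

	in = open(img, O_RDONLY);
	out = open("/dev/ixp4xx_ucode", O_WRONLY);	/* assumed node for the
							 * "ixp4xx_ucode" miscdevice */
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(in, buf, sizeof(buf))) > 0) {
		/* ucode_write() consumes the stream in arbitrary chunks */
		if (write(out, buf, n) != n) {
			perror("write");
			return 1;
		}
	}
	close(out);
	close(in);
	return 0;
}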
2642 diff -Nur linux-2.6.17/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h linux-2.6.17-owrt/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h
2643 --- linux-2.6.17/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h 2006-06-18 03:49:35.000000000 +0200
2644 +++ linux-2.6.17-owrt/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h 2006-10-27 12:48:54.000000000 +0200
2645 @@ -22,6 +22,8 @@
2646 #ifndef _ASM_ARM_IXP4XX_H_
2647 #define _ASM_ARM_IXP4XX_H_
2648
2649 +#include "npe_regs.h"
2650 +
2651 /*
2652 * IXP4xx Linux Memory Map:
2653 *
2654 diff -Nur linux-2.6.17/include/asm-arm/arch-ixp4xx/npe_regs.h linux-2.6.17-owrt/include/asm-arm/arch-ixp4xx/npe_regs.h
2655 --- linux-2.6.17/include/asm-arm/arch-ixp4xx/npe_regs.h 1970-01-01 01:00:00.000000000 +0100
2656 +++ linux-2.6.17-owrt/include/asm-arm/arch-ixp4xx/npe_regs.h 2006-10-27 12:48:54.000000000 +0200
2657 @@ -0,0 +1,82 @@
2658 +#ifndef NPE_REGS_H
2659 +#define NPE_REGS_H
2660 +
2661 +/* Execution Address */
2662 +#define IX_NPEDL_REG_OFFSET_EXAD 0x00
2663 +/* Execution Data */
2664 +#define IX_NPEDL_REG_OFFSET_EXDATA 0x04
2665 +/* Execution Control */
2666 +#define IX_NPEDL_REG_OFFSET_EXCTL 0x08
2667 +/* Execution Count */
2668 +#define IX_NPEDL_REG_OFFSET_EXCT 0x0C
2669 +/* Action Point 0 */
2670 +#define IX_NPEDL_REG_OFFSET_AP0 0x10
2671 +/* Action Point 1 */
2672 +#define IX_NPEDL_REG_OFFSET_AP1 0x14
2673 +/* Action Point 2 */
2674 +#define IX_NPEDL_REG_OFFSET_AP2 0x18
2675 +/* Action Point 3 */
2676 +#define IX_NPEDL_REG_OFFSET_AP3 0x1C
2677 +/* Watchpoint FIFO */
2678 +#define IX_NPEDL_REG_OFFSET_WFIFO 0x20
2679 +/* Watch Count */
2680 +#define IX_NPEDL_REG_OFFSET_WC 0x24
2681 +/* Profile Count */
2682 +#define IX_NPEDL_REG_OFFSET_PROFCT 0x28
2683 +
2684 +/* Messaging Status */
2685 +#define IX_NPEDL_REG_OFFSET_STAT 0x2C
2686 +/* Messaging Control */
2687 +#define IX_NPEDL_REG_OFFSET_CTL 0x30
2688 +/* Mailbox Status */
2689 +#define IX_NPEDL_REG_OFFSET_MBST 0x34
2690 +/* messaging in/out FIFO */
2691 +#define IX_NPEDL_REG_OFFSET_FIFO 0x38
2692 +
2693 +
2694 +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IF 0x00100000
2695 +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IE 0x00080000
2696 +#define IX_NPEDL_MASK_ECS_REG_0_ACTIVE 0x80000000
2697 +
2698 +#define IX_NPEDL_EXCTL_CMD_NPE_STEP 0x01
2699 +#define IX_NPEDL_EXCTL_CMD_NPE_START 0x02
2700 +#define IX_NPEDL_EXCTL_CMD_NPE_STOP 0x03
2701 +#define IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE 0x04
2702 +#define IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT 0x0C
2703 +#define IX_NPEDL_EXCTL_CMD_RD_INS_MEM 0x10
2704 +#define IX_NPEDL_EXCTL_CMD_WR_INS_MEM 0x11
2705 +#define IX_NPEDL_EXCTL_CMD_RD_DATA_MEM 0x12
2706 +#define IX_NPEDL_EXCTL_CMD_WR_DATA_MEM 0x13
2707 +#define IX_NPEDL_EXCTL_CMD_RD_ECS_REG 0x14
2708 +#define IX_NPEDL_EXCTL_CMD_WR_ECS_REG 0x15
2709 +
2710 +#define IX_NPEDL_EXCTL_STATUS_RUN 0x80000000
2711 +#define IX_NPEDL_EXCTL_STATUS_STOP 0x40000000
2712 +#define IX_NPEDL_EXCTL_STATUS_CLEAR 0x20000000
2713 +
2714 +#define IX_NPEDL_MASK_WFIFO_VALID 0x80000000
2715 +#define IX_NPEDL_MASK_STAT_OFNE 0x00010000
2716 +#define IX_NPEDL_MASK_STAT_IFNE 0x00080000
2717 +
2718 +#define IX_NPEDL_ECS_DBG_CTXT_REG_0 0x0C
2719 +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_0 0x04
2720 +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_0 0x08
2721 +
2722 +/* NPE control register bit definitions */
2723 +#define IX_NPEMH_NPE_CTL_OFE (1 << 16) /**< OutFifoEnable */
2724 +#define IX_NPEMH_NPE_CTL_IFE (1 << 17) /**< InFifoEnable */
2725 +#define IX_NPEMH_NPE_CTL_OFEWE (1 << 24) /**< OutFifoEnableWriteEnable */
2726 +#define IX_NPEMH_NPE_CTL_IFEWE (1 << 25) /**< InFifoEnableWriteEnable */
2727 +
2728 +/* NPE status register bit definitions */
2729 +#define IX_NPEMH_NPE_STAT_OFNE (1 << 16) /**< OutFifoNotEmpty */
2730 +#define IX_NPEMH_NPE_STAT_IFNF (1 << 17) /**< InFifoNotFull */
2731 +#define IX_NPEMH_NPE_STAT_OFNF (1 << 18) /**< OutFifoNotFull */
2732 +#define IX_NPEMH_NPE_STAT_IFNE (1 << 19) /**< InFifoNotEmpty */
2733 +#define IX_NPEMH_NPE_STAT_MBINT (1 << 20) /**< Mailbox interrupt */
2734 +#define IX_NPEMH_NPE_STAT_IFINT (1 << 21) /**< InFifo interrupt */
2735 +#define IX_NPEMH_NPE_STAT_OFINT (1 << 22) /**< OutFifo interrupt */
2736 +#define IX_NPEMH_NPE_STAT_WFINT (1 << 23) /**< WatchFifo interrupt */
2737 +
2738 +#endif
2739 +
2740 diff -Nur linux-2.6.17/include/asm-arm/arch-ixp4xx/platform.h linux-2.6.17-owrt/include/asm-arm/arch-ixp4xx/platform.h
2741 --- linux-2.6.17/include/asm-arm/arch-ixp4xx/platform.h 2006-06-18 03:49:35.000000000 +0200
2742 +++ linux-2.6.17-owrt/include/asm-arm/arch-ixp4xx/platform.h 2006-10-27 12:48:54.000000000 +0200
2743 @@ -86,6 +86,22 @@
2744 unsigned long scl_pin;
2745 };
2746
2747 +struct npe_plat_data {
2748 + const char *name;
2749 + int data_size;
2750 + int inst_size;
2751 + int id; /* Node ID */
2752 +};
2753 +
2754 +struct mac_plat_info {
2755 + int npe_id; /* Node ID of the NPE for this port */
2756 + int port_id; /* Port ID for NPE-B @ ixp465 */
2757 + int eth_id; /* Physical ID */
2758 + int phy_id; /* ID of the connected PHY (PCB/platform dependent) */
2759 +	int rxq_id;	/* Queue ID of the RX-free queue */
2760 + int txq_id; /* Where to push the outgoing packets */
2761 +};
2762 +
2763
2764 struct sys_timer;
2765
2766 diff -Nur linux-2.6.17/include/linux/ixp_npe.h linux-2.6.17-owrt/include/linux/ixp_npe.h
2767 --- linux-2.6.17/include/linux/ixp_npe.h 1970-01-01 01:00:00.000000000 +0100
2768 +++ linux-2.6.17-owrt/include/linux/ixp_npe.h 2006-10-27 12:48:54.000000000 +0200
2769 @@ -0,0 +1,85 @@
2770 +/*
2771 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
2772 + *
2773 + * This file is released under the GPLv2
2774 + */
2775 +
2776 +#ifndef NPE_DEVICE_H
2777 +#define NPE_DEVICE_H
2778 +
2779 +#include <linux/miscdevice.h>
2780 +#include <asm/hardware.h>
2781 +
2782 +struct npe_info {
2783 + struct resource *res;
2784 + void __iomem *addr;
2785 + struct npe_plat_data *plat;
2786 + u8 img_info[4];
2787 + u32 exec_count;
2788 + u32 ctx_reg2;
2789 +};
2790 +
2791 +
2792 +static inline void npe_reg_write(struct npe_info *npe, u32 reg, u32 val)
2793 +{
2794 + *(volatile u32*)((u8*)(npe->addr) + reg) = val;
2795 +}
2796 +
2797 +static inline u32 npe_reg_read(struct npe_info *npe, u32 reg)
2798 +{
2799 + return *(volatile u32*)((u8*)(npe->addr) + reg);
2800 +}
2801 +
2802 +static inline u32 npe_status(struct npe_info *npe)
2803 +{
2804 + return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCTL);
2805 +}
2806 +
2807 +/* ixNpeDlNpeMgrCommandIssue */
2808 +static inline void npe_write_exctl(struct npe_info *npe, u32 cmd)
2809 +{
2810 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
2811 +}
2812 +/* ixNpeDlNpeMgrWriteCommandIssue */
2813 +static inline void
2814 +npe_write_cmd(struct npe_info *npe, u32 addr, u32 data, int cmd)
2815 +{
2816 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXDATA, data);
2817 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
2818 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
2819 +}
2820 +/* ixNpeDlNpeMgrReadCommandIssue */
2821 +static inline u32
2822 +npe_read_cmd(struct npe_info *npe, u32 addr, int cmd)
2823 +{
2824 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
2825 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
2826 + /* Intel reads the data twice - so do we... */
2827 + npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
2828 + return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
2829 +}
2830 +
2831 +/* ixNpeDlNpeMgrExecAccRegWrite */
2832 +static inline void npe_write_ecs_reg(struct npe_info *npe, u32 addr, u32 data)
2833 +{
2834 + npe_write_cmd(npe, addr, data, IX_NPEDL_EXCTL_CMD_WR_ECS_REG);
2835 +}
2836 +/* ixNpeDlNpeMgrExecAccRegRead */
2837 +static inline u32 npe_read_ecs_reg(struct npe_info *npe, u32 addr)
2838 +{
2839 + return npe_read_cmd(npe, addr, IX_NPEDL_EXCTL_CMD_RD_ECS_REG);
2840 +}
2841 +
2842 +extern struct device *get_npe_by_id(int id);
2843 +
2844 +/* NPE Messages */
2845 +extern int
2846 +npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp, u8 *macaddr);
2847 +extern int
2848 +npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp);
2849 +extern int
2850 +npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid);
2851 +extern int
2852 +npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp, int enable);
2853 +
2854 +#endif
2855 diff -Nur linux-2.6.17/include/linux/ixp_qmgr.h linux-2.6.17-owrt/include/linux/ixp_qmgr.h
2856 --- linux-2.6.17/include/linux/ixp_qmgr.h 1970-01-01 01:00:00.000000000 +0100
2857 +++ linux-2.6.17-owrt/include/linux/ixp_qmgr.h 2006-10-27 12:48:54.000000000 +0200
2858 @@ -0,0 +1,188 @@
2859 +/*
2860 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
2861 + *
2862 + * This file is released under the GPLv2
2863 + */
2864 +
2865 +#ifndef IX_QMGR_H
2866 +#define IX_QMGR_H
2867 +
2868 +#include <linux/skbuff.h>
2869 +#include <linux/list.h>
2870 +#include <linux/if_ether.h>
2871 +#include <linux/spinlock.h>
2872 +#include <linux/platform_device.h>
2873 +#include <asm/atomic.h>
2874 +
2875 +/* All offsets are in 32bit words */
2876 +#define QUE_LOW_STAT0 0x100 /* 4x Status of the 32 lower queues 0-31 */
2877 +#define QUE_UO_STAT0 0x104 /* 2x Underflow/Overflow status bits*/
2878 +#define QUE_UO_STAT0	0x104	/* 2x Underflow/Overflow status bits */
2879 +#define QUE_UPP_STAT0	0x106	/* 2x Status of the 32 upper queues 32-63 */
2880 +#define QUE_IE_REG0 0x10c /* 2x */
2881 +#define QUE_INT_REG0 0x10e /* 2x IRQ reg, write 1 to reset IRQ */
2882 +
2883 +#define IX_QMGR_QCFG_BASE 0x800
2884 +#define IX_QMGR_QCFG_SIZE 0x40
2885 +#define IX_QMGR_SRAM_SPACE (IX_QMGR_QCFG_BASE + IX_QMGR_QCFG_SIZE)
2886 +
2887 +#define MAX_QUEUES 32 /* first, we only support the lower 32 queues */
2888 +#define MAX_NPES 3
2889 +
2890 +enum {
2891 + Q_IRQ_ID_E = 0, /* Queue Empty due to last read */
2892 + Q_IRQ_ID_NE, /* Queue Nearly Empty due to last read */
2893 + Q_IRQ_ID_NF, /* Queue Nearly Full due to last write */
2894 + Q_IRQ_ID_F, /* Queue Full due to last write */
2895 + Q_IRQ_ID_NOT_E, /* Queue Not Empty due to last write */
2896 + Q_IRQ_ID_NOT_NE, /* Queue Not Nearly Empty due to last write */
2897 + Q_IRQ_ID_NOT_NF, /* Queue Not Nearly Full due to last read */
2898 + Q_IRQ_ID_NOT_F /* Queue Not Full due to last read */
2899 +};
2900 +
2901 +extern struct qm_queue *request_queue(int qid, int len);
2902 +extern void release_queue(struct qm_queue *queue);
2903 +extern int queue_set_irq_src(struct qm_queue *queue, int flag);
2904 +extern void queue_set_watermarks(struct qm_queue *, unsigned ne, unsigned nf);
2905 +extern int queue_len(struct qm_queue *queue);
2906 +
2907 +struct qm_qmgr;
2908 +struct qm_queue;
2909 +
2910 +typedef void(*queue_cb)(struct qm_queue *);
2911 +
2912 +struct qm_queue {
2913 + int addr; /* word offset from IX_QMGR_SRAM_SPACE */
2914 + int len; /* size in words */
2915 + int id; /* Q Id */
2916 + u32 __iomem *acc_reg;
2917 + struct device *dev;
2918 + atomic_t use;
2919 + queue_cb irq_cb;
2920 + void *cb_data;
2921 +};
2922 +
2923 +struct eth_ctl {
2924 + u32 next;
2925 +#ifdef __ARMEB__
2926 + u16 buf_len;
2927 + u16 pkt_len;
2928 +#else
2929 + u16 pkt_len;
2930 + u16 buf_len;
2931 +#endif
2932 + u32 phys_addr;
2933 + u8 dest_id;
2934 + u8 src_id;
2935 + u16 flags;
2936 + u8 qos;
2937 + u8 padlen;
2938 + u16 vlan_tci;
2939 + u8 dest_mac[ETH_ALEN];
2940 + u8 src_mac[ETH_ALEN];
2941 +};
2942 +
2943 +struct npe_cont {
2944 + union {
2945 + struct eth_ctl eth;
2946 + } ctl;
2947 + union {
2948 + struct sk_buff *skb;
2949 + void *ptr;
2950 + } h;
2951 + struct npe_cont *next;
2952 + struct npe_cont *virt;
2953 + dma_addr_t phys;
2954 +};
2955 +
2956 +struct qm_qmgr {
2957 + u32 __iomem *addr;
2958 + struct resource *res;
2959 + struct qm_queue *queues[MAX_QUEUES];
2960 + rwlock_t lock;
2961 + struct npe_cont *pool;
2962 + struct dma_pool *dmapool;
2963 + int irq;
2964 +};
2965 +
2966 +static inline void queue_write_cfg_reg(struct qm_queue *queue, u32 val)
2967 +{
2968 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
2969 + *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id) = val;
2970 +}
2971 +static inline u32 queue_read_cfg_reg(struct qm_queue *queue)
2972 +{
2973 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
2974 + return *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
2975 +}
2976 +
2977 +static inline void queue_ack_irq(struct qm_queue *queue)
2978 +{
2979 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
2980 + *(qmgr->addr + QUE_INT_REG0) = 1 << queue->id;
2981 +}
2982 +
2983 +static inline void queue_enable_irq(struct qm_queue *queue)
2984 +{
2985 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
2986 + *(qmgr->addr + QUE_IE_REG0) |= 1 << queue->id;
2987 +}
2988 +
2989 +static inline void queue_disable_irq(struct qm_queue *queue)
2990 +{
2991 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
2992 + *(qmgr->addr + QUE_IE_REG0) &= ~(1 << queue->id);
2993 +}
2994 +
2995 +static inline void queue_put_entry(struct qm_queue *queue, u32 entry)
2996 +{
2997 + *(queue->acc_reg) = entry;
2998 +}
2999 +
3000 +static inline struct npe_cont *qmgr_get_cont(struct qm_qmgr *qmgr)
3001 +{
3002 + unsigned long flags;
3003 + struct npe_cont *cont;
3004 +
3005 + if (!qmgr->pool)
3006 + return NULL;
3007 + write_lock_irqsave(&qmgr->lock, flags);
3008 + cont = qmgr->pool;
3009 + qmgr->pool = cont->next;
3010 + write_unlock_irqrestore(&qmgr->lock, flags);
3011 + return cont;
3012 +}
3013 +
3014 +static inline void qmgr_return_cont(struct qm_qmgr *qmgr,struct npe_cont *cont)
3015 +{
3016 + unsigned long flags;
3017 +
3018 + write_lock_irqsave(&qmgr->lock, flags);
3019 + cont->next = qmgr->pool;
3020 + qmgr->pool = cont;
3021 + write_unlock_irqrestore(&qmgr->lock, flags);
3022 +}
3023 +
3024 +static inline int queue_stat(struct qm_queue *queue)
3025 +{
3026 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
3027 + u32 reg = *(qmgr->addr + QUE_UO_STAT0 + (queue->id >> 4));
3028 +	return (reg >> ((queue->id & 0xf) << 1)) & 3;
3029 +}
3030 +
3031 +/* Prints the queue state, which is very, very helpful for debugging */
3032 +static inline void queue_state(struct qm_queue *queue)
3033 +{
3034 + u32 val=0, lstat=0;
3035 + int offs;
3036 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
3037 +
3038 + offs = queue->id/8 + QUE_LOW_STAT0;
3039 + val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
3040 + lstat = (*(qmgr->addr + offs) >> ((queue->id % 8)*4)) & 0x0f;
3041 +
3042 + printk("Qid[%02d]: Wptr=%4x, Rptr=%4x, diff=%4x, Stat:%x\n", queue->id,
3043 + val&0x7f, (val>>7) &0x7f, (val - (val >> 7)) & 0x7f, lstat);
3044 +}
3045 +
3046 +#endif
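queue_stat() above selects one status word per 16 queues (queue->id >> 4), which implies two underflow/overflow bits per queue and matches the "== 2 means overflow" check in queue_send_skb(). A stand-alone sketch of that decoding, under that assumption and not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Stand-alone sketch, not part of the patch: decode the per-queue
 * underflow/overflow bits the way queue_stat() does, assuming two
 * status bits per queue. */
static unsigned int uo_stat(const uint32_t *uo_regs, unsigned int qid)
{
	uint32_t reg = uo_regs[qid >> 4];		/* one word per 16 queues */
	return (reg >> ((qid & 0xf) << 1)) & 3;		/* 2 bits per queue */
}

int main(void)
{
	uint32_t regs[2] = { 0, 0 };

	regs[1] = 2u << ((20 & 0xf) << 1);	/* flag queue 20 as overflowed */
	printf("queue 20 status: %u\n", uo_stat(regs, 20));	/* prints 2 */
	return 0;
}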