swap uart numbering on Pronghorn
[openwrt.git] / target / linux / ixp4xx / patches / 100-npe_driver.patch
1 Index: linux-2.6.21.7/Documentation/networking/ixp4xx/IxNpeMicrocode.h
2 ===================================================================
3 --- /dev/null
4 +++ linux-2.6.21.7/Documentation/networking/ixp4xx/IxNpeMicrocode.h
5 @@ -0,0 +1,143 @@
6 +/*
7 + * IxNpeMicrocode.h - Header file for compiling the Intel microcode C file
8 + *
9 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
10 + *
11 + * This file is released under the GPLv2
12 + *
13 + *
14 + * compile with
15 + *
16 + * gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode
17 + *
18 + * Executing the resulting binary on your build-host creates the
19 + * "NPE-[ABC].xxxxxxxx" files containing the selected microcode
20 + *
21 + * fetch the IxNpeMicrocode.c from the Intel Access Library.
22 + * It will include this header.
23 + *
24 + * select Images for every NPE from the following
25 + * (used C++ comments for easy uncommenting ....)
26 + */
27 +
28 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
29 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
30 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
31 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_TSLOT_SWITCH
32 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
33 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
34 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL
35 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_2_PORT
36 +// #define IX_NPEDL_NPEIMAGE_NPEA_DMA
37 +// #define IX_NPEDL_NPEIMAGE_NPEA_ATM_MPHY_12_PORT
38 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_MPHY_1_PORT
39 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_SPHY_1_PORT
40 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0
41 +// #define IX_NPEDL_NPEIMAGE_NPEA_WEP
42 +
43 +
44 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
45 +//#define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
46 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
47 +// #define IX_NPEDL_NPEIMAGE_NPEB_DMA
48 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
49 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
50 + #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL
51 +
52 +
53 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
54 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
55 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
56 +// #define IX_NPEDL_NPEIMAGE_NPEC_DMA
57 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_SPAN
58 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_FIREWALL
59 + #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_CCM_ETH
60 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_ETH_LEARN_FILTER_SPAN_FIREWALL
61 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
62 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
63 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL
64 +
65 +
66 +#include <stdio.h>
67 +#include <unistd.h>
68 +#include <stdlib.h>
69 +#include <netinet/in.h>
70 +#include <sys/types.h>
71 +#include <sys/stat.h>
72 +#include <fcntl.h>
73 +#include <errno.h>
74 +#include <endian.h>
75 +#include <byteswap.h>
76 +#include <string.h>
77 +
78 +#if __BYTE_ORDER == __LITTLE_ENDIAN
79 +#define to_le32(x) (x)
80 +#define to_be32(x) bswap_32(x)
81 +#else
82 +#define to_be32(x) (x)
83 +#define to_le32(x) bswap_32(x)
84 +#endif
85 +
86 +struct dl_image {
87 + unsigned magic;
88 + unsigned id;
89 + unsigned size;
90 + unsigned data[0];
91 +};
92 +
93 +const unsigned IxNpeMicrocode_array[];
94 +
95 +int main(int argc, char *argv[])
96 +{
97 + struct dl_image *image = (struct dl_image *)IxNpeMicrocode_array;
98 + int imgsiz, i, fd, cnt;
99 + const unsigned *arrayptr = IxNpeMicrocode_array;
100 + const char *names[] = { "IXP425", "IXP465", "unknown" };
101 + int bigendian = 1;
102 +
103 + if (argc > 1) {
104 + if (!strcmp(argv[1], "-le"))
105 + bigendian = 0;
106 + else if (!strcmp(argv[1], "-be"))
107 + bigendian = 1;
108 + else {
109 + printf("Usage: %s <-le|-be>\n", argv[0]);
110 + return EXIT_FAILURE;
111 + }
112 + }
113 +
114 + for (image = (struct dl_image *)arrayptr, cnt=0;
115 + (image->id != 0xfeedf00d) && (image->magic == 0xfeedf00d);
116 + image = (struct dl_image *)(arrayptr), cnt++)
117 + {
118 + unsigned char field[4];
119 + imgsiz = image->size + 3;
120 + *(unsigned*)field = to_be32(image->id);
121 + char filename[40], slnk[10];
122 +
123 + sprintf(filename, "NPE-%c.%08x", (field[0] & 0xf) + 'A',
124 + image->id);
125 + sprintf(slnk, "NPE-%c", (field[0] & 0xf) + 'A');
126 + printf("Writing image: %s.NPE_%c Func: %2x Rev: %02x.%02x "
127 + "Size: %5d to: '%s'\n",
128 + names[field[0] >> 4], (field[0] & 0xf) + 'A',
129 + field[1], field[2], field[3], imgsiz*4, filename);
130 + fd = open(filename, O_CREAT | O_RDWR | O_TRUNC, 0644);
131 + if (fd >= 0) {
132 + for (i=0; i<imgsiz; i++) {
133 + *(unsigned*)field = bigendian ?
134 + to_be32(arrayptr[i]) :
135 + to_le32(arrayptr[i]);
136 + write(fd, field, sizeof(field));
137 + }
138 + close(fd);
139 + unlink(slnk);
140 + symlink(filename, slnk);
141 + } else {
142 + perror(filename);
143 + }
144 + arrayptr += imgsiz;
145 + }
146 + close(fd);
147 + return 0;
148 +}
149 Index: linux-2.6.21.7/Documentation/networking/ixp4xx/README
150 ===================================================================
151 --- /dev/null
152 +++ linux-2.6.21.7/Documentation/networking/ixp4xx/README
153 @@ -0,0 +1,62 @@
154 +Information about the Networking Driver using the IXP4XX CPU internal NPEs
155 +and Queue manager.
156 +
157 +If this driver is used, the IAL (Intel Access Library) must not be loaded.
158 +However, the IAL may be loaded, if these modules are unloaded:
159 + ixp4xx_npe.ko, ixp4xx_qmgr.ko ixp4xx_mac.ko
160 +
161 +This also means that HW crypto acceleration does NOT work when using this
162 +driver, unless I have finished my crypto driver for NPE-C
163 +
164 +
165 +Adapting to your custom board:
166 +------------------------------
167 +use "arch/arm/mach-ixp4xx/ixdp425-setup.c" as template:
168 +
169 +in "static struct mac_plat_info" adapt the entry "phy_id" to your needs
170 +(Ask your hardware designer about the PHY id)
171 +
172 +The order of "&mac0" and "&mac1" in the "struct platform_device"
173 +determines which of them becomes eth0 and eth1
174 +
175 +
176 +The Microcode:
177 +---------------
178 +Solution 1)
179 + Configure "CONFIG_HOTPLUG" and "CONFIG_FW_LOADER" and configure
180 + IXP4XX_NPE as module.
181 + The default hotplug script will load the Firmware from
182 + /usr/lib/hotplug/firmware/NPE-[ABC]
183 + see Documentation/firmware_class/hotplug-script
184 +
185 + You should take care, that $ACTION is "add" and $SUBSYSTEM is "firmware"
186 + to avoid unnecessary calls:
187 + test $ACTION = "remove" -o $SUBSYSTEM != "firmware" && exit
188 +
189 +Solution 2)
190 + create a char-dev: "mknod /dev/misc/npe c 10 184"
191 + cat the Microcode into it:
192 + cat /usr/lib/hotplug/firmware/NPE-* > /dev/misc/npe
193 + This also works if the driver is linked to the kernel
194 +
195 + Having a mix of both (e.g. solution 1 for NPE-B and solution 2 for NPE-C)
196 + is perfectly ok and works.
197 +
198 + The state of the NPEs can be seen and changed at:
199 + /sys/bus/platform/devices/ixp4xx_npe.X/state
200 +
201 +
202 +Obtaining the Microcode:
203 +------------------------
204 +1) IxNpeMicrocode.h in this directory:
205 + Download IPL_IXP400NPELIBRARYWITHCRYPTO-2_1.ZIP from Intel
206 + It unpacks the Microcode IxNpeMicrocode.c
207 + Read the Licence !
208 + Compile it with "gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode" on your host.
209 + The resulting images can be moved to "/usr/lib/hotplug/firmware"
210 +
211 +2) mc_grab.c in this directory:
212 + Compile and execute it either on the host or on the target
213 + to grab the microcode from a binary image like the RedBoot bootloader.
214 +
215 +
216 Index: linux-2.6.21.7/Documentation/networking/ixp4xx/mc_grab.c
217 ===================================================================
218 --- /dev/null
219 +++ linux-2.6.21.7/Documentation/networking/ixp4xx/mc_grab.c
220 @@ -0,0 +1,97 @@
221 +/*
222 + * mc_grab.c - grabs IXP4XX microcode from a binary datastream
223 + * e.g. The redboot bootloader....
224 + *
225 + * usage: mc_grab 1010200 2010200 < /dev/mtd/0 > /dev/misc/npe
226 + *
227 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
228 + *
229 + * This file is released under the GPLv2
230 + */
231 +
232 +
233 +#include <stdlib.h>
234 +#include <stdio.h>
235 +#include <unistd.h>
236 +#include <netinet/in.h>
237 +#include <sys/types.h>
238 +#include <sys/stat.h>
239 +#include <fcntl.h>
240 +#include <errno.h>
241 +#include <string.h>
242 +
243 +#define MAX_IMG 6
244 +
245 +static void print_mc_info(unsigned id, int siz)
246 +{
247 + unsigned char buf[sizeof(unsigned)];
248 + *(unsigned*)buf = id;
249 + unsigned idx;
250 + const char *names[] = { "IXP425", "IXP465", "unknown" };
251 +
252 + idx = (buf[0] >> 4) < 2 ? (buf[0] >> 4) : 2;
253 +
254 + fprintf(stderr, "Device: %s:NPE_%c Func: %2x Rev: %02x.%02x "
255 + "Size: %5d bytes ID:%08x\n", names[idx], (buf[0] & 0xf)+'A',
256 + buf[1], buf[2], buf[3], siz*4, ntohl(id));
257 +}
258 +
259 +int main(int argc, char *argv[])
260 +{
261 + int i,j;
262 + unsigned char buf[sizeof(unsigned)];
263 + unsigned magic = htonl(0xfeedf00d);
264 + unsigned id, my_ids[MAX_IMG+1], siz, sizbe;
265 + int ret=1, verbose=0;
266 +
267 + for (i=0, j=0; i<argc-1 && j<MAX_IMG; i++) {
268 + if (!strcmp(argv[i+1], "-v"))
269 + verbose = 1;
270 + else
271 + my_ids[j++] = htonl(strtoul(argv[i+1], NULL, 16));
272 + }
273 + my_ids[j] = 0;
274 + if (my_ids[0] == 0 && !verbose) {
275 + fprintf(stderr, "Usage: %s <-v> [ID1] [ID2] [IDn]\n", argv[0]);
276 + return 1;
277 + }
278 +
279 + while ((ret=read(0, buf, sizeof(unsigned))) == sizeof(unsigned)) {
280 + if (*(unsigned*)buf != magic)
281 + continue;
282 + if ((ret=read(0, buf, sizeof(unsigned))) != sizeof(unsigned) )
283 + break;
284 + id = *(unsigned*)buf;
285 +
286 + if (read(0, buf, sizeof(siz)) != sizeof(siz) )
287 + break;
288 + sizbe = *(unsigned*)buf;
289 + siz = ntohl(sizbe);
290 +
291 + if (verbose)
292 + print_mc_info(id, siz);
293 +
294 + for(i=0; my_ids[i]; i++)
295 + if (id == my_ids[i])
296 + break;
297 + if (!my_ids[i])
298 + continue;
299 +
300 + if (!verbose)
301 + print_mc_info(id, siz);
302 +
303 + write(1, &magic, sizeof(magic));
304 + write(1, &id, sizeof(id));
305 + write(1, &sizbe, sizeof(sizbe));
306 + for (i=0; i<siz; i++) {
307 + if (read(0, buf, sizeof(unsigned)) != sizeof(unsigned))
308 + break;
309 + write(1, buf, sizeof(unsigned));
310 + }
311 + if (i != siz)
312 + break;
313 + }
314 + if (ret)
315 + fprintf(stderr, "Error reading Microcode\n");
316 + return ret;
317 +}
318 Index: linux-2.6.21.7/arch/arm/mach-ixp4xx/common.c
319 ===================================================================
320 --- linux-2.6.21.7.orig/arch/arm/mach-ixp4xx/common.c
321 +++ linux-2.6.21.7/arch/arm/mach-ixp4xx/common.c
322 @@ -357,6 +357,90 @@ static struct platform_device *ixp46x_de
323 &ixp46x_i2c_controller
324 };
325
326 +static struct npe_plat_data npea = {
327 + .name = "NPE-A",
328 + .data_size = 0x800,
329 + .inst_size = 0x1000,
330 + .id = 0,
331 +};
332 +
333 +static struct npe_plat_data npeb = {
334 + .name = "NPE-B",
335 + .data_size = 0x800,
336 + .inst_size = 0x800,
337 + .id = 1,
338 +};
339 +
340 +static struct npe_plat_data npec = {
341 + .name = "NPE-C",
342 + .data_size = 0x800,
343 + .inst_size = 0x800,
344 + .id = 2,
345 +};
346 +
347 +static struct resource res_npea = {
348 + .start = IXP4XX_NPEA_BASE_PHYS,
349 + .end = IXP4XX_NPEA_BASE_PHYS + 0xfff,
350 + .flags = IORESOURCE_MEM,
351 +};
352 +
353 +static struct resource res_npeb = {
354 + .start = IXP4XX_NPEB_BASE_PHYS,
355 + .end = IXP4XX_NPEB_BASE_PHYS + 0xfff,
356 + .flags = IORESOURCE_MEM,
357 +};
358 +
359 +static struct resource res_npec = {
360 + .start = IXP4XX_NPEC_BASE_PHYS,
361 + .end = IXP4XX_NPEC_BASE_PHYS + 0xfff,
362 + .flags = IORESOURCE_MEM,
363 +};
364 +
365 +static struct platform_device dev_npea = {
366 + .name = "ixp4xx_npe",
367 + .id = 0,
368 + .dev.platform_data = &npea,
369 + .num_resources = 1,
370 + .resource = &res_npea,
371 +};
372 +
373 +static struct platform_device dev_npeb = {
374 + .name = "ixp4xx_npe",
375 + .id = 1,
376 + .dev.platform_data = &npeb,
377 + .num_resources = 1,
378 + .resource = &res_npeb,
379 +};
380 +
381 +static struct platform_device dev_npec = {
382 + .name = "ixp4xx_npe",
383 + .id = 2,
384 + .dev.platform_data = &npec,
385 + .num_resources = 1,
386 + .resource = &res_npec,
387 +};
388 +
389 +/* QMGR */
390 +static struct resource res_qmgr[] = {
391 +{
392 + .start = IXP4XX_QMGR_BASE_PHYS,
393 + .end = IXP4XX_QMGR_BASE_PHYS + IXP4XX_QMGR_REGION_SIZE -1,
394 + .flags = IORESOURCE_MEM,
395 +}, {
396 + .start = IRQ_IXP4XX_QM1,
397 + .flags = IORESOURCE_IRQ,
398 +} };
399 +
400 +static struct platform_device qmgr = {
401 + .name = "ixp4xx_qmgr",
402 + .id = 0,
403 + .dev = {
404 + .coherent_dma_mask = DMA_32BIT_MASK,
405 + },
406 + .num_resources = ARRAY_SIZE(res_qmgr),
407 + .resource = res_qmgr,
408 +};
409 +
410 unsigned long ixp4xx_exp_bus_size;
411 EXPORT_SYMBOL(ixp4xx_exp_bus_size);
412
413 @@ -378,8 +462,19 @@ void __init ixp4xx_sys_init(void)
414 break;
415 }
416 }
417 + npeb.inst_size = 0x1000;
418 + npec.inst_size = 0x1000;
419 }
420
421 + platform_device_register(&qmgr);
422 +
423 + if (ix_fuse() & IX_FUSE_NPEA)
424 + platform_device_register(&dev_npea);
425 + if (ix_fuse() & IX_FUSE_NPEB)
426 + platform_device_register(&dev_npeb);
427 + if (ix_fuse() & IX_FUSE_NPEC)
428 + platform_device_register(&dev_npec);
429 +
430 printk("IXP4xx: Using %luMiB expansion bus window size\n",
431 ixp4xx_exp_bus_size >> 20);
432 }
433 Index: linux-2.6.21.7/arch/arm/mach-ixp4xx/ixdp425-setup.c
434 ===================================================================
435 --- linux-2.6.21.7.orig/arch/arm/mach-ixp4xx/ixdp425-setup.c
436 +++ linux-2.6.21.7/arch/arm/mach-ixp4xx/ixdp425-setup.c
437 @@ -101,10 +101,59 @@ static struct platform_device ixdp425_ua
438 .resource = ixdp425_uart_resources
439 };
440
441 +/* MACs */
442 +static struct resource res_mac0 = {
443 + .start = IXP4XX_EthB_BASE_PHYS,
444 + .end = IXP4XX_EthB_BASE_PHYS + 0x1ff,
445 + .flags = IORESOURCE_MEM,
446 +};
447 +
448 +static struct resource res_mac1 = {
449 + .start = IXP4XX_EthC_BASE_PHYS,
450 + .end = IXP4XX_EthC_BASE_PHYS + 0x1ff,
451 + .flags = IORESOURCE_MEM,
452 +};
453 +
454 +static struct mac_plat_info plat_mac0 = {
455 + .npe_id = 1,
456 + .phy_id = 0,
457 + .eth_id = 0,
458 + .rxq_id = 27,
459 + .txq_id = 24,
460 + .rxdoneq_id = 4,
461 +};
462 +
463 +static struct mac_plat_info plat_mac1 = {
464 + .npe_id = 2,
465 + .phy_id = 1,
466 + .eth_id = 1,
467 + .rxq_id = 28,
468 + .txq_id = 25,
469 + .rxdoneq_id = 5,
470 +};
471 +
472 +static struct platform_device mac0 = {
473 + .name = "ixp4xx_mac",
474 + .id = 0,
475 + .dev.platform_data = &plat_mac0,
476 + .num_resources = 1,
477 + .resource = &res_mac0,
478 +};
479 +
480 +static struct platform_device mac1 = {
481 + .name = "ixp4xx_mac",
482 + .id = 1,
483 + .dev.platform_data = &plat_mac1,
484 + .num_resources = 1,
485 + .resource = &res_mac1,
486 +};
487 +
488 static struct platform_device *ixdp425_devices[] __initdata = {
489 &ixdp425_i2c_controller,
490 &ixdp425_flash,
491 - &ixdp425_uart
492 + &ixdp425_uart,
493 + &mac0,
494 + &mac1,
495 };
496
497 static void __init ixdp425_init(void)
498 Index: linux-2.6.21.7/drivers/net/Kconfig
499 ===================================================================
500 --- linux-2.6.21.7.orig/drivers/net/Kconfig
501 +++ linux-2.6.21.7/drivers/net/Kconfig
502 @@ -324,6 +324,8 @@ config MACB
503
504 source "drivers/net/arm/Kconfig"
505
506 +source "drivers/net/ixp4xx/Kconfig"
507 +
508 config MACE
509 tristate "MACE (Power Mac ethernet) support"
510 depends on NET_ETHERNET && PPC_PMAC && PPC32
511 Index: linux-2.6.21.7/drivers/net/Makefile
512 ===================================================================
513 --- linux-2.6.21.7.orig/drivers/net/Makefile
514 +++ linux-2.6.21.7/drivers/net/Makefile
515 @@ -213,6 +213,7 @@ obj-$(CONFIG_HAMRADIO) += hamradio/
516 obj-$(CONFIG_IRDA) += irda/
517 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
518 obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
519 +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx/
520
521 obj-$(CONFIG_NETCONSOLE) += netconsole.o
522
523 Index: linux-2.6.21.7/drivers/net/ixp4xx/Kconfig
524 ===================================================================
525 --- /dev/null
526 +++ linux-2.6.21.7/drivers/net/ixp4xx/Kconfig
527 @@ -0,0 +1,48 @@
528 +config IXP4XX_QMGR
529 + tristate "IXP4xx Queue Manager support"
530 + depends on ARCH_IXP4XX
531 + depends on NET_ETHERNET
532 + help
533 + The IXP4XX Queue manager is a configurable hardware ringbuffer.
534 + It is used by the NPEs to exchange data from and to the CPU.
535 + You can either use this OR the Intel Access Library (IAL)
536 +
537 +config IXP4XX_NPE
538 + tristate "IXP4xx NPE support"
539 + depends on ARCH_IXP4XX
540 + depends on NET_ETHERNET
541 + help
542 + The IXP4XX NPE driver supports the 3 CPU co-processors called
543 + "Network Processing Engines" (NPE). It adds support for downloading
544 + the Microcode (firmware) via Hotplug or character-special-device.
545 + More about this at: Documentation/networking/ixp4xx/README.
546 + You can either use this OR the Intel Access Library (IAL)
547 +
548 +config IXP4XX_FW_LOAD
549 + bool "Use Firmware hotplug for Microcode download"
550 + depends on IXP4XX_NPE
551 + select HOTPLUG
552 + select FW_LOADER
553 + help
554 + The default hotplug script will load the Firmware from
555 + /usr/lib/hotplug/firmware/NPE-[ABC]
556 + see Documentation/firmware_class/hotplug-script
557 +
558 +config IXP4XX_MAC
559 + tristate "IXP4xx MAC support"
560 + depends on IXP4XX_NPE
561 + depends on IXP4XX_QMGR
562 + depends on NET_ETHERNET
563 + select MII
564 + help
565 + The IXP4XX MAC driver supports the MACs on the IXP4XX CPUs.
566 + There are 2 on ixp425 and up to 5 on ixdp465.
567 + You can either use this OR the Intel Access Library (IAL)
568 +
569 +config IXP4XX_CRYPTO
570 + tristate "IXP4xx crypto support"
571 + depends on IXP4XX_NPE
572 + depends on IXP4XX_QMGR
573 + help
574 + This driver is a generic NPE-crypto access layer.
575 + You need additional code in OCF for example.
576 Index: linux-2.6.21.7/drivers/net/ixp4xx/Makefile
577 ===================================================================
578 --- /dev/null
579 +++ linux-2.6.21.7/drivers/net/ixp4xx/Makefile
580 @@ -0,0 +1,7 @@
581 +obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx_qmgr.o
582 +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx_npe.o
583 +obj-$(CONFIG_IXP4XX_MAC) += ixp4xx_mac.o
584 +obj-$(CONFIG_IXP4XX_CRYPTO) += ixp4xx_crypto.o
585 +
586 +ixp4xx_npe-objs := ucode_dl.o npe_mh.o npe.o
587 +ixp4xx_mac-objs := mac_driver.o phy.o
588 Index: linux-2.6.21.7/drivers/net/ixp4xx/ixp4xx_crypto.c
589 ===================================================================
590 --- /dev/null
591 +++ linux-2.6.21.7/drivers/net/ixp4xx/ixp4xx_crypto.c
592 @@ -0,0 +1,851 @@
593 +/*
594 + * ixp4xx_crypto.c - interface to the HW crypto
595 + *
596 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
597 + *
598 + * This file is released under the GPLv2
599 + */
600 +
601 +#include <linux/ixp_qmgr.h>
602 +#include <linux/ixp_npe.h>
603 +#include <linux/dma-mapping.h>
604 +#include <linux/dmapool.h>
605 +#include <linux/device.h>
606 +#include <linux/delay.h>
607 +#include <linux/slab.h>
608 +#include <linux/kernel.h>
609 +#include <linux/ixp_crypto.h>
610 +
611 +#define SEND_QID 29
612 +#define RECV_QID 30
613 +
614 +#define NPE_ID 2 /* NPE C */
615 +
616 +#define QUEUE_SIZE 64
617 +#define MY_VERSION "0.0.1"
618 +
619 +/* local head for all sa_ctx */
620 +static struct ix_sa_master sa_master;
621 +
622 +static const struct ix_hash_algo _hash_algos[] = {
623 +{
624 + .name = "MD5",
625 + .cfgword = 0xAA010004,
626 + .digest_len = 16,
627 + .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
628 + "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
629 + .type = HASH_TYPE_MD5,
630 +},{
631 + .name = "SHA1",
632 + .cfgword = 0x00000005,
633 + .digest_len = 20,
634 + .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
635 + "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
636 + .type = HASH_TYPE_SHA1,
637 +#if 0
638 +},{
639 + .name = "CBC MAC",
640 + .digest_len = 64,
641 + .aad_len = 48,
642 + .type = HASH_TYPE_CBCMAC,
643 +#endif
644 +} };
645 +
646 +static const struct ix_cipher_algo _cipher_algos[] = {
647 +{
648 + .name = "DES ECB",
649 + .cfgword_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
650 + .cfgword_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
651 + .block_len = 8,
652 + .type = CIPHER_TYPE_DES,
653 + .mode = CIPHER_MODE_ECB,
654 +},{
655 + .name = "DES CBC",
656 + .cfgword_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
657 + .cfgword_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
658 + .iv_len = 8,
659 + .block_len = 8,
660 + .type = CIPHER_TYPE_DES,
661 + .mode = CIPHER_MODE_CBC,
662 +},{
663 + .name = "3DES ECB",
664 + .cfgword_enc = CIPH_ENCR | MOD_TDEA3 | MOD_ECB | KEYLEN_192,
665 + .cfgword_dec = CIPH_DECR | MOD_TDEA3 | MOD_ECB | KEYLEN_192,
666 + .block_len = 8,
667 + .type = CIPHER_TYPE_3DES,
668 + .mode = CIPHER_MODE_ECB,
669 +},{
670 + .name = "3DES CBC",
671 + .cfgword_enc = CIPH_ENCR | MOD_TDEA3 | MOD_CBC_ENC | KEYLEN_192,
672 + .cfgword_dec = CIPH_DECR | MOD_TDEA3 | MOD_CBC_DEC | KEYLEN_192,
673 + .iv_len = 8,
674 + .block_len = 8,
675 + .type = CIPHER_TYPE_3DES,
676 + .mode = CIPHER_MODE_CBC,
677 +},{
678 + .name = "AES ECB",
679 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_ECB,
680 + .cfgword_dec = CIPH_DECR | ALGO_AES | MOD_ECB,
681 + .block_len = 16,
682 + .type = CIPHER_TYPE_AES,
683 + .mode = CIPHER_MODE_ECB,
684 +},{
685 + .name = "AES CBC",
686 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CBC_ENC,
687 + .cfgword_dec = CIPH_DECR | ALGO_AES | MOD_CBC_DEC,
688 + .block_len = 16,
689 + .iv_len = 16,
690 + .type = CIPHER_TYPE_AES,
691 + .mode = CIPHER_MODE_CBC,
692 +},{
693 + .name = "AES CTR",
694 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CTR,
695 + .cfgword_dec = CIPH_ENCR | ALGO_AES | MOD_CTR,
696 + .block_len = 16,
697 + .iv_len = 16,
698 + .type = CIPHER_TYPE_AES,
699 + .mode = CIPHER_MODE_CTR,
700 +#if 0
701 +},{
702 + .name = "AES CCM",
703 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CCM_ENC,
704 + .cfgword_dec = CIPH_ENCR | ALGO_AES | MOD_CCM_DEC,
705 + .block_len = 16,
706 + .iv_len = 16,
707 + .type = CIPHER_TYPE_AES,
708 + .mode = CIPHER_MODE_CCM,
709 +#endif
710 +} };
711 +
712 +const struct ix_hash_algo *ix_hash_by_id(int type)
713 +{
714 + int i;
715 +
716 + for(i=0; i<ARRAY_SIZE(_hash_algos); i++) {
717 + if (_hash_algos[i].type == type)
718 + return _hash_algos + i;
719 + }
720 + return NULL;
721 +}
722 +
723 +const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode)
724 +{
725 + int i;
726 +
727 + for(i=0; i<ARRAY_SIZE(_cipher_algos); i++) {
728 + if (_cipher_algos[i].type==type && _cipher_algos[i].mode==mode)
729 + return _cipher_algos + i;
730 + }
731 + return NULL;
732 +}
733 +
734 +static void irqcb_recv(struct qm_queue *queue);
735 +
736 +static int init_sa_master(struct ix_sa_master *master)
737 +{
738 + struct npe_info *npe;
739 + int ret = -ENODEV;
740 +
741 + if (! (ix_fuse() & (IX_FUSE_HASH | IX_FUSE_AES | IX_FUSE_DES))) {
742 + printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
743 + return ret;
744 + }
745 + memset(master, 0, sizeof(struct ix_sa_master));
746 + master->npe_dev = get_npe_by_id(NPE_ID);
747 + if (! master->npe_dev)
748 + goto err;
749 +
750 + npe = dev_get_drvdata(master->npe_dev);
751 +
752 + if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
753 + switch (npe->img_info[1]) {
754 + case 4:
755 + printk(KERN_INFO "Crypto AES avaialable\n");
756 + break;
757 + case 5:
758 + printk(KERN_INFO "Crypto AES and CCM avaialable\n");
759 + break;
760 + default:
761 + printk(KERN_WARNING "Current microcode for %s has no"
762 + " crypto capabilities\n", npe->plat->name);
763 + break;
764 + }
765 + }
766 + rwlock_init(&master->lock);
767 + master->dmapool = dma_pool_create("ixp4xx_crypto", master->npe_dev,
768 + sizeof(struct npe_crypt_cont), 32, 0);
769 + if (!master->dmapool) {
770 + ret = -ENOMEM;
771 + goto err;
772 + }
773 + master->sendq = request_queue(SEND_QID, QUEUE_SIZE);
774 + if (IS_ERR(master->sendq)) {
775 + printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
776 + SEND_QID);
777 + ret = PTR_ERR(master->sendq);
778 + goto err;
779 + }
780 + master->recvq = request_queue(RECV_QID, QUEUE_SIZE);
781 + if (IS_ERR(master->recvq)) {
782 + printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
783 + RECV_QID);
784 + ret = PTR_ERR(master->recvq);
785 + release_queue(master->sendq);
786 + goto err;
787 + }
788 +
789 + master->recvq->irq_cb = irqcb_recv;
790 + queue_set_watermarks(master->recvq, 0, 0);
791 + queue_set_irq_src(master->recvq, Q_IRQ_ID_NOT_E);
792 + queue_enable_irq(master->recvq);
793 + printk(KERN_INFO "ixp4xx_crypto " MY_VERSION " registered successfully\n");
794 +
795 + return 0;
796 +err:
797 + if (master->dmapool)
798 + dma_pool_destroy(master->dmapool);
799 + if (! master->npe_dev)
800 + put_device(master->npe_dev);
801 + return ret;
802 +
803 +}
804 +
805 +static void release_sa_master(struct ix_sa_master *master)
806 +{
807 + struct npe_crypt_cont *cont;
808 + unsigned long flags;
809 +
810 + write_lock_irqsave(&master->lock, flags);
811 + while (master->pool) {
812 + cont = master->pool;
813 + master->pool = cont->next;
814 + dma_pool_free(master->dmapool, cont, cont->phys);
815 + master->pool_size--;
816 + }
817 + write_unlock_irqrestore(&master->lock, flags);
818 + if (master->pool_size) {
819 + printk(KERN_ERR "ixp4xx_crypto: %d items lost from DMA pool\n",
820 + master->pool_size);
821 + }
822 +
823 + dma_pool_destroy(master->dmapool);
824 + release_queue(master->sendq);
825 + release_queue(master->recvq);
826 + return_npe_dev(master->npe_dev);
827 +}
828 +
829 +static struct npe_crypt_cont *ix_sa_get_cont(struct ix_sa_master *master)
830 +{
831 + unsigned long flags;
832 + struct npe_crypt_cont *cont;
833 + dma_addr_t handle;
834 +
835 + write_lock_irqsave(&master->lock, flags);
836 + if (!master->pool) {
837 + cont = dma_pool_alloc(master->dmapool, GFP_ATOMIC, &handle);
838 + if (cont) {
839 + master->pool_size++;
840 + cont->phys = handle;
841 + cont->virt = cont;
842 + }
843 + } else {
844 + cont = master->pool;
845 + master->pool = cont->next;
846 + }
847 + write_unlock_irqrestore(&master->lock, flags);
848 + return cont;
849 +}
850 +
851 +static void
852 +ix_sa_return_cont(struct ix_sa_master *master,struct npe_crypt_cont *cont)
853 +{
854 + unsigned long flags;
855 +
856 + write_lock_irqsave(&master->lock, flags);
857 + cont->next = master->pool;
858 + master->pool = cont;
859 + write_unlock_irqrestore(&master->lock, flags);
860 +}
861 +
862 +static void free_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
863 +{
864 + memset(dir->npe_ctx, 0, NPE_CTX_LEN);
865 + dma_pool_free(sa_ctx->master->dmapool, dir->npe_ctx,
866 + dir->npe_ctx_phys);
867 +}
868 +
869 +static void ix_sa_ctx_destroy(struct ix_sa_ctx *sa_ctx)
870 +{
871 + BUG_ON(sa_ctx->state != STATE_UNLOADING);
872 + free_sa_dir(sa_ctx, &sa_ctx->encrypt);
873 + free_sa_dir(sa_ctx, &sa_ctx->decrypt);
874 + kfree(sa_ctx);
875 + module_put(THIS_MODULE);
876 +}
877 +
878 +static void recv_pack(struct qm_queue *queue, u32 phys)
879 +{
880 + struct ix_sa_ctx *sa_ctx;
881 + struct npe_crypt_cont *cr_cont;
882 + struct npe_cont *cont;
883 + int failed;
884 +
885 + failed = phys & 0x1;
886 + phys &= ~0x3;
887 +
888 + cr_cont = dma_to_virt(queue->dev, phys);
889 + cr_cont = cr_cont->virt;
890 + sa_ctx = cr_cont->ctl.crypt.sa_ctx;
891 +
892 + phys = npe_to_cpu32(cr_cont->ctl.crypt.src_buf);
893 + if (phys) {
894 + cont = dma_to_virt(queue->dev, phys);
895 + cont = cont->virt;
896 + } else {
897 + cont = NULL;
898 + }
899 + if (cr_cont->ctl.crypt.oper_type == OP_PERFORM) {
900 + dma_unmap_single(sa_ctx->master->npe_dev,
901 + cont->eth.phys_addr,
902 + cont->eth.buf_len,
903 + DMA_BIDIRECTIONAL);
904 + if (sa_ctx->perf_cb)
905 + sa_ctx->perf_cb(sa_ctx, cont->data, failed);
906 + qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
907 + ix_sa_return_cont(sa_ctx->master, cr_cont);
908 + if (atomic_dec_and_test(&sa_ctx->use_cnt))
909 + ix_sa_ctx_destroy(sa_ctx);
910 + return;
911 + }
912 +
913 + /* We are registering */
914 + switch (cr_cont->ctl.crypt.mode) {
915 + case NPE_OP_HASH_GEN_ICV:
916 + /* 1 out of 2 HMAC preparation operations completed */
917 + dma_unmap_single(sa_ctx->master->npe_dev,
918 + cont->eth.phys_addr,
919 + cont->eth.buf_len,
920 + DMA_TO_DEVICE);
921 + kfree(cont->data);
922 + qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
923 + break;
924 + case NPE_OP_ENC_GEN_KEY:
925 + memcpy(sa_ctx->decrypt.npe_ctx + sizeof(u32),
926 + sa_ctx->rev_aes->ctl.rev_aes_key + sizeof(u32),
927 + sa_ctx->c_key.len);
928 + /* REV AES data not needed anymore, free it */
929 + ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
930 + sa_ctx->rev_aes = NULL;
931 + break;
932 + default:
933 + printk(KERN_ERR "Unknown crypt-register mode: %x\n",
934 + cr_cont->ctl.crypt.mode);
935 +
936 + }
937 + if (cr_cont->ctl.crypt.oper_type == OP_REG_DONE) {
938 + if (sa_ctx->state == STATE_UNREGISTERED)
939 + sa_ctx->state = STATE_REGISTERED;
940 + if (sa_ctx->reg_cb)
941 + sa_ctx->reg_cb(sa_ctx, failed);
942 + }
943 + ix_sa_return_cont(sa_ctx->master, cr_cont);
944 + if (atomic_dec_and_test(&sa_ctx->use_cnt))
945 + ix_sa_ctx_destroy(sa_ctx);
946 +}
947 +
948 +static void irqcb_recv(struct qm_queue *queue)
949 +{
950 + u32 phys;
951 +
952 + queue_ack_irq(queue);
953 + while ((phys = queue_get_entry(queue)))
954 + recv_pack(queue, phys);
955 +}
956 +
957 +static int init_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
958 +{
959 + dir->npe_ctx = dma_pool_alloc(sa_ctx->master->dmapool,
960 + sa_ctx->gfp_flags, &dir->npe_ctx_phys);
961 + if (!dir->npe_ctx) {
962 + return 1;
963 + }
964 + memset(dir->npe_ctx, 0, NPE_CTX_LEN);
965 + return 0;
966 +}
967 +
968 +struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags)
969 +{
970 + struct ix_sa_ctx *sa_ctx;
971 + struct ix_sa_master *master = &sa_master;
972 + struct npe_info *npe = dev_get_drvdata(master->npe_dev);
973 +
974 + /* first check if Microcode was downloaded into this NPE */
975 + if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
976 + printk(KERN_ERR "%s not running\n", npe->plat->name);
977 + return NULL;
978 + }
979 + switch (npe->img_info[1]) {
980 + case 4:
981 + case 5:
982 + break;
983 + default:
984 + /* No crypto Microcode */
985 + return NULL;
986 + }
987 + if (!try_module_get(THIS_MODULE)) {
988 + return NULL;
989 + }
990 +
991 + sa_ctx = kzalloc(sizeof(struct ix_sa_ctx) + priv_len, flags);
992 + if (!sa_ctx) {
993 + goto err_put;
994 + }
995 +
996 + sa_ctx->master = master;
997 + sa_ctx->gfp_flags = flags;
998 +
999 + if (init_sa_dir(sa_ctx, &sa_ctx->encrypt))
1000 + goto err_free;
1001 + if (init_sa_dir(sa_ctx, &sa_ctx->decrypt)) {
1002 + free_sa_dir(sa_ctx, &sa_ctx->encrypt);
1003 + goto err_free;
1004 + }
1005 + if (priv_len)
1006 + sa_ctx->priv = sa_ctx + 1;
1007 +
1008 + atomic_set(&sa_ctx->use_cnt, 1);
1009 + return sa_ctx;
1010 +
1011 +err_free:
1012 + kfree(sa_ctx);
1013 +err_put:
1014 + module_put(THIS_MODULE);
1015 + return NULL;
1016 +}
1017 +
1018 +void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx)
1019 +{
1020 + sa_ctx->state = STATE_UNLOADING;
1021 + if (atomic_dec_and_test(&sa_ctx->use_cnt))
1022 + ix_sa_ctx_destroy(sa_ctx);
1023 + else
1024 + printk("ix_sa_ctx_free -> delayed: %p %d\n",
1025 + sa_ctx, atomic_read(&sa_ctx->use_cnt));
1026 +}
1027 +
1028 +/* http://www.ietf.org/rfc/rfc2104.txt */
1029 +#define HMAC_IPAD_VALUE 0x36
1030 +#define HMAC_OPAD_VALUE 0x5C
1031 +#define PAD_BLOCKLEN 64
1032 +
1033 +static int register_chain_var(struct ix_sa_ctx *sa_ctx,
1034 + unsigned char *pad, u32 target, int init_len, u32 ctx_addr, int oper)
1035 +{
1036 + struct npe_crypt_cont *cr_cont;
1037 + struct npe_cont *cont;
1038 +
1039 + cr_cont = ix_sa_get_cont(sa_ctx->master);
1040 + if (!cr_cont)
1041 + return -ENOMEM;
1042 +
1043 + cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1044 + cr_cont->ctl.crypt.auth_offs = 0;
1045 + cr_cont->ctl.crypt.auth_len =cpu_to_npe16(PAD_BLOCKLEN);
1046 + cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(ctx_addr);
1047 +
1048 + cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev));
1049 + if (!cont) {
1050 + ix_sa_return_cont(sa_ctx->master, cr_cont);
1051 + return -ENOMEM;
1052 + }
1053 +
1054 + cont->data = pad;
1055 + cont->eth.next = 0;
1056 + cont->eth.buf_len = cpu_to_npe16(PAD_BLOCKLEN);
1057 + cont->eth.pkt_len = 0;
1058 +
1059 + cont->eth.phys_addr = cpu_to_npe32(dma_map_single(
1060 + sa_ctx->master->npe_dev, pad, PAD_BLOCKLEN, DMA_TO_DEVICE));
1061 +
1062 + cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys);
1063 + cr_cont->ctl.crypt.oper_type = oper;
1064 +
1065 + cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(target);
1066 + cr_cont->ctl.crypt.mode = NPE_OP_HASH_GEN_ICV;
1067 + cr_cont->ctl.crypt.init_len = init_len;
1068 +
1069 + atomic_inc(&sa_ctx->use_cnt);
1070 + queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1071 + if (queue_stat(sa_ctx->master->sendq) == 2) { /* overflow */
1072 + atomic_dec(&sa_ctx->use_cnt);
1073 + qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev),
1074 + cont);
1075 + ix_sa_return_cont(sa_ctx->master, cr_cont);
1076 + return -ENOMEM;
1077 + }
1078 + return 0;
1079 +}
1080 +
1081 +/* Return value
1082 + * 0 if nothing registered,
1083 + * 1 if something registered and
1084 + * < 0 on error
1085 + */
1086 +static int ix_sa_ctx_setup_auth(struct ix_sa_ctx *sa_ctx,
1087 + const struct ix_hash_algo *algo, int len, int oper, int encrypt)
1088 +{
1089 + unsigned char *ipad, *opad;
1090 + u32 itarget, otarget, ctx_addr;
1091 + unsigned char *cinfo;
1092 + int init_len, i, ret = 0;
1093 + struct qm_qmgr *qmgr;
1094 + struct ix_sa_dir *dir;
1095 + u32 cfgword;
1096 +
1097 + dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1098 + cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1099 +
1100 +	qmgr = dev_get_drvdata(sa_ctx->master->sendq->dev);
1101 +
1102 +
1103 + sa_ctx->h_algo = algo;
1104 +
1105 + if (!algo) {
1106 + dir->npe_mode |= NPE_OP_HMAC_DISABLE;
1107 + return 0;
1108 + }
1109 + if (algo->type == HASH_TYPE_CBCMAC) {
1110 + dir->npe_mode |= NPE_OP_CCM_ENABLE | NPE_OP_HMAC_DISABLE;
1111 + return 0;
1112 + }
1113 + if (sa_ctx->h_key.len > 64 || sa_ctx->h_key.len < algo->digest_len)
1114 + return -EINVAL;
1115 + if (len > algo->digest_len || (len % 4))
1116 + return -EINVAL;
1117 + if (!len)
1118 + len = algo->digest_len;
1119 +
1120 + sa_ctx->digest_len = len;
1121 +
1122 + /* write cfg word to cryptinfo */
1123 + cfgword = algo->cfgword | ((len/4) << 8);
1124 + *(u32*)cinfo = cpu_to_be32(cfgword);
1125 + cinfo += sizeof(cfgword);
1126 +
1127 + /* write ICV to cryptinfo */
1128 + memcpy(cinfo, algo->icv, algo->digest_len);
1129 + cinfo += algo->digest_len;
1130 +
1131 + itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
1132 + + sizeof(algo->cfgword);
1133 + otarget = itarget + algo->digest_len;
1134 +
1135 + opad = kzalloc(PAD_BLOCKLEN, sa_ctx->gfp_flags | GFP_DMA);
1136 + if (!opad) {
1137 + return -ENOMEM;
1138 + }
1139 + ipad = kzalloc(PAD_BLOCKLEN, sa_ctx->gfp_flags | GFP_DMA);
1140 + if (!ipad) {
1141 + kfree(opad);
1142 + return -ENOMEM;
1143 + }
1144 + memcpy(ipad, sa_ctx->h_key.key, sa_ctx->h_key.len);
1145 + memcpy(opad, sa_ctx->h_key.key, sa_ctx->h_key.len);
1146 + for (i = 0; i < PAD_BLOCKLEN; i++) {
1147 + ipad[i] ^= HMAC_IPAD_VALUE;
1148 + opad[i] ^= HMAC_OPAD_VALUE;
1149 + }
1150 + init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
1151 + ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
1152 +
1153 + dir->npe_ctx_idx += init_len;
1154 + dir->npe_mode |= NPE_OP_HASH_ENABLE;
1155 +
1156 + if (!encrypt)
1157 + dir->npe_mode |= NPE_OP_HASH_VERIFY;
1158 +
1159 + /* register first chainvar */
1160 + ret = register_chain_var(sa_ctx, opad, otarget,
1161 + init_len, ctx_addr, OP_REGISTER);
1162 + if (ret) {
1163 + kfree(ipad);
1164 + kfree(opad);
1165 + return ret;
1166 + }
1167 +
1168 + /* register second chainvar */
1169 + ret = register_chain_var(sa_ctx, ipad, itarget,
1170 + init_len, ctx_addr, oper);
1171 + if (ret) {
1172 + kfree(ipad);
1173 + return ret;
1174 + }
1175 +
1176 + return 1;
1177 +}
1178 +
1179 +static int gen_rev_aes_key(struct ix_sa_ctx *sa_ctx,
1180 + u32 keylen_cfg, int cipher_op)
1181 +{
1182 + unsigned char *cinfo;
1183 + struct npe_crypt_cont *cr_cont;
1184 +
1185 + keylen_cfg |= CIPH_ENCR | ALGO_AES | MOD_ECB;
1186 + sa_ctx->rev_aes = ix_sa_get_cont(sa_ctx->master);
1187 + if (!sa_ctx->rev_aes)
1188 + return -ENOMEM;
1189 +
1190 + cinfo = sa_ctx->rev_aes->ctl.rev_aes_key;
1191 + *(u32*)cinfo = cpu_to_be32(keylen_cfg);
1192 + cinfo += sizeof(keylen_cfg);
1193 +
1194 + memcpy(cinfo, sa_ctx->c_key.key, sa_ctx->c_key.len);
1195 +
1196 + cr_cont = ix_sa_get_cont(sa_ctx->master);
1197 + if (!cr_cont) {
1198 + ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
1199 + sa_ctx->rev_aes = NULL;
1200 + return -ENOMEM;
1201 + }
1202 + cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1203 + cr_cont->ctl.crypt.oper_type = cipher_op;
1204 +
1205 + cr_cont->ctl.crypt.crypt_offs = 0;
1206 + cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(AES_BLOCK128);
1207 + cr_cont->ctl.crypt.addr.rev_aes = cpu_to_npe32(
1208 + sa_ctx->rev_aes->phys + sizeof(keylen_cfg));
1209 +
1210 + cr_cont->ctl.crypt.src_buf = 0;
1211 + cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(sa_ctx->rev_aes->phys);
1212 + cr_cont->ctl.crypt.mode = NPE_OP_ENC_GEN_KEY;
1213 + cr_cont->ctl.crypt.init_len = sa_ctx->decrypt.npe_ctx_idx;
1214 +
1215 + atomic_inc(&sa_ctx->use_cnt);
1216 + queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1217 + if (queue_stat(sa_ctx->master->sendq) == 2) { /* overflow */
1218 + atomic_dec(&sa_ctx->use_cnt);
1219 + ix_sa_return_cont(sa_ctx->master, cr_cont);
1220 + ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
1221 + sa_ctx->rev_aes = NULL;
1222 + return -ENOMEM;
1223 + }
1224 +
1225 + return 1;
1226 +}
1227 +
1228 +/* Return value
1229 + * 0 if nothing registered,
1230 + * 1 if something registered and
1231 + * < 0 on error
1232 + */
1233 +static int ix_sa_ctx_setup_cipher(struct ix_sa_ctx *sa_ctx,
1234 + const struct ix_cipher_algo *algo, int cipher_op, int encrypt)
1235 +{
1236 + unsigned char *cinfo;
1237 + int keylen, init_len;
1238 + u32 cipher_cfg;
1239 + u32 keylen_cfg = 0;
1240 + struct ix_sa_dir *dir;
1241 +
1242 + dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1243 + cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1244 +
1245 + sa_ctx->c_algo = algo;
1246 +
1247 + if (!algo)
1248 + return 0;
1249 +
1250 + if (algo->type == CIPHER_TYPE_DES && sa_ctx->c_key.len != 8)
1251 + return -EINVAL;
1252 +
1253 + if (algo->type == CIPHER_TYPE_3DES && sa_ctx->c_key.len != 24)
1254 + return -EINVAL;
1255 +
1256 + keylen = 24;
1257 +
1258 + if (encrypt) {
1259 + cipher_cfg = algo->cfgword_enc;
1260 + dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
1261 + } else {
1262 + cipher_cfg = algo->cfgword_dec;
1263 + }
1264 + if (algo->type == CIPHER_TYPE_AES) {
1265 + switch (sa_ctx->c_key.len) {
1266 + case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
1267 + case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
1268 + case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
1269 + default: return -EINVAL;
1270 + }
1271 + keylen = sa_ctx->c_key.len;
1272 + cipher_cfg |= keylen_cfg;
1273 + }
1274 +
1275 + /* write cfg word to cryptinfo */
1276 + *(u32*)cinfo = cpu_to_be32(cipher_cfg);
1277 + cinfo += sizeof(cipher_cfg);
1278 +
1279 + /* write cipher key to cryptinfo */
1280 + memcpy(cinfo, sa_ctx->c_key.key, sa_ctx->c_key.len);
1281 + cinfo += keylen;
1282 +
1283 + init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
1284 + dir->npe_ctx_idx += init_len;
1285 +
1286 + dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
1287 +
1288 + if (algo->type == CIPHER_TYPE_AES && !encrypt) {
1289 + return gen_rev_aes_key(sa_ctx, keylen_cfg, cipher_op);
1290 + }
1291 +
1292 + return 0;
1293 +}
1294 +
1295 +/* returns 0 on OK, <0 on error and 1 on overflow */
1296 +int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr,
1297 + int datalen, int c_offs, int c_len, int a_offs, int a_len,
1298 + int hmac, char *iv, int encrypt)
1299 +{
1300 + struct npe_crypt_cont *cr_cont;
1301 + struct npe_cont *cont;
1302 + u32 data_phys;
1303 + int ret = -ENOMEM;
1304 + struct ix_sa_dir *dir;
1305 +
1306 + dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1307 +
1308 + if (sa_ctx->state != STATE_REGISTERED)
1309 + return -ENOENT;
1310 +
1311 + cr_cont = ix_sa_get_cont(sa_ctx->master);
1312 + if (!cr_cont)
1313 + return ret;
1314 +
1315 + cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1316 + cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(dir->npe_ctx_phys);
1317 + cr_cont->ctl.crypt.oper_type = OP_PERFORM;
1318 + cr_cont->ctl.crypt.mode = dir->npe_mode;
1319 + cr_cont->ctl.crypt.init_len = dir->npe_ctx_idx;
1320 +
1321 + if (sa_ctx->c_algo) {
1322 + cr_cont->ctl.crypt.crypt_offs = cpu_to_npe16(c_offs);
1323 + cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(c_len);
1324 + if (sa_ctx->c_algo->iv_len) {
1325 + if (!iv) {
1326 + ret = -EINVAL;
1327 + goto err_cr;
1328 + }
1329 + memcpy(cr_cont->ctl.crypt.iv, iv,
1330 + sa_ctx->c_algo->iv_len);
1331 + }
1332 + }
1333 +
1334 + if (sa_ctx->h_algo) {
1335 + /* prepare hashing */
1336 + cr_cont->ctl.crypt.auth_offs = cpu_to_npe16(a_offs);
1337 + cr_cont->ctl.crypt.auth_len = cpu_to_npe16(a_len);
1338 + }
1339 +
1340 + data_phys = dma_map_single(sa_ctx->master->npe_dev,
1341 + data, datalen, DMA_BIDIRECTIONAL);
1342 + if (hmac)
1343 + cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(data_phys + hmac);
1344 +
1345 + /* Prepare the data ptr */
1346 + cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev));
1347 + if (!cont) {
1348 + goto err_unmap;
1349 + }
1350 +
1351 + cont->data = ptr;
1352 + cont->eth.next = 0;
1353 + cont->eth.buf_len = cpu_to_npe16(datalen);
1354 + cont->eth.pkt_len = 0;
1355 +
1356 + cont->eth.phys_addr = cpu_to_npe32(data_phys);
1357 + cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys);
1358 +
1359 + atomic_inc(&sa_ctx->use_cnt);
1360 + queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1361 + if (queue_stat(sa_ctx->master->sendq) != 2) {
1362 + return 0;
1363 + }
1364 +
1365 + /* overflow */
1366 + printk("%s: Overflow\n", __FUNCTION__);
1367 + ret = -EAGAIN;
1368 + atomic_dec(&sa_ctx->use_cnt);
1369 + qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev), cont);
1370 +
1371 +err_unmap:
1372 + dma_unmap_single(sa_ctx->master->npe_dev, data_phys, datalen,
1373 + DMA_BIDIRECTIONAL);
1374 +err_cr:
1375 + ix_sa_return_cont(sa_ctx->master, cr_cont);
1376 +
1377 + return ret;
1378 +}
1379 +
1380 +int ix_sa_ctx_setup_cipher_auth(struct ix_sa_ctx *sa_ctx,
1381 + const struct ix_cipher_algo *cipher,
1382 + const struct ix_hash_algo *auth, int len)
1383 +{
1384 + int ret = 0, sum = 0;
1385 + int cipher_op;
1386 +
1387 + if (sa_ctx->state != STATE_UNREGISTERED)
1388 + return -ENOENT;
1389 +
1390 + atomic_inc(&sa_ctx->use_cnt);
1391 +
1392 + cipher_op = auth ? OP_REGISTER : OP_REG_DONE;
1393 + if ((ret = ix_sa_ctx_setup_cipher(sa_ctx, cipher, OP_REGISTER, 1)) < 0)
1394 + goto out;
1395 + sum += ret;
1396 + if ((ret = ix_sa_ctx_setup_cipher(sa_ctx, cipher, cipher_op, 0)) < 0)
1397 + goto out;
1398 + sum += ret;
1399 + if ((ret = ix_sa_ctx_setup_auth(sa_ctx, auth, len, OP_REGISTER, 1)) < 0)
1400 + goto out;
1401 + sum += ret;
1402 + if ((ret = ix_sa_ctx_setup_auth(sa_ctx, auth, len, OP_REG_DONE, 0)) < 0)
1403 + goto out;
1404 + sum += ret;
1405 +
1406 + /* Nothing registered ?
1407 + * Ok, then we are done and call the callback here.
1408 + */
1409 + if (!sum) {
1410 + if (sa_ctx->state == STATE_UNREGISTERED)
1411 + sa_ctx->state = STATE_REGISTERED;
1412 + if (sa_ctx->reg_cb)
1413 + sa_ctx->reg_cb(sa_ctx, 0);
1414 + }
1415 +out:
1416 + atomic_dec(&sa_ctx->use_cnt);
1417 + return ret;
1418 +}
1419 +
1420 +static int __init init_crypto(void)
1421 +{
1422 + return init_sa_master(&sa_master);
1423 +}
1424 +
1425 +static void __exit finish_crypto(void)
1426 +{
1427 + release_sa_master(&sa_master);
1428 +}
1429 +
1430 +MODULE_LICENSE("GPL");
1431 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1432 +
1433 +EXPORT_SYMBOL(ix_hash_by_id);
1434 +EXPORT_SYMBOL(ix_cipher_by_id);
1435 +
1436 +EXPORT_SYMBOL(ix_sa_ctx_new);
1437 +EXPORT_SYMBOL(ix_sa_ctx_free);
1438 +EXPORT_SYMBOL(ix_sa_ctx_setup_cipher_auth);
1439 +EXPORT_SYMBOL(ix_sa_crypto_perform);
1440 +
1441 +module_init(init_crypto);
1442 +module_exit(finish_crypto);
1443 +
1444 Index: linux-2.6.21.7/drivers/net/ixp4xx/ixp4xx_qmgr.c
1445 ===================================================================
1446 --- /dev/null
1447 +++ linux-2.6.21.7/drivers/net/ixp4xx/ixp4xx_qmgr.c
1448 @@ -0,0 +1,474 @@
1449 +/*
1450 + * qmgr.c - reimplementation of the queue configuration interface.
1451 + *
1452 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1453 + *
1454 + * This file is released under the GPLv2
1455 + */
1456 +
1457 +#include <linux/kernel.h>
1458 +#include <linux/module.h>
1459 +#include <linux/platform_device.h>
1460 +#include <linux/fs.h>
1461 +#include <linux/init.h>
1462 +#include <linux/slab.h>
1463 +#include <linux/dmapool.h>
1464 +#include <linux/interrupt.h>
1465 +#include <linux/err.h>
1466 +#include <linux/delay.h>
1467 +#include <asm/uaccess.h>
1468 +#include <asm/io.h>
1469 +
1470 +#include <linux/ixp_qmgr.h>
1471 +#include <linux/ixp_npe.h>
1472 +
1473 +#define IXQMGR_VERSION "IXP4XX Q Manager 0.2.1"
1474 +
1475 +static struct device *qmgr_dev = NULL;
1476 +
1477 +static int poll_freq = 4000;
1478 +static int poll_enable = 0;
1479 +static u32 timer_countup_ticks;
1480 +
1481 +module_param(poll_freq, int, 0644);
1482 +module_param(poll_enable, int, 0644);
1483 +
1484 +int queue_len(struct qm_queue *queue)
1485 +{
1486 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1487 + int diff, offs;
1488 + u32 val;
1489 +
1490 + offs = queue->id/8 + QUE_LOW_STAT0;
1491 + val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
1492 +
1493 + diff = (val - (val >> 7)) & 0x7f;
1494 + if (!diff) {
1495 + /* diff == 0 means either empty or full, must look at STAT0 */
1496 + if ((*(qmgr->addr + offs) >> ((queue->id % 8)*4)) & 0x04)
1497 + diff = queue->len;
1498 + }
1499 + return diff;
1500 +}
1501 +
1502 +static int request_pool(struct device *dev, int count)
1503 +{
1504 + int i;
1505 + struct npe_cont *cont;
1506 + struct qm_qmgr *qmgr = dev_get_drvdata(dev);
1507 + dma_addr_t handle;
1508 +
1509 + for (i=0; i<count; i++) {
1510 + cont = dma_pool_alloc(qmgr->dmapool, GFP_KERNEL, &handle);
1511 + if (!cont) {
1512 + return -ENOMEM;
1513 + }
1514 + cont->phys = handle;
1515 + cont->virt = cont;
1516 + write_lock(&qmgr->lock);
1517 + cont->next = qmgr->pool;
1518 + qmgr->pool = cont;
1519 + write_unlock(&qmgr->lock);
1520 + }
1521 + return 0;
1522 +}
1523 +
1524 +static int free_pool(struct device *dev, int count)
1525 +{
1526 + int i;
1527 + struct npe_cont *cont;
1528 + struct qm_qmgr *qmgr = dev_get_drvdata(dev);
1529 +
1530 + for (i=0; i<count; i++) {
1531 + write_lock(&qmgr->lock);
1532 + cont = qmgr->pool;
1533 + if (!cont) {
1534 + write_unlock(&qmgr->lock);
1535 + return -1;
1536 + }
1537 + qmgr->pool = cont->next;
1538 + write_unlock(&qmgr->lock);
1539 + dma_pool_free(qmgr->dmapool, cont, cont->phys);
1540 + }
1541 + return 0;
1542 +}
1543 +
1544 +static int get_free_qspace(struct qm_qmgr *qmgr, int len)
1545 +{
1546 + int words = (qmgr->res->end - qmgr->res->start + 1) / 4 -
1547 + IX_QMGR_SRAM_SPACE;
1548 + int i,q;
1549 +
1550 + for (i=0; i<words; i+=len) {
1551 + for (q=0; q<MAX_QUEUES; q++) {
1552 + struct qm_queue *qu = qmgr->queues[q];
1553 + if (!qu)
1554 + continue;
1555 + if ((qu->addr + qu->len > i) && (qu->addr < i + len))
1556 + break;
1557 + }
1558 + if (q == MAX_QUEUES) {
1559 + /* we have a free address */
1560 + return i;
1561 + }
1562 + }
1563 + return -1;
1564 +}
1565 +
1566 +static inline int _log2(int x)
1567 +{
1568 + int r=0;
1569 + while(x>>=1)
1570 + r++;
1571 + return r;
1572 +}
1573 +
1574 +/*
1575 + * 32bit Config registers at IX_QMGR_QUECONFIG_BASE_OFFSET[Qid]
1576 + * 0 - 6 WRPTR Word offset to baseaddr (index 0 .. BSIZE-1)
1577 + * 7 -13 RDPTR ''
1578 + * 14 -21 BADDR baseaddr = (offset to IX_QMGR_QUEBUFFER_SPACE_OFFSET) >> 6
1579 + * 22 -23 ESIZE entrySizeInWords (always 00 because entrySizeInWords==1)
1580 + * 24 -25 BSIZE qSizeInWords 00=16,01=32,10=64,11=128
1581 + * 26 -28 NE nearly empty
1582 + * 29 -31 NF nearly full
1583 + */
1584 +static int conf_q_regs(struct qm_queue *queue)
1585 +{
1586 + int bsize = _log2(queue->len/16);
1587 + int baddr = queue->addr + IX_QMGR_QCFG_SIZE;
1588 +
1589 + /* +2, because baddr is in words and not in bytes */
1590 + queue_write_cfg_reg(queue, (bsize << 24) | (baddr<<(14-6+2)) );
1591 +
1592 + return 0;
1593 +}
1594 +
1595 +static void pmu_timer_restart(void)
1596 +{
1597 + unsigned long flags;
1598 +
1599 + local_irq_save(flags);
1600 +
1601 + __asm__(" mcr p14,0,%0,c1,c1,0\n" /* write current counter */
1602 + : : "r" (timer_countup_ticks));
1603 +
1604 + __asm__(" mrc p14,0,r1,c4,c1,0; " /* get int enable register */
1605 + " orr r1,r1,#1; "
1606 + " mcr p14,0,r1,c5,c1,0; " /* clear overflow */
1607 + " mcr p14,0,r1,c4,c1,0\n" /* enable interrupts */
1608 + : : : "r1");
1609 +
1610 + local_irq_restore(flags);
1611 +}
1612 +
1613 +static void pmu_timer_init(void)
1614 +{
1615 + u32 controlRegisterMask =
1616 + BIT(0) | /* enable counters */
1617 + BIT(2); /* reset clock counter; */
1618 +
1619 + /*
1620 + * Compute the number of xscale cycles needed between each
1621 + * PMU IRQ. This is done from the result of an OS calibration loop.
1622 + *
1623 + * For 533MHz CPU, 533000000 tick/s / 4000 times/sec = 138250
1624 + * 4000 times/sec = 37 mbufs/interrupt at line rate
1625 + * The pmu timer is reset to -138250 = 0xfffde3f6, to trigger an IRQ
1626 + * when this up counter overflows.
1627 + *
1628 + * The multiplication gives a number of instructions per second.
1629 + * which is close to the processor frequency, and then close to the
1630 + * PMU clock rate.
1631 + *
1632 + * 2 is the number of instructions per loop
1633 + *
1634 + */
1635 +
1636 + timer_countup_ticks = - ((loops_per_jiffy * HZ * 2) / poll_freq);
1637 +
1638 + /* enable the CCNT (clock count) timer from the PMU */
1639 + __asm__(" mcr p14,0,%0,c0,c1,0\n"
1640 + : : "r" (controlRegisterMask));
1641 +}
1642 +
1643 +static void pmu_timer_disable(void)
1644 +{
1645 + unsigned long flags;
1646 +
1647 + local_irq_save(flags);
1648 +
1649 + __asm__(" mrc p14,0,r1,c4,c1,0; " /* get int enable register */
1650 + " and r1,r1,#0x1e; "
1651 + " mcr p14,0,r1,c4,c1,0\n" /* disable interrupts */
1652 + : : : "r1");
1653 + local_irq_restore(flags);
1654 +}
1655 +
1656 +void queue_set_watermarks(struct qm_queue *queue, unsigned ne, unsigned nf)
1657 +{
1658 + u32 val;
1659 + /* calculate the register values
1660 + * 0->0, 1->1, 2->2, 4->3, 8->4 16->5...*/
1661 + ne = _log2(ne<<1) & 0x7;
1662 + nf = _log2(nf<<1) & 0x7;
1663 +
1664 + /* Mask out old watermarks */
1665 + val = queue_read_cfg_reg(queue) & ~0xfc000000;
1666 + queue_write_cfg_reg(queue, val | (ne << 26) | (nf << 29));
1667 +}
1668 +
1669 +int queue_set_irq_src(struct qm_queue *queue, int flag)
1670 +{
1671 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1672 + u32 reg;
1673 + int offs, bitoffs;
1674 +
1675 + /* Q 0-7 are in REG0, 8-15 are in REG1, etc. They occupy 4 bits/Q */
1676 + offs = queue->id/8 + INT0_SRC_SELREG0;
1677 + bitoffs = (queue->id % 8)*4;
1678 +
1679 + reg = *(qmgr->addr + offs) & ~(0xf << bitoffs);
1680 + *(qmgr->addr + offs) = reg | (flag << bitoffs);
1681 +
1682 + return 0;
1683 +}
1684 +
1685 +static irqreturn_t irq_qm1(int irq, void *dev_id)
1686 +{
1687 + struct qm_qmgr *qmgr = dev_id;
1688 + int offs, reg;
1689 + struct qm_queue *queue;
1690 +
1691 + if (poll_enable)
1692 + pmu_timer_restart();
1693 +
1694 + reg = *(qmgr->addr + QUE_INT_REG0);
1695 + while(reg) {
1696 + /*
1697 + * count leading zeros. "offs" gets
1698 + * the amount of leading 0 in "reg"
1699 + */
1700 + asm ("clz %0, %1;" : "=r"(offs) : "r"(reg));
1701 + offs = 31 - offs;
1702 + reg &= ~(1 << offs);
1703 + queue = qmgr->queues[offs];
1704 + if (likely(queue)) {
1705 + if (likely(queue->irq_cb)) {
1706 + queue->irq_cb(queue);
1707 + } else {
1708 + printk(KERN_ERR "Missing callback for Q %d\n",
1709 + offs);
1710 + }
1711 + } else {
1712 + printk(KERN_ERR "IRQ for unregistered Q %d\n", offs);
1713 + }
1714 + }
1715 + return IRQ_HANDLED;
1716 +}
1717 +
1718 +struct qm_queue *request_queue(int qid, int len)
1719 +{
1720 + int ram;
1721 + struct qm_qmgr *qmgr;
1722 + struct qm_queue *queue;
1723 +
1724 + if (!qmgr_dev)
1725 + return ERR_PTR(-ENODEV);
1726 +
1727 +	if ((qid < 0) || (qid >= MAX_QUEUES))
1728 +		return ERR_PTR(-ERANGE);
1729 +
1730 + switch (len) {
1731 + case 16:
1732 + case 32:
1733 + case 64:
1734 + case 128: break;
1735 + default : return ERR_PTR(-EINVAL);
1736 + }
1737 +
1738 + qmgr = dev_get_drvdata(qmgr_dev);
1739 +
1740 + if (qmgr->queues[qid]) {
1741 + /* not an error, just in use already */
1742 + return NULL;
1743 + }
1744 + if ((ram = get_free_qspace(qmgr, len)) < 0) {
1745 + printk(KERN_ERR "No free SRAM space for this queue\n");
1746 + return ERR_PTR(-ENOMEM);
1747 + }
1748 + if (!(queue = kzalloc(sizeof(struct qm_queue), GFP_KERNEL)))
1749 + return ERR_PTR(-ENOMEM);
1750 +
1751 + if (!try_module_get(THIS_MODULE)) {
1752 + kfree(queue);
1753 + return ERR_PTR(-ENODEV);
1754 + }
1755 +
1756 + queue->addr = ram;
1757 + queue->len = len;
1758 + queue->id = qid;
1759 + queue->dev = get_device(qmgr_dev);
1760 + queue->acc_reg = qmgr->addr + (4 * qid);
1761 + qmgr->queues[qid] = queue;
1762 + if (request_pool(qmgr_dev, len)) {
1763 + printk(KERN_ERR "Failed to request DMA pool of Q %d\n", qid);
1764 + }
1765 +
1766 + conf_q_regs(queue);
1767 + return queue;
1768 +}
1769 +
1770 +void release_queue(struct qm_queue *queue)
1771 +{
1772 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1773 +
1774 + BUG_ON(qmgr->queues[queue->id] != queue);
1775 + qmgr->queues[queue->id] = NULL;
1776 +
1777 + if (free_pool(queue->dev, queue->len)) {
1778 + printk(KERN_ERR "Failed to release DMA pool of Q %d\n",
1779 + queue->id);
1780 + }
1781 + queue_disable_irq(queue);
1782 + queue_write_cfg_reg(queue, 0);
1783 +
1784 + module_put(THIS_MODULE);
1785 + put_device(queue->dev);
1786 + kfree(queue);
1787 +}
1788 +
1789 +
1790 +
1791 +
1792 +static int qmgr_probe(struct platform_device *pdev)
1793 +{
1794 + struct resource *res;
1795 + struct qm_qmgr *qmgr;
1796 + int size, ret=0, i;
1797 +
1798 + if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
1799 + return -EIO;
1800 +
1801 + if ((i = platform_get_irq(pdev, 0)) < 0)
1802 + return -EIO;
1803 +
1804 + if (!(qmgr = kzalloc(sizeof(struct qm_qmgr), GFP_KERNEL)))
1805 + return -ENOMEM;
1806 +
1807 + qmgr->irq = i;
1808 + size = res->end - res->start +1;
1809 + qmgr->res = request_mem_region(res->start, size, "ixp_qmgr");
1810 + if (!qmgr->res) {
1811 + ret = -EBUSY;
1812 + goto out_free;
1813 + }
1814 +
1815 + qmgr->addr = ioremap(res->start, size);
1816 + if (!qmgr->addr) {
1817 + ret = -ENOMEM;
1818 + goto out_rel;
1819 + }
1820 +
1821 + /* Reset Q registers */
1822 + for (i=0; i<4; i++)
1823 + *(qmgr->addr + QUE_LOW_STAT0 +i) = 0x33333333;
1824 + for (i=0; i<10; i++)
1825 + *(qmgr->addr + QUE_UO_STAT0 +i) = 0x0;
1826 + for (i=0; i<4; i++)
1827 + *(qmgr->addr + INT0_SRC_SELREG0 +i) = 0x0;
1828 + for (i=0; i<2; i++) {
1829 + *(qmgr->addr + QUE_IE_REG0 +i) = 0x00;
1830 + *(qmgr->addr + QUE_INT_REG0 +i) = 0xffffffff;
1831 + }
1832 + for (i=0; i<64; i++) {
1833 + *(qmgr->addr + IX_QMGR_QCFG_BASE + i) = 0x0;
1834 + }
1835 +
1836 + if (poll_enable) {
1837 + pmu_timer_init();
1838 + qmgr->irq = IRQ_IXP4XX_XSCALE_PMU;
1839 + }
1840 + ret = request_irq(qmgr->irq, irq_qm1, SA_SHIRQ | SA_INTERRUPT,
1841 + "qmgr", qmgr);
1842 +	if (ret) {
1843 +		printk(KERN_ERR "Failed to request IRQ(%d)\n", qmgr->irq);
1844 +		iounmap(qmgr->addr);
1845 +		goto out_rel;
1846 +	}
1847 + if (poll_enable)
1848 + pmu_timer_restart();
1849 +
1850 + rwlock_init(&qmgr->lock);
1851 + qmgr->dmapool = dma_pool_create("qmgr", &pdev->dev,
1852 + sizeof(struct npe_cont), 32, 0);
1853 + platform_set_drvdata(pdev, qmgr);
1854 +
1855 + qmgr_dev = &pdev->dev;
1856 +
1857 + printk(KERN_INFO IXQMGR_VERSION " initialized.\n");
1858 +
1859 + return 0;
1860 +
1861 +out_rel:
1862 + release_resource(qmgr->res);
1863 +out_free:
1864 + kfree(qmgr);
1865 + return ret;
1866 +}
1867 +
1868 +static int qmgr_remove(struct platform_device *pdev)
1869 +{
1870 + struct qm_qmgr *qmgr = platform_get_drvdata(pdev);
1871 + int i;
1872 +
1873 + for (i=0; i<MAX_QUEUES; i++) {
1874 + if (qmgr->queues[i]) {
1875 + printk(KERN_ERR "WARNING Unreleased Q: %d\n", i);
1876 + release_queue(qmgr->queues[i]);
1877 + }
1878 + }
1879 +
1880 + if (poll_enable)
1881 + pmu_timer_disable();
1882 +
1883 + synchronize_irq (qmgr->irq);
1884 + free_irq(qmgr->irq, qmgr);
1885 +
1886 + dma_pool_destroy(qmgr->dmapool);
1887 + iounmap(qmgr->addr);
1888 + release_resource(qmgr->res);
1889 + platform_set_drvdata(pdev, NULL);
1890 + qmgr_dev = NULL;
1891 + kfree(qmgr);
1892 + return 0;
1893 +}
1894 +
1895 +static struct platform_driver ixp4xx_qmgr = {
1896 + .driver.name = "ixp4xx_qmgr",
1897 + .probe = qmgr_probe,
1898 + .remove = qmgr_remove,
1899 +};
1900 +
1901 +
1902 +static int __init init_qmgr(void)
1903 +{
1904 + return platform_driver_register(&ixp4xx_qmgr);
1905 +}
1906 +
1907 +static void __exit finish_qmgr(void)
1908 +{
1909 + platform_driver_unregister(&ixp4xx_qmgr);
1910 +}
1911 +
1912 +module_init(init_qmgr);
1913 +module_exit(finish_qmgr);
1914 +
1915 +MODULE_LICENSE("GPL");
1916 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1917 +
1918 +EXPORT_SYMBOL(request_queue);
1919 +EXPORT_SYMBOL(release_queue);
1920 +EXPORT_SYMBOL(queue_set_irq_src);
1921 +EXPORT_SYMBOL(queue_set_watermarks);
1922 +EXPORT_SYMBOL(queue_len);
1923 Index: linux-2.6.21.7/drivers/net/ixp4xx/mac.h
1924 ===================================================================
1925 --- /dev/null
1926 +++ linux-2.6.21.7/drivers/net/ixp4xx/mac.h
1927 @@ -0,0 +1,275 @@
1928 +/*
1929 + * Copyright (C) 2002-2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1930 + *
1931 + * This file is released under the GPLv2
1932 + */
1933 +
1934 +#include <linux/resource.h>
1935 +#include <linux/netdevice.h>
1936 +#include <linux/io.h>
1937 +#include <linux/mii.h>
1938 +#include <linux/workqueue.h>
1939 +#include <asm/hardware.h>
1940 +#include <linux/ixp_qmgr.h>
1941 +
1942 +/* 32 bit offsets to be added to u32 *pointers */
1943 +#define MAC_TX_CNTRL1 0x00 // 0x000
1944 +#define MAC_TX_CNTRL2 0x01 // 0x004
1945 +#define MAC_RX_CNTRL1 0x04 // 0x010
1946 +#define MAC_RX_CNTRL2 0x05 // 0x014
1947 +#define MAC_RANDOM_SEED 0x08 // 0x020
1948 +#define MAC_THRESH_P_EMPTY 0x0c // 0x030
1949 +#define MAC_THRESH_P_FULL 0x0e // 0x038
1950 +#define MAC_BUF_SIZE_TX 0x10 // 0x040
1951 +#define MAC_TX_DEFER 0x14 // 0x050
1952 +#define MAC_RX_DEFER 0x15 // 0x054
1953 +#define MAC_TX_TWO_DEFER_1 0x18 // 0x060
1954 +#define MAC_TX_TWO_DEFER_2 0x19 // 0x064
1955 +#define MAC_SLOT_TIME 0x1c // 0x070
1956 +#define MAC_MDIO_CMD 0x20 // 0x080 4 registers 0x20 - 0x23
1957 +#define MAC_MDIO_STS 0x24 // 0x090 4 registers 0x24 - 0x27
1958 +#define MAC_ADDR_MASK 0x28 // 0x0A0 6 registers 0x28 - 0x2d
1959 +#define MAC_ADDR 0x30 // 0x0C0 6 registers 0x30 - 0x35
1960 +#define MAC_INT_CLK_THRESH 0x38 // 0x0E0 1 register
1961 +#define MAC_UNI_ADDR 0x3c // 0x0F0 6 registers 0x3c - 0x41
1962 +#define MAC_CORE_CNTRL 0x7f // 0x1fC
1963 +
1964 +/* TX Control Register 1*/
1965 +
1966 +#define TX_CNTRL1_TX_EN BIT(0)
1967 +#define TX_CNTRL1_DUPLEX BIT(1)
1968 +#define TX_CNTRL1_RETRY BIT(2)
1969 +#define TX_CNTRL1_PAD_EN BIT(3)
1970 +#define TX_CNTRL1_FCS_EN BIT(4)
1971 +#define TX_CNTRL1_2DEFER BIT(5)
1972 +#define TX_CNTRL1_RMII BIT(6)
1973 +
1974 +/* TX Control Register 2 */
1975 +#define TX_CNTRL2_RETRIES_MASK 0xf
1976 +
1977 +/* RX Control Register 1 */
1978 +#define RX_CNTRL1_RX_EN BIT(0)
1979 +#define RX_CNTRL1_PADSTRIP_EN BIT(1)
1980 +#define RX_CNTRL1_CRC_EN BIT(2)
1981 +#define RX_CNTRL1_PAUSE_EN BIT(3)
1982 +#define RX_CNTRL1_LOOP_EN BIT(4)
1983 +#define RX_CNTRL1_ADDR_FLTR_EN BIT(5)
1984 +#define RX_CNTRL1_RX_RUNT_EN BIT(6)
1985 +#define RX_CNTRL1_BCAST_DIS BIT(7)
1986 +
1987 +/* RX Control Register 2 */
1988 +#define RX_CNTRL2_DEFER_EN BIT(0)
1989 +
1990 +/* Core Control Register */
1991 +#define CORE_RESET BIT(0)
1992 +#define CORE_RX_FIFO_FLUSH BIT(1)
1993 +#define CORE_TX_FIFO_FLUSH BIT(2)
1994 +#define CORE_SEND_JAM BIT(3)
1995 +#define CORE_MDC_EN BIT(4)
1996 +
1997 +/* Definitions for MII access routines*/
1998 +
1999 +#define MII_REG_SHL 16
2000 +#define MII_ADDR_SHL 21
2001 +
2002 +#define MII_GO BIT(31)
2003 +#define MII_WRITE BIT(26)
2004 +#define MII_READ_FAIL BIT(31)
2005 +
2006 +#define MII_TIMEOUT_10TH_SECS 5
2007 +#define MII_10TH_SEC_IN_MILLIS 100
2008 +
2009 +/*
2010 + *
2011 + * Default values
2012 + *
2013 + */
2014 +
2015 +#define MAC_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
2016 +
2017 +#define MAC_TX_CNTRL1_DEFAULT (\
2018 + TX_CNTRL1_TX_EN | \
2019 + TX_CNTRL1_RETRY | \
2020 + TX_CNTRL1_FCS_EN | \
2021 + TX_CNTRL1_2DEFER | \
2022 + TX_CNTRL1_PAD_EN )
2023 +
2024 +#define MAC_TX_MAX_RETRIES_DEFAULT 0x0f
2025 +
2026 +#define MAC_RX_CNTRL1_DEFAULT ( \
2027 + RX_CNTRL1_PADSTRIP_EN | \
2028 + RX_CNTRL1_CRC_EN | \
2029 + RX_CNTRL1_RX_EN )
2030 +
2031 +#define MAC_RX_CNTRL2_DEFAULT 0x0
2032 +#define MAC_TX_CNTRL2_DEFAULT TX_CNTRL2_RETRIES_MASK
2033 +
2034 +/* Thresholds determined by NPE firmware FS */
2035 +#define MAC_THRESH_P_EMPTY_DEFAULT 0x12
2036 +#define MAC_THRESH_P_FULL_DEFAULT 0x30
2037 +
2038 +/* Number of bytes that must be in the tx fifo before
2039 + * transmission commences */
2040 +#define MAC_BUF_SIZE_TX_DEFAULT 0x8
2041 +
2042 +/* One-part deferral values */
2043 +#define MAC_TX_DEFER_DEFAULT 0x15
2044 +#define MAC_RX_DEFER_DEFAULT 0x16
2045 +
2046 +/* Two-part deferral values... */
2047 +#define MAC_TX_TWO_DEFER_1_DEFAULT 0x08
2048 +#define MAC_TX_TWO_DEFER_2_DEFAULT 0x07
2049 +
2050 +/* This value applies to MII */
2051 +#define MAC_SLOT_TIME_DEFAULT 0x80
2052 +
2053 +/* This value applies to RMII */
2054 +#define MAC_SLOT_TIME_RMII_DEFAULT 0xFF
2055 +
2056 +#define MAC_ADDR_MASK_DEFAULT 0xFF
2057 +
2058 +#define MAC_INT_CLK_THRESH_DEFAULT 0x1
2059 +/* The following is a value chosen at random */
2060 +#define MAC_RANDOM_SEED_DEFAULT 0x8
2061 +
2062 +/* By default we must configure the MAC to generate the MDC clock*/
2063 +#define CORE_DEFAULT (CORE_MDC_EN)
2064 +
2065 +/* End of Intel provided register information */
2066 +
2067 +extern int
2068 +mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg);
2069 +extern void
2070 +mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val);
2071 +extern void init_mdio(struct net_device *dev, int phy_id);
2072 +
2073 +struct mac_info {	/* per-port private state, lives in netdev_priv() */
2074 +	u32 __iomem *addr;	/* ioremapped MAC register window */
2075 +	struct resource *res;	/* reserved MMIO region */
2076 +	struct device *npe_dev;	/* NPE device backing this MAC */
2077 +	struct net_device *netdev;
2078 +	struct qm_qmgr *qmgr;
2079 +	struct qm_queue *rxq;	/* free RX buffers handed to the NPE */
2080 +	struct qm_queue *txq;	/* frames queued for transmission */
2081 +	struct qm_queue *rxdoneq;	/* received frames returned by the NPE */
2082 +	u32 irqflags;
2083 +	struct net_device_stats stat;
2084 +	struct mii_if_info mii;
2085 +	struct delayed_work mdio_thread;	/* periodic link check, see mac_mdio_thread() */
2086 +	int rxq_pkt;	/* number of buffers currently owned by the RX path */
2087 +	int txq_pkt;	/* number of buffers currently owned by the TX path */
2088 +	int unloading;
2089 +	struct mac_plat_info *plat;
2090 +	int npe_stat_num;	/* how many ethtool stats this NPE image provides */
2091 +	spinlock_t rx_lock;
2092 +	u32 msg_enable;	/* netif_msg_* bitmask */
2093 +};
2094 +
2095 +static inline void mac_write_reg(struct mac_info *mac, int offset, u32 val)	/* offset is in 32-bit words from mac->addr */
2096 +{
2097 +	*(mac->addr + offset) = val;
2098 +}
2099 +static inline u32 mac_read_reg(struct mac_info *mac, int offset)	/* word-offset counterpart of mac_write_reg() */
2100 +{
2101 +	return *(mac->addr + offset);
2102 +}
2103 +static inline void mac_set_regbit(struct mac_info *mac, int offset, u32 bit)	/* read-modify-write: set bit(s) */
2104 +{
2105 +	mac_write_reg(mac, offset, mac_read_reg(mac, offset) | bit);
2106 +}
2107 +static inline void mac_reset_regbit(struct mac_info *mac, int offset, u32 bit)	/* read-modify-write: clear bit(s) */
2108 +{
2109 +	mac_write_reg(mac, offset, mac_read_reg(mac, offset) & ~bit);
2110 +}
2111 +
2112 +static inline void mac_mdio_cmd_write(struct mac_info *mac, u32 cmd)	/* MDIO command is spread over 4 byte-wide registers, LSB first */
2113 +{
2114 +	int i;
2115 +	for(i=0; i<4; i++) {
2116 +		mac_write_reg(mac, MAC_MDIO_CMD + i, cmd & 0xff);
2117 +		cmd >>=8;
2118 +	}
2119 +}
2120 +
2121 +#define mac_mdio_cmd_read(mac) mac_mdio_read((mac), MAC_MDIO_CMD)
2122 +#define mac_mdio_status_read(mac) mac_mdio_read((mac), MAC_MDIO_STS)
2123 +static inline u32 mac_mdio_read(struct mac_info *mac, int offset)	/* reassemble a 32-bit value from 4 byte-wide registers, LSB first */
2124 +{
2125 +	int i;
2126 +	u32 data = 0;
2127 +	for(i=0; i<4; i++) {
2128 +		data |= (mac_read_reg(mac, offset + i) & 0xff) << (i*8);
2129 +	}
2130 +	return data;
2131 +}
2132 +
2133 +static inline u32 mdio_cmd(int phy_addr, int phy_reg)	/* build an MDIO GO command word for the given PHY/register */
2134 +{
2135 +	return phy_addr << MII_ADDR_SHL |
2136 +		phy_reg << MII_REG_SHL |
2137 +		MII_GO;
2138 +}
2139 +
2140 +#define MAC_REG_LIST { \
2141 + MAC_TX_CNTRL1, MAC_TX_CNTRL2, \
2142 + MAC_RX_CNTRL1, MAC_RX_CNTRL2, \
2143 + MAC_RANDOM_SEED, MAC_THRESH_P_EMPTY, MAC_THRESH_P_FULL, \
2144 + MAC_BUF_SIZE_TX, MAC_TX_DEFER, MAC_RX_DEFER, \
2145 + MAC_TX_TWO_DEFER_1, MAC_TX_TWO_DEFER_2, MAC_SLOT_TIME, \
2146 + MAC_ADDR_MASK +0, MAC_ADDR_MASK +1, MAC_ADDR_MASK +2, \
2147 + MAC_ADDR_MASK +3, MAC_ADDR_MASK +4, MAC_ADDR_MASK +5, \
2148 + MAC_ADDR +0, MAC_ADDR +1, MAC_ADDR +2, \
2149 + MAC_ADDR +3, MAC_ADDR +4, MAC_ADDR +5, \
2150 + MAC_INT_CLK_THRESH, \
2151 + MAC_UNI_ADDR +0, MAC_UNI_ADDR +1, MAC_UNI_ADDR +2, \
2152 + MAC_UNI_ADDR +3, MAC_UNI_ADDR +4, MAC_UNI_ADDR +5, \
2153 + MAC_CORE_CNTRL \
2154 +}
2155 +
2156 +#define NPE_STAT_NUM 34
2157 +#define NPE_STAT_NUM_BASE 22
2158 +#define NPE_Q_STAT_NUM 4
2159 +
2160 +#define NPE_Q_STAT_STRINGS \
2161 + {"RX ready to use queue len "}, \
2162 + {"RX received queue len "}, \
2163 + {"TX to be send queue len "}, \
2164 + {"TX done queue len "},
2165 +
2166 +#define NPE_STAT_STRINGS \
2167 + {"StatsAlignmentErrors "}, \
2168 + {"StatsFCSErrors "}, \
2169 + {"StatsInternalMacReceiveErrors "}, \
2170 + {"RxOverrunDiscards "}, \
2171 + {"RxLearnedEntryDiscards "}, \
2172 + {"RxLargeFramesDiscards "}, \
2173 + {"RxSTPBlockedDiscards "}, \
2174 + {"RxVLANTypeFilterDiscards "}, \
2175 + {"RxVLANIdFilterDiscards "}, \
2176 + {"RxInvalidSourceDiscards "}, \
2177 + {"RxBlackListDiscards "}, \
2178 + {"RxWhiteListDiscards "}, \
2179 + {"RxUnderflowEntryDiscards "}, \
2180 + {"StatsSingleCollisionFrames "}, \
2181 + {"StatsMultipleCollisionFrames "}, \
2182 + {"StatsDeferredTransmissions "}, \
2183 + {"StatsLateCollisions "}, \
2184 + {"StatsExcessiveCollsions "}, \
2185 + {"StatsInternalMacTransmitErrors"}, \
2186 + {"StatsCarrierSenseErrors "}, \
2187 + {"TxLargeFrameDiscards "}, \
2188 + {"TxVLANIdFilterDiscards "}, \
2189 +\
2190 + {"RxValidFramesTotalOctets "}, \
2191 + {"RxUcastPkts "}, \
2192 + {"RxBcastPkts "}, \
2193 + {"RxMcastPkts "}, \
2194 + {"RxPkts64Octets "}, \
2195 + {"RxPkts65to127Octets "}, \
2196 + {"RxPkts128to255Octets "}, \
2197 + {"RxPkts256to511Octets "}, \
2198 + {"RxPkts512to1023Octets "}, \
2199 + {"RxPkts1024to1518Octets "}, \
2200 + {"RxInternalNPEReceiveErrors "}, \
2201 + {"TxInternalNPETransmitErrors "}
2202 +
2203 Index: linux-2.6.21.7/drivers/net/ixp4xx/mac_driver.c
2204 ===================================================================
2205 --- /dev/null
2206 +++ linux-2.6.21.7/drivers/net/ixp4xx/mac_driver.c
2207 @@ -0,0 +1,850 @@
2208 +/*
2209 + * mac_driver.c - provide a network interface for each MAC
2210 + *
2211 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
2212 + *
2213 + * This file is released under the GPLv2
2214 + */
2215 +
2216 +#include <linux/kernel.h>
2217 +#include <linux/module.h>
2218 +#include <linux/platform_device.h>
2219 +#include <linux/netdevice.h>
2220 +#include <linux/etherdevice.h>
2221 +#include <linux/ethtool.h>
2222 +#include <linux/slab.h>
2223 +#include <linux/delay.h>
2224 +#include <linux/err.h>
2225 +#include <linux/dma-mapping.h>
2226 +#include <linux/workqueue.h>
2227 +#include <asm/io.h>
2228 +#include <asm/irq.h>
2229 +
2230 +
2231 +#include <linux/ixp_qmgr.h>
2232 +#include <linux/ixp_npe.h>
2233 +#include "mac.h"
2234 +
2235 +#define MDIO_INTERVAL (3*HZ)
2236 +#define RX_QUEUE_PREFILL 64
2237 +#define TX_QUEUE_PREFILL 16
2238 +
2239 +#define IXMAC_NAME "ixp4xx_mac"
2240 +#define IXMAC_VERSION "0.3.1"
2241 +
2242 +#define MAC_DEFAULT_REG(mac, name) \
2243 + mac_write_reg(mac, MAC_ ## name, MAC_ ## name ## _DEFAULT)
2244 +
2245 +#define TX_DONE_QID 31
2246 +
2247 +#define DMA_ALLOC_SIZE 2048
2248 +#define DMA_HDR_SIZE (sizeof(struct npe_cont))
2249 +#define DMA_BUF_SIZE (DMA_ALLOC_SIZE - DMA_HDR_SIZE)
2250 +
2251 +/* Since the NPEs use 1 Return Q for sent frames, we need a device
2252 + * independent return Q. We call it tx_doneq.
2253 + * It will be initialized during module load and uninitialized
2254 + * during module unload. Evil hack, but there is no choice :-(
2255 + */
2256 +
2257 +static struct qm_queue *tx_doneq = NULL;
2258 +static int debug = -1;
2259 +module_param(debug, int, 0);
2260 +
2261 +static int init_buffer(struct qm_queue *queue, int count)	/* allocate, DMA-map and enqueue up to count buffers; returns how many made it */
2262 +{
2263 +	int i;
2264 +	struct npe_cont *cont;
2265 +
2266 +	for (i=0; i<count; i++) {
2267 +		cont = kmalloc(DMA_ALLOC_SIZE, GFP_KERNEL | GFP_DMA);
2268 +		if (!cont)
2269 +			goto err;
2270 +
2271 +		cont->phys = dma_map_single(queue->dev, cont, DMA_ALLOC_SIZE,
2272 +				DMA_BIDIRECTIONAL);
2273 +		if (dma_mapping_error(cont->phys))
2274 +			goto err;
2275 +
2276 +		cont->data = cont+1;	/* payload follows the descriptor header in the same allocation */
2277 +		/* now the buffer is on a 32 bit boundary.
2278 +		 * we add 2 bytes for good alignment to SKB */
2279 +		cont->data+=2;
2280 +		cont->eth.next = 0;
2281 +		cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2282 +		cont->eth.pkt_len = 0;
2283 +		/* also add 2 alignment bytes from cont->data*/
2284 +		cont->eth.phys_addr = cpu_to_npe32(cont->phys+ DMA_HDR_SIZE+ 2);
2285 +
2286 +		dma_sync_single(queue->dev, cont->phys, DMA_HDR_SIZE,
2287 +				DMA_TO_DEVICE);
2288 +
2289 +		queue_put_entry(queue, cont->phys);
2290 +		if (queue_stat(queue) == 2) { /* overflow */
2291 +			dma_unmap_single(queue->dev, cont->phys, DMA_ALLOC_SIZE,
2292 +				DMA_BIDIRECTIONAL);
2293 +			goto err;
2294 +		}
2295 +	}
2296 +	return i;
2297 +err:
2298 +	if (cont)
2299 +		kfree(cont);	/* free the buffer that failed; earlier ones stay queued */
2300 +	return i;
2301 +}
2302 +
2303 +static int destroy_buffer(struct qm_queue *queue, int count)	/* dequeue, unmap and free up to count buffers; returns how many were freed */
2304 +{
2305 +	u32 phys;
2306 +	int i;
2307 +	struct npe_cont *cont;
2308 +
2309 +	for (i=0; i<count; i++) {
2310 +		phys = queue_get_entry(queue) & ~0xf;	/* low 4 bits of a queue entry are flags, not address */
2311 +		if (!phys)
2312 +			break;
2313 +		dma_unmap_single(queue->dev, phys, DMA_ALLOC_SIZE,
2314 +				DMA_BIDIRECTIONAL);
2315 +		cont = dma_to_virt(queue->dev, phys);
2316 +		kfree(cont);
2317 +	}
2318 +	return i;
2319 +}
2320 +
2321 +static void mac_init(struct mac_info *mac)	/* program the Intel-recommended default values into the MAC registers */
2322 +{
2323 +	MAC_DEFAULT_REG(mac, TX_CNTRL2);
2324 +	MAC_DEFAULT_REG(mac, RANDOM_SEED);
2325 +	MAC_DEFAULT_REG(mac, THRESH_P_EMPTY);
2326 +	MAC_DEFAULT_REG(mac, THRESH_P_FULL);
2327 +	MAC_DEFAULT_REG(mac, TX_DEFER);
2328 +	MAC_DEFAULT_REG(mac, TX_TWO_DEFER_1);
2329 +	MAC_DEFAULT_REG(mac, TX_TWO_DEFER_2);
2330 +	MAC_DEFAULT_REG(mac, SLOT_TIME);
2331 +	MAC_DEFAULT_REG(mac, INT_CLK_THRESH);
2332 +	MAC_DEFAULT_REG(mac, BUF_SIZE_TX);
2333 +	MAC_DEFAULT_REG(mac, TX_CNTRL1);
2334 +	MAC_DEFAULT_REG(mac, RX_CNTRL1);
2335 +}
2336 +
2337 +static void mac_set_uniaddr(struct net_device *dev)	/* push dev->dev_addr to both the NPE and the MAC unicast registers */
2338 +{
2339 +	int i;
2340 +	struct mac_info *mac = netdev_priv(dev);
2341 +	struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2342 +
2343 +	/* check for multicast */
2344 +	if (dev->dev_addr[0] & 1)	/* group bit set: not a valid unicast address, bail out */
2345 +		return;
2346 +
2347 +	npe_mh_setportaddr(npe, mac->plat, dev->dev_addr);
2348 +	npe_mh_disable_firewall(npe, mac->plat);
2349 +	for (i=0; i<dev->addr_len; i++)
2350 +		mac_write_reg(mac, MAC_UNI_ADDR + i, dev->dev_addr[i]);
2351 +}
2352 +
2353 +static void update_duplex_mode(struct net_device *dev)	/* sync the MAC duplex setting with the MII-reported state */
2354 +{
2355 +	struct mac_info *mac = netdev_priv(dev);
2356 +	if (netif_msg_link(mac)) {
2357 +		printk(KERN_DEBUG "Link of %s is %s-duplex\n", dev->name,
2358 +				mac->mii.full_duplex ? "full" : "half");
2359 +	}
2360 +	if (mac->mii.full_duplex) {
2361 +		/* NOTE(review): the DUPLEX bit appears to select half-duplex — cleared for full; confirm against datasheet */
2362 +		mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
2363 +	} else {
2364 +		mac_set_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
2365 +	}
2366 +}
2366 +
2367 +static int media_check(struct net_device *dev, int init)	/* returns 1 and updates duplex if the MII link state changed */
2368 +{
2369 +	struct mac_info *mac = netdev_priv(dev);
2370 +
2371 +	if (mii_check_media(&mac->mii, netif_msg_link(mac), init)) {
2372 +		update_duplex_mode(dev);
2373 +		return 1;
2374 +	}
2375 +	return 0;
2376 +}
2377 +
2378 +static void get_npe_stats(struct mac_info *mac, u32 *buf, int len, int reset)
2379 +{
2380 +	struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2381 +	u32 phys;
2382 +	/* zero the result buffer before the NPE DMA-fills it */
2383 +	memset(buf, 0, len);	/* was memset(buf, len, 0): swapped args zeroed nothing */
2384 +	phys = dma_map_single(mac->npe_dev, buf, len, DMA_BIDIRECTIONAL);
2385 +	npe_mh_get_stats(npe, mac->plat, phys, reset);
2386 +	dma_unmap_single(mac->npe_dev, phys, len, DMA_BIDIRECTIONAL);
2387 +}
2388 +
2389 +static void irqcb_recv(struct qm_queue *queue)	/* RX-done queue IRQ: mask further IRQs and hand work to the poll routine */
2390 +{
2391 +	struct net_device *dev = queue->cb_data;
2392 +
2393 +	queue_ack_irq(queue);
2394 +	queue_disable_irq(queue);	/* re-enabled by ix_poll() when the queue is drained */
2395 +	if (netif_running(dev))
2396 +		netif_rx_schedule(dev);
2397 +}
2398 +
2399 +int ix_recv(struct net_device *dev, int *budget, struct qm_queue *queue)
2400 +{
2401 +	struct mac_info *mac = netdev_priv(dev);
2402 +	struct sk_buff *skb;
2403 +	u32 phys;
2404 +	struct npe_cont *cont;
2405 +
2406 +	while (*budget > 0 && netif_running(dev) ) {
2407 +		int len;
2408 +		phys = queue_get_entry(queue) & ~0xf;
2409 +		if (!phys)
2410 +			break;
2411 +		dma_sync_single(queue->dev, phys, DMA_HDR_SIZE,
2412 +				DMA_FROM_DEVICE);
2413 +		cont = dma_to_virt(queue->dev, phys);
2414 +		len = npe_to_cpu16(cont->eth.pkt_len) -4; /* strip FCS */
2415 +
2416 +		if (unlikely(netif_msg_rx_status(mac))) {
2417 +			printk(KERN_DEBUG "%s: RX packet size: %u\n",
2418 +				dev->name, len);
2419 +			queue_state(mac->rxq);
2420 +			queue_state(mac->rxdoneq);
2421 +		}
2422 +		skb = dev_alloc_skb(len + 2);
2423 +		if (likely(skb)) {
2424 +			skb->dev = dev;
2425 +			skb_reserve(skb, 2);
2426 +			dma_sync_single(queue->dev, cont->eth.phys_addr, len,
2427 +					DMA_FROM_DEVICE);
2428 +#ifdef CONFIG_NPE_ADDRESS_COHERENT
2429 +			/* swap the payload of the SKB */
2430 +			{
2431 +				u32 *t = (u32*)(skb->data-2);
2432 +				u32 *s = (u32*)(cont->data-2);
2433 +				int i, j = (len+5)/4;
2434 +				for (i=0; i<j; i++)
2435 +					t[i] = cpu_to_be32(s[i]);
2436 +			}
2437 +#else
2438 +			eth_copy_and_sum(skb, cont->data, len, 0);
2439 +#endif
2440 +			skb_put(skb, len);
2441 +			skb->protocol = eth_type_trans(skb, dev);
2442 +			dev->last_rx = jiffies;
2443 +			mac->stat.rx_packets++;
2444 +			mac->stat.rx_bytes += skb->len;
2445 +			netif_receive_skb(skb); /* pass up last: the stack may free the skb */
2446 +		} else {
2447 +			mac->stat.rx_dropped++;
2448 +		}
2449 +		cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2450 +		cont->eth.pkt_len = 0;
2451 +		dma_sync_single(queue->dev, phys, DMA_HDR_SIZE, DMA_TO_DEVICE);
2452 +		queue_put_entry(mac->rxq, phys);
2453 +		dev->quota--;
2454 +		(*budget)--;
2455 +	}
2456 +	/* was "return !budget": budget is a pointer, so that was always 0 */
2457 +	return !(*budget);	/* nonzero = budget exhausted, more RX work may remain */
2458 +}
2459 +
2460 +static int ix_poll(struct net_device *dev, int *budget)	/* old-style NAPI poll: drain rxdoneq until budget runs out or queue is empty */
2461 +{
2462 +	struct mac_info *mac = netdev_priv(dev);
2463 +	struct qm_queue *queue = mac->rxdoneq;
2464 +
2465 +	for (;;) {
2466 +		if (ix_recv(dev, budget, queue))
2467 +			return 1;	/* budget exhausted: stay on the poll list */
2468 +		netif_rx_complete(dev);
2469 +		queue_enable_irq(queue);
2470 +		if (!queue_len(queue))
2471 +			break;
2472 +		/* entries arrived between drain and IRQ enable: try to re-schedule */
2473 +		queue_disable_irq(queue);
2474 +		if (netif_rx_reschedule(dev, 0))
2475 +			break;
2476 +	}
2477 +	return 0;
2478 +}
2478 +
2479 +static void ixmac_set_rx_mode (struct net_device *dev)	/* program promiscuous mode or an address/mask filter covering all mc addresses */
2480 +{
2481 +	struct mac_info *mac = netdev_priv(dev);
2482 +	struct dev_mc_list *mclist;
2483 +	u8 aset[dev->addr_len], aclear[dev->addr_len];
2484 +	int i,j;
2485 +
2486 +	if (dev->flags & IFF_PROMISC) {
2487 +		mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
2488 +	} else {
2489 +		mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
2490 +
2491 +		mclist = dev->mc_list;
2492 +		memset(aset, 0xff, dev->addr_len);	/* aset: bits common to ALL mc addresses */
2493 +		memset(aclear, 0x00, dev->addr_len);	/* aclear: bits set in ANY mc address */
2494 +		for (i = 0; mclist && i < dev->mc_count; i++) {
2495 +			for (j=0; j< dev->addr_len; j++) {
2496 +				aset[j] &= mclist->dmi_addr[j];
2497 +				aclear[j] |= mclist->dmi_addr[j];
2498 +			}
2499 +			mclist = mclist->next;
2500 +		}
2501 +		for (j=0; j< dev->addr_len; j++) {
2502 +			aclear[j] = aset[j] | ~aclear[j];	/* mask: bits identical across all mc addresses */
2503 +		}
2504 +		for (i=0; i<dev->addr_len; i++) {
2505 +			mac_write_reg(mac, MAC_ADDR + i, aset[i]);
2506 +			mac_write_reg(mac, MAC_ADDR_MASK + i, aclear[i]);
2507 +		}
2508 +	}
2509 +}
2510 +
2511 +static int ixmac_open (struct net_device *dev)	/* bring the port up: prefill queues, init MAC, probe NPE stat support */
2512 +{
2513 +	struct mac_info *mac = netdev_priv(dev);
2514 +	struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2515 +	u32 buf[NPE_STAT_NUM];
2516 +	int i;
2517 +	u32 phys;
2518 +
2519 +	/* first check if the NPE is up and running */
2520 +	if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
2521 +		printk(KERN_ERR "%s: %s not running\n", dev->name,
2522 +				npe->plat->name);
2523 +		return -EIO;
2524 +	}
2525 +	if (npe_mh_status(npe)) {
2526 +		printk(KERN_ERR "%s: %s not responding\n", dev->name,
2527 +				npe->plat->name);
2528 +		return -EIO;
2529 +	}
2530 +	mac->txq_pkt += init_buffer(mac->txq, TX_QUEUE_PREFILL - mac->txq_pkt);	/* top up to the prefill level */
2531 +	mac->rxq_pkt += init_buffer(mac->rxq, RX_QUEUE_PREFILL - mac->rxq_pkt);
2532 +
2533 +	queue_enable_irq(mac->rxdoneq);
2534 +
2535 +	/* drain all buffers from then RX-done-q to make the IRQ happen */
2536 +	while ((phys = queue_get_entry(mac->rxdoneq) & ~0xf)) {
2537 +		struct npe_cont *cont;
2538 +		cont = dma_to_virt(mac->rxdoneq->dev, phys);
2539 +		cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2540 +		cont->eth.pkt_len = 0;
2541 +		dma_sync_single(mac->rxdoneq->dev, phys, DMA_HDR_SIZE,
2542 +				DMA_TO_DEVICE);
2543 +		queue_put_entry(mac->rxq, phys);
2544 +	}
2545 +	mac_init(mac);
2546 +	npe_mh_set_rxqid(npe, mac->plat, mac->plat->rxdoneq_id);
2547 +	get_npe_stats(mac, buf, sizeof(buf), 1); /* reset stats */
2548 +	get_npe_stats(mac, buf, sizeof(buf), 0);
2549 +	/*
2550 +	 * if the extended stats contain random values
2551 +	 * the NPE image lacks extendet statistic counters
2552 +	 */
2553 +	for (i=NPE_STAT_NUM_BASE; i<NPE_STAT_NUM; i++) {
2554 +		if (buf[i] >10000)	/* heuristic: implausibly large right after a reset */
2555 +			break;
2556 +	}
2557 +	mac->npe_stat_num = i<NPE_STAT_NUM ? NPE_STAT_NUM_BASE : NPE_STAT_NUM;
2558 +	mac->npe_stat_num += NPE_Q_STAT_NUM;
2559 +
2560 +	mac_set_uniaddr(dev);
2561 +	media_check(dev, 1);
2562 +	ixmac_set_rx_mode(dev);
2563 +	netif_start_queue(dev);
2564 +	schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
2565 +	if (netif_msg_ifup(mac)) {
2566 +		printk(KERN_DEBUG "%s: open " IXMAC_NAME
2567 +			" RX queue %d bufs, TX queue %d bufs\n",
2568 +			dev->name, mac->rxq_pkt, mac->txq_pkt);
2569 +	}
2570 +	return 0;
2571 +}
2572 +
2573 +static int ixmac_start_xmit (struct sk_buff *skb, struct net_device *dev)
2574 +{
2575 +	struct mac_info *mac = netdev_priv(dev);
2576 +	struct npe_cont *cont;
2577 +	u32 phys;
2578 +	struct qm_queue *queue = mac->txq;
2579 +
2580 +	if (unlikely(skb->len > DMA_BUF_SIZE)) {
2581 +		dev_kfree_skb(skb);
2582 +		mac->stat.tx_errors++;
2583 +		return NETDEV_TX_OK;
2584 +	}
2585 +	phys = queue_get_entry(tx_doneq) & ~0xf;
2586 +	if (!phys)
2587 +		goto busy;
2588 +	cont = dma_to_virt(queue->dev, phys);
2589 +#ifdef CONFIG_NPE_ADDRESS_COHERENT
2590 +	/* swap the payload of the SKB */
2591 +	{
2592 +		u32 *s = (u32*)(skb->data-2);
2593 +		u32 *t = (u32*)(cont->data-2);
2594 +		int i,j = (skb->len+5) / 4;
2595 +		for (i=0; i<j; i++)
2596 +			t[i] = cpu_to_be32(s[i]);
2597 +	}
2598 +#else
2599 +	//skb_copy_and_csum_dev(skb, cont->data);
2600 +	memcpy(cont->data, skb->data, skb->len);
2601 +#endif
2602 +	cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2603 +	cont->eth.pkt_len = cpu_to_npe16(skb->len);
2604 +	/* disable VLAN functions in NPE image for now */
2605 +	cont->eth.flags = 0;
2606 +	dma_sync_single(queue->dev, phys, skb->len + DMA_HDR_SIZE,
2607 +			DMA_TO_DEVICE);
2608 +	queue_put_entry(queue, phys);
2609 +	if (queue_stat(queue) == 2) { /* overflow */
2610 +		queue_put_entry(tx_doneq, phys);
2611 +		goto busy;
2612 +	}
2613 +	mac->stat.tx_packets++;
2614 +	mac->stat.tx_bytes += skb->len;
2615 +	dev->trans_start = jiffies;
2616 +	if (netif_msg_tx_queued(mac)) {
2617 +		printk(KERN_DEBUG "%s: TX packet size %u\n",
2618 +			dev->name, skb->len);
2619 +		queue_state(mac->txq);
2620 +		queue_state(tx_doneq);
2621 +	}
2622 +	/* free the skb only after the last use of skb->len (was a use-after-free) */
2623 +	dev_kfree_skb(skb);
2624 +	return NETDEV_TX_OK;
2625 +busy:
2626 +	return NETDEV_TX_BUSY;
2627 +}
2628 +
2629 +static int ixmac_close (struct net_device *dev)	/* bring the port down: stop TX, mask RX IRQ, reclaim queue buffers */
2630 +{
2631 +	struct mac_info *mac = netdev_priv(dev);
2632 +
2633 +	netif_stop_queue (dev);
2634 +	queue_disable_irq(mac->rxdoneq);
2635 +
2636 +	mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt);	/* buffers still inside the NPE stay accounted in *_pkt */
2637 +	mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt);
2638 +
2639 +	cancel_rearming_delayed_work(&(mac->mdio_thread));
2640 +
2641 +	if (netif_msg_ifdown(mac)) {
2642 +		printk(KERN_DEBUG "%s: close " IXMAC_NAME
2643 +			" RX queue %d bufs, TX queue %d bufs\n",
2644 +			dev->name, mac->rxq_pkt, mac->txq_pkt);
2645 +	}
2646 +	return 0;
2647 +}
2648 +
2649 +static int ixmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)	/* forward MII ioctls; resync duplex if the user changed it */
2650 +{
2651 +	struct mac_info *mac = netdev_priv(dev);
2652 +	int rc, duplex_changed;
2653 +
2654 +	if (!netif_running(dev))
2655 +		return -EINVAL;
2656 +	if (!try_module_get(THIS_MODULE))	/* pin the module for the duration of the MII access */
2657 +		return -ENODEV;
2658 +	rc = generic_mii_ioctl(&mac->mii, if_mii(rq), cmd, &duplex_changed);
2659 +	module_put(THIS_MODULE);
2660 +	if (duplex_changed)
2661 +		update_duplex_mode(dev);
2662 +	return rc;
2663 +}
2664 +
2665 +static struct net_device_stats *ixmac_stats (struct net_device *dev)	/* get_stats hook: counters kept in software */
2666 +{
2667 +	struct mac_info *mac = netdev_priv(dev);
2668 +	return &mac->stat;
2669 +}
2670 +
2671 +static void ixmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)	/* ethtool -i: report NPE firmware version when loaded */
2672 +{
2673 +	struct mac_info *mac = netdev_priv(dev);
2674 +	struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2675 +
2676 +	strcpy(info->driver, IXMAC_NAME);
2677 +	strcpy(info->version, IXMAC_VERSION);
2678 +	if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
2679 +		snprintf(info->fw_version, 32, "%d.%d func [%d]",
2680 +			npe->img_info[2], npe->img_info[3], npe->img_info[1]);
2681 +	}
2682 +	strncpy(info->bus_info, npe->plat->name, ETHTOOL_BUSINFO_LEN);
2683 +}
2684 +
2685 +static int ixmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)	/* ethtool get: delegate to the generic MII layer */
2686 +{
2687 +	struct mac_info *mac = netdev_priv(dev);
2688 +	mii_ethtool_gset(&mac->mii, cmd);
2689 +	return 0;
2690 +}
2691 +
2692 +static int ixmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)	/* ethtool set: delegate to the generic MII layer */
2693 +{
2694 +	struct mac_info *mac = netdev_priv(dev);
2695 +	int rc;
2696 +	rc = mii_ethtool_sset(&mac->mii, cmd);
2697 +	return rc;
2698 +}
2699 +
2700 +static int ixmac_nway_reset(struct net_device *dev)	/* restart PHY autonegotiation */
2701 +{
2702 +	struct mac_info *mac = netdev_priv(dev);
2703 +	return mii_nway_restart(&mac->mii);
2704 +}
2705 +
2706 +static u32 ixmac_get_link(struct net_device *dev)	/* link state straight from the MII status register */
2707 +{
2708 +	struct mac_info *mac = netdev_priv(dev);
2709 +	return mii_link_ok(&mac->mii);
2710 +}
2711 +
2712 +static const int mac_reg_list[] = MAC_REG_LIST;	/* register offsets dumped by ethtool get_regs, one byte each */
2713 +
2714 +static int ixmac_get_regs_len(struct net_device *dev)	/* one byte of dump space per listed register */
2715 +{
2716 +	return ARRAY_SIZE(mac_reg_list);
2717 +}
2718 +
2719 +static void
2720 +ixmac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)	/* dump each register in mac_reg_list as a byte */
2721 +{
2722 +	int i;
2723 +	struct mac_info *mac = netdev_priv(dev);
2724 +	u8 *buf = regbuf;
2725 +
2726 +	for (i=0; i<regs->len; i++) {
2727 +		buf[i] = mac_read_reg(mac, mac_reg_list[i]);
2728 +	}
2729 +}
2730 +
2731 +static struct {
2732 +	const char str[ETH_GSTRING_LEN];
2733 +} ethtool_stats_keys[NPE_STAT_NUM + NPE_Q_STAT_NUM] = {	/* queue-length stats first, then NPE counters — matches data[] order */
2734 +	NPE_Q_STAT_STRINGS
2735 +	NPE_STAT_STRINGS
2736 +};
2737 +
2738 +static void ixmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)	/* copy only the stats this NPE image supports */
2739 +{
2740 +	struct mac_info *mac = netdev_priv(dev);
2741 +	memcpy(data, ethtool_stats_keys, mac->npe_stat_num * ETH_GSTRING_LEN);
2742 +}
2743 +
2744 +static int ixmac_get_stats_count(struct net_device *dev)	/* determined at open time, see ixmac_open() */
2745 +{
2746 +	struct mac_info *mac = netdev_priv(dev);
2747 +	return mac->npe_stat_num;
2748 +}
2749 +
2750 +static u32 ixmac_get_msglevel(struct net_device *dev)	/* ethtool msglvl getter */
2751 +{
2752 +	struct mac_info *mac = netdev_priv(dev);
2753 +	return mac->msg_enable;
2754 +}
2755 +
2756 +static void ixmac_set_msglevel(struct net_device *dev, u32 datum)	/* ethtool msglvl setter */
2757 +{
2758 +	struct mac_info *mac = netdev_priv(dev);
2759 +	mac->msg_enable = datum;
2760 +}
2761 +
2762 +static void ixmac_get_ethtool_stats(struct net_device *dev,
2763 +		struct ethtool_stats *stats, u64 *data)	/* first 4 slots: queue lengths; rest: NPE counters */
2764 +{
2765 +	int i;
2766 +	struct mac_info *mac = netdev_priv(dev);
2767 +	u32 buf[NPE_STAT_NUM];
2768 +
2769 +	data[0] = queue_len(mac->rxq);
2770 +	data[1] = queue_len(mac->rxdoneq);
2771 +	data[2] = queue_len(mac->txq);
2772 +	data[3] = queue_len(tx_doneq);
2773 +
2774 +	get_npe_stats(mac, buf, sizeof(buf), 0);
2775 +
2776 +	for (i=0; i<stats->n_stats-4; i++) {
2777 +		data[i+4] = npe_to_cpu32(buf[i]);	/* NPE counters are in NPE byte order */
2778 +	}
2779 +}
2780 +
2781 +static struct ethtool_ops ixmac_ethtool_ops = {	/* ethtool hooks; MII-backed get/set, NPE-backed statistics */
2782 +	.get_drvinfo = ixmac_get_drvinfo,
2783 +	.get_settings = ixmac_get_settings,
2784 +	.set_settings = ixmac_set_settings,
2785 +	.nway_reset = ixmac_nway_reset,
2786 +	.get_link = ixmac_get_link,
2787 +	.get_msglevel = ixmac_get_msglevel,
2788 +	.set_msglevel = ixmac_set_msglevel,
2789 +	.get_regs_len = ixmac_get_regs_len,
2790 +	.get_regs = ixmac_get_regs,
2791 +	.get_perm_addr = ethtool_op_get_perm_addr,
2792 +	.get_strings = ixmac_get_strings,
2793 +	.get_stats_count = ixmac_get_stats_count,
2794 +	.get_ethtool_stats = ixmac_get_ethtool_stats,
2795 +};
2796 +
2797 +static void mac_mdio_thread(struct work_struct *work)	/* periodic link monitor; re-arms itself every MDIO_INTERVAL */
2798 +{
2799 +	struct mac_info *mac = container_of(work, struct mac_info,
2800 +			mdio_thread.work);
2801 +	struct net_device *dev = mac->netdev;
2802 +
2803 +	media_check(dev, 0);
2804 +	schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
2805 +}
2806 +
2807 +static int mac_probe(struct platform_device *pdev)
2808 +{
2809 +	struct resource *res;
2810 +	struct mac_info *mac;
2811 +	struct net_device *dev;
2812 +	struct npe_info *npe;
2813 +	struct mac_plat_info *plat = pdev->dev.platform_data;
2814 +	int size, ret;
2815 +
2816 +	if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0))) {
2817 +		return -EIO;
2818 +	}
2819 +	if (!(dev = alloc_etherdev (sizeof(struct mac_info)))) {
2820 +		return -ENOMEM;
2821 +	}
2822 +	SET_MODULE_OWNER(dev);
2823 +	SET_NETDEV_DEV(dev, &pdev->dev);
2824 +	mac = netdev_priv(dev);
2825 +	mac->netdev = dev;
2826 +
2827 +	size = res->end - res->start +1;
2828 +	mac->res = request_mem_region(res->start, size, IXMAC_NAME);
2829 +	if (!mac->res) {
2830 +		ret = -EBUSY;
2831 +		goto out_free;
2832 +	}
2833 +
2834 +	mac->addr = ioremap(res->start, size);
2835 +	if (!mac->addr) {
2836 +		ret = -ENOMEM;
2837 +		goto out_rel;
2838 +	}
2839 +
2840 +	dev->open = ixmac_open;
2841 +	dev->hard_start_xmit = ixmac_start_xmit;
2842 +	dev->poll = ix_poll;
2843 +	dev->stop = ixmac_close;
2844 +	dev->get_stats = ixmac_stats;
2845 +	dev->do_ioctl = ixmac_ioctl;
2846 +	dev->set_multicast_list = ixmac_set_rx_mode;
2847 +	dev->ethtool_ops = &ixmac_ethtool_ops;
2848 +
2849 +	dev->weight = 16;
2850 +	dev->tx_queue_len = 100;
2851 +
2852 +	mac->npe_dev = get_npe_by_id(plat->npe_id);
2853 +	if (!mac->npe_dev) {
2854 +		ret = -EIO;
2855 +		goto out_unmap;
2856 +	}
2857 +	npe = dev_get_drvdata(mac->npe_dev);
2858 +
2859 +	mac->rxq = request_queue(plat->rxq_id, 128);
2860 +	if (IS_ERR(mac->rxq)) {
2861 +		printk(KERN_ERR "Error requesting Q: %d\n", plat->rxq_id);
2862 +		ret = -EBUSY;
2863 +		goto out_putmod;
2864 +	}
2865 +	mac->txq = request_queue(plat->txq_id, 128);
2866 +	if (IS_ERR(mac->txq)) {
2867 +		printk(KERN_ERR "Error requesting Q: %d\n", plat->txq_id);
2868 +		ret = -EBUSY;
2869 +		goto out_putmod;
2870 +	}
2871 +	mac->rxdoneq = request_queue(plat->rxdoneq_id, 128);
2872 +	if (IS_ERR(mac->rxdoneq)) {
2873 +		printk(KERN_ERR "Error requesting Q: %d\n", plat->rxdoneq_id);
2874 +		ret = -EBUSY;
2875 +		goto out_putmod;
2876 +	}
2877 +	mac->rxdoneq->irq_cb = irqcb_recv;
2878 +	mac->rxdoneq->cb_data = dev;
2879 +	queue_set_watermarks(mac->rxdoneq, 0, 0);
2880 +	queue_set_irq_src(mac->rxdoneq, Q_IRQ_ID_NOT_E);
2881 +
2882 +	mac->qmgr = dev_get_drvdata(mac->rxq->dev);
2883 +	if (register_netdev (dev)) {
2884 +		ret = -EIO;
2885 +		goto out_putmod;
2886 +	}
2887 +
2888 +	mac->plat = plat;
2889 +	mac->npe_stat_num = NPE_STAT_NUM_BASE;
2890 +	mac->msg_enable = netif_msg_init(debug, MAC_DEF_MSG_ENABLE);
2891 +
2892 +	platform_set_drvdata(pdev, dev);
2893 +
2894 +	mac_write_reg(mac, MAC_CORE_CNTRL, CORE_RESET);
2895 +	udelay(500);
2896 +	mac_write_reg(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
2897 +
2898 +	init_mdio(dev, plat->phy_id);
2899 +
2900 +	INIT_DELAYED_WORK(&mac->mdio_thread, mac_mdio_thread);
2901 +
2902 +	/* The place of the MAC address is very system dependent.
2903 +	 * Here we use a random one to be replaced by one of the
2904 +	 * following commands:
2905 +	 * "ip link set address 02:03:04:04:04:01 dev eth0"
2906 +	 * "ifconfig eth0 hw ether 02:03:04:04:04:07"
2907 +	 */
2908 +
2909 +	if (is_zero_ether_addr(plat->hwaddr)) {
2910 +		random_ether_addr(dev->dev_addr);
2911 +		dev->dev_addr[5] = plat->phy_id;
2912 +	}
2913 +	else
2914 +		memcpy(dev->dev_addr, plat->hwaddr, 6);
2915 +
2916 +	printk(KERN_INFO IXMAC_NAME " driver " IXMAC_VERSION
2917 +		": %s on %s with PHY[%d] initialized\n",
2918 +		dev->name, npe->plat->name, plat->phy_id);
2919 +
2920 +	return 0;
2921 +
2922 +out_putmod:
2923 +	if (mac->rxq && !IS_ERR(mac->rxq))	/* request_queue() returns ERR_PTR on failure */
2924 +		release_queue(mac->rxq);
2925 +	if (mac->txq && !IS_ERR(mac->txq))
2926 +		release_queue(mac->txq);
2927 +	if (mac->rxdoneq && !IS_ERR(mac->rxdoneq))
2928 +		release_queue(mac->rxdoneq);
2929 +	module_put(mac->npe_dev->driver->owner);
2930 +out_unmap:
2931 +	iounmap(mac->addr);
2932 +out_rel:
2933 +	release_resource(mac->res);
2934 +out_free:
2935 +	free_netdev(dev);	/* was kfree(mac): mac is netdev_priv(dev), an interior pointer */
2936 +	return ret;
2937 +}
2938 +
2939 +static void drain_npe(struct mac_info *mac)
2940 +{
2941 +	struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2942 +	struct npe_cont *cont;
2943 +	u32 phys;
2944 +	int loop = 0;
2945 +
2946 +	/* Now there are some skb hold by the NPE.
2947 +	 * We switch the MAC in loopback mode and send a pseudo packet
2948 +	 * that will be returned by the NPE in its last SKB.
2949 +	 * We will also try to isolate the PHY to keep the packets internal.
2950 +	 */
2951 +
2952 +	if (mac->txq_pkt <2)
2953 +		mac->txq_pkt += init_buffer(tx_doneq, 5);
2954 +
2955 +	if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
2956 +		mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
2957 +		mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN);
2958 +
2959 +		npe_mh_npe_loopback_mode(npe, mac->plat, 1);
2960 +		mdelay(200);
2961 +
2962 +		while (mac->rxq_pkt && loop++ < 2000 ) {
2963 +			phys = queue_get_entry(tx_doneq) & ~0xf;
2964 +			if (!phys)
2965 +				break;
2966 +			cont = dma_to_virt(tx_doneq->dev, phys); /* was queue->dev: "queue" is not declared in this function */
2967 +			/* actually the packets should never leave the system,
2968 +			 * but if they do, they shall contain 0s instead of
2969 +			 * intresting random data....
2970 +			 */
2971 +			memset(cont->data, 0, 64);
2972 +			cont->eth.pkt_len = cpu_to_npe16(64); /* convert like every other pkt_len store in this driver */
2973 +			dma_sync_single(mac->txq->dev, phys, 64 + DMA_HDR_SIZE,
2974 +					DMA_TO_DEVICE);
2975 +			queue_put_entry(mac->txq, phys);
2976 +			if (queue_stat(mac->txq) == 2) { /* overflow */
2977 +				queue_put_entry(tx_doneq, phys);
2978 +				break;
2979 +			}
2980 +			mdelay(1);
2981 +			mac->rxq_pkt -= destroy_buffer(mac->rxdoneq,
2982 +					mac->rxq_pkt);
2983 +		}
2984 +		npe_mh_npe_loopback_mode(npe, mac->plat, 0);
2985 +	}
2986 +	/* Flush MAC TX fifo to drain the bogus packages */
2987 +	mac_set_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2988 +	mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_RX_EN);
2989 +	mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_TX_EN);
2990 +	mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN);
2991 +	mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2992 +	mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2993 +}
2994 +
2995 +static int mac_remove(struct platform_device *pdev)	/* tear down a port: reclaim buffers (draining the NPE if needed) and free everything */
2996 +{
2997 +	struct net_device* dev = platform_get_drvdata(pdev);
2998 +	struct mac_info *mac = netdev_priv(dev);
2999 +
3000 +	unregister_netdev(dev);
3001 +
3002 +	mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt);
3003 +	if (mac->rxq_pkt)	/* some RX buffers are still held by the NPE */
3004 +		drain_npe(mac);
3005 +
3006 +	mac->txq_pkt -= destroy_buffer(mac->txq, mac->txq_pkt);
3007 +	mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt);
3008 +
3009 +	if (mac->rxq_pkt || mac->txq_pkt)
3010 +		printk("Buffers lost in NPE: RX:%d, TX:%d\n",
3011 +				mac->rxq_pkt, mac->txq_pkt);
3012 +
3013 +	release_queue(mac->txq);
3014 +	release_queue(mac->rxq);
3015 +	release_queue(mac->rxdoneq);
3016 +
3017 +	flush_scheduled_work();
3018 +	return_npe_dev(mac->npe_dev);
3019 +
3020 +	iounmap(mac->addr);
3021 +	release_resource(mac->res);
3022 +	platform_set_drvdata(pdev, NULL);
3023 +	free_netdev(dev);
3024 +	return 0;
3025 +}
3026 +
3027 +static struct platform_driver ixp4xx_mac = {	/* one platform device per MAC port */
3028 +	.driver.name	= IXMAC_NAME,
3029 +	.probe		= mac_probe,
3030 +	.remove		= mac_remove,
3031 +};
3032 +
3033 +static int __init init_mac(void)	/* module init: claim the shared TX-done queue, then register the driver */
3034 +{
3035 +	/* The TX done Queue handles skbs sent out by the NPE */
3036 +	tx_doneq = request_queue(TX_DONE_QID, 128);
3037 +	if (IS_ERR(tx_doneq)) {
3038 +		printk(KERN_ERR "Error requesting Q: %d\n", TX_DONE_QID);
3039 +		return -EBUSY;
3040 +	}
3041 +	return platform_driver_register(&ixp4xx_mac);
3042 +}
3043 +
3044 +static void __exit finish_mac(void)	/* module exit: unregister ports, then release the shared TX-done queue */
3045 +{
3046 +	platform_driver_unregister(&ixp4xx_mac);
3047 +	if (tx_doneq) {
3048 +		release_queue(tx_doneq);
3049 +	}
3050 +}
3051 +
3052 +module_init(init_mac);
3053 +module_exit(finish_mac);
3054 +
3055 +MODULE_LICENSE("GPL");
3056 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
3057 +
3058 Index: linux-2.6.21.7/drivers/net/ixp4xx/npe.c
3059 ===================================================================
3060 --- /dev/null
3061 +++ linux-2.6.21.7/drivers/net/ixp4xx/npe.c
3062 @@ -0,0 +1,291 @@
3063 +
3064 +#include <linux/ixp_npe.h>
3065 +#include <asm/hardware.h>
3066 +
3067 +#define RESET_NPE_PARITY 0x0800
3068 +#define PARITY_BIT_MASK 0x3F00FFFF
3069 +#define CONFIG_CTRL_REG_MASK 0x3F3FFFFF
3070 +#define MAX_RETRIES 1000000
3071 +#define NPE_PHYS_REG 32
3072 +#define RESET_MBST_VAL 0x0000F0F0
3073 +#define NPE_REGMAP 0x0000001E
3074 +#define INSTR_WR_REG_SHORT 0x0000C000
3075 +#define INSTR_WR_REG_BYTE 0x00004000
3076 +#define MASK_ECS_REG_0_NEXTPC 0x1FFF0000
3077 +
3078 +#define INSTR_RD_FIFO 0x0F888220
3079 +#define INSTR_RESET_MBOX 0x0FAC8210
3080 +
3081 +#define ECS_REG_0_LDUR 8
3082 +#define ECS_REG_1_CCTXT 16
3083 +#define ECS_REG_1_SELCTXT 0
3084 +
3085 +#define ECS_BG_CTXT_REG_0 0x00
3086 +#define ECS_BG_CTXT_REG_1 0x01
3087 +#define ECS_BG_CTXT_REG_2 0x02
3088 +#define ECS_PRI_1_CTXT_REG_0 0x04
3089 +#define ECS_PRI_1_CTXT_REG_1 0x05
3090 +#define ECS_PRI_1_CTXT_REG_2 0x06
3091 +#define ECS_PRI_2_CTXT_REG_0 0x08
3092 +#define ECS_PRI_2_CTXT_REG_1 0x09
3093 +#define ECS_PRI_2_CTXT_REG_2 0x0A
3094 +#define ECS_DBG_CTXT_REG_0 0x0C
3095 +#define ECS_DBG_CTXT_REG_1 0x0D
3096 +#define ECS_DBG_CTXT_REG_2 0x0E
3097 +#define ECS_INSTRUCT_REG 0x11
3098 +
3099 +#define ECS_BG_CTXT_REG_0_RESET 0xA0000000
3100 +#define ECS_BG_CTXT_REG_1_RESET 0x01000000
3101 +#define ECS_BG_CTXT_REG_2_RESET 0x00008000
3102 +#define ECS_PRI_1_CTXT_REG_0_RESET 0x20000080
3103 +#define ECS_PRI_1_CTXT_REG_1_RESET 0x01000000
3104 +#define ECS_PRI_1_CTXT_REG_2_RESET 0x00008000
3105 +#define ECS_PRI_2_CTXT_REG_0_RESET 0x20000080
3106 +#define ECS_PRI_2_CTXT_REG_1_RESET 0x01000000
3107 +#define ECS_PRI_2_CTXT_REG_2_RESET 0x00008000
3108 +#define ECS_DBG_CTXT_REG_0_RESET 0x20000000
3109 +#define ECS_DBG_CTXT_REG_1_RESET 0x00000000
3110 +#define ECS_DBG_CTXT_REG_2_RESET 0x001E0000
3111 +#define ECS_INSTRUCT_REG_RESET 0x1003C00F
3112 +
3113 +static struct { u32 reg; u32 val; } ecs_reset[] =
3114 +{
3115 + { ECS_BG_CTXT_REG_0, ECS_BG_CTXT_REG_0_RESET },
3116 + { ECS_BG_CTXT_REG_1, ECS_BG_CTXT_REG_1_RESET },
3117 + { ECS_BG_CTXT_REG_2, ECS_BG_CTXT_REG_2_RESET },
3118 + { ECS_PRI_1_CTXT_REG_0, ECS_PRI_1_CTXT_REG_0_RESET },
3119 + { ECS_PRI_1_CTXT_REG_1, ECS_PRI_1_CTXT_REG_1_RESET },
3120 + { ECS_PRI_1_CTXT_REG_2, ECS_PRI_1_CTXT_REG_2_RESET },
3121 + { ECS_PRI_2_CTXT_REG_0, ECS_PRI_2_CTXT_REG_0_RESET },
3122 + { ECS_PRI_2_CTXT_REG_1, ECS_PRI_2_CTXT_REG_1_RESET },
3123 + { ECS_PRI_2_CTXT_REG_2, ECS_PRI_2_CTXT_REG_2_RESET },
3124 + { ECS_DBG_CTXT_REG_0, ECS_DBG_CTXT_REG_0_RESET },
3125 + { ECS_DBG_CTXT_REG_1, ECS_DBG_CTXT_REG_1_RESET },
3126 + { ECS_DBG_CTXT_REG_2, ECS_DBG_CTXT_REG_2_RESET },
3127 + { ECS_INSTRUCT_REG, ECS_INSTRUCT_REG_RESET }
3128 +};
3129 +
3130 +/* actually I have no idea what I'm doing here !!
3131 + * I only rewrite the "reset" sequence the way Intel does it.
3132 + */
3133 +
3134 +static void npe_debg_preexec(struct npe_info *npe)
3135 +{
3136 + u32 r = IX_NPEDL_MASK_ECS_DBG_REG_2_IF | IX_NPEDL_MASK_ECS_DBG_REG_2_IE;
3137 +
3138 + npe->exec_count = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCT);
3139 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCT, 0);
3140 + npe->ctx_reg2 = npe_read_ecs_reg(npe, ECS_DBG_CTXT_REG_2);
3141 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_2, npe->ctx_reg2 | r);
3142 +}
3143 +
3144 +static void npe_debg_postexec(struct npe_info *npe)
3145 +{
3146 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_0, 0);
3147 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3148 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCT, npe->exec_count);
3149 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_2, npe->ctx_reg2);
3150 +}
3151 +
3152 +static int
3153 +npe_debg_inst_exec(struct npe_info *npe, u32 instr, u32 ctx, u32 ldur)
3154 +{
3155 + u32 regval, wc;
3156 + int c = 0;
3157 +
3158 + regval = IX_NPEDL_MASK_ECS_REG_0_ACTIVE |
3159 + (ldur << ECS_REG_0_LDUR);
3160 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_0 , regval);
3161 + /* set CCTXT at ECS DEBUG L3 to specify in which context
3162 + * to execute the instruction
3163 + */
3164 + regval = (ctx << ECS_REG_1_CCTXT) |
3165 + (ctx << ECS_REG_1_SELCTXT);
3166 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_1, regval);
3167 +
3168 + /* clear the pipeline */
3169 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3170 +
3171 + /* load NPE instruction into the instruction register */
3172 + npe_write_ecs_reg(npe, ECS_INSTRUCT_REG, instr);
3173 + /* we need this value later to wait for
3174 + * completion of NPE execution step
3175 + */
3176 + wc = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WC);
3177 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STEP);
3178 +
3179 + /* Watch Count register increments when NPE completes an instruction */
3180 + while (wc == npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WC) &&
3181 + ++c < MAX_RETRIES);
3182 +
3183 + if (c >= MAX_RETRIES) {
3184 + printk(KERN_ERR "%s reset:npe_debg_inst_exec(): Timeout\n",
3185 + npe->plat->name);
3186 + return 1;
3187 + }
3188 + return 0;
3189 +}
3190 +
3191 +static int npe_logical_reg_write8(struct npe_info *npe, u32 addr, u32 val)
3192 +{
3193 + u32 instr;
3194 + val &= 0xff;
3195 + /* here we build the NPE assembler instruction:
3196 + * mov8 d0, #0 */
3197 + instr = INSTR_WR_REG_BYTE | /* OpCode */
3198 + addr << 9 | /* base Operand */
3199 + (val & 0x1f) << 4 | /* lower 5 bits to immediate data */
3200 + (val & ~0x1f) << (18-5);/* higher 3 bits to CoProc instr. */
3201 + /* and execute it */
3202 + return npe_debg_inst_exec(npe, instr, 0, 1);
3203 +}
3204 +
3205 +static int npe_logical_reg_write16(struct npe_info *npe, u32 addr, u32 val)
3206 +{
3207 + u32 instr;
3208 + /* here we build the NPE assembler instruction:
3209 + * mov16 d0, #0 */
3210 + val &= 0xffff;
3211 + instr = INSTR_WR_REG_SHORT | /* OpCode */
3212 + addr << 9 | /* base Operand */
3213 + (val & 0x1f) << 4 | /* lower 5 bits to immediate data */
3214 + (val & ~0x1f) << (18-5);/* higher 11 bits to CoProc instr. */
3215 + /* and execute it */
3216 + return npe_debg_inst_exec(npe, instr, 0, 1);
3217 +}
3218 +
3219 +static int npe_logical_reg_write32(struct npe_info *npe, u32 addr, u32 val)
3220 +{
3221 + /* write in 16 bit steps first the high and then the low value */
3222 + npe_logical_reg_write16(npe, addr, val >> 16);
3223 + return npe_logical_reg_write16(npe, addr+2, val & 0xffff);
3224 +}
3225 +
3226 +void npe_reset(struct npe_info *npe)
3227 +{
3228 + u32 reg, cfg_ctrl;
3229 + int i;
3230 + struct { u32 reset; int addr; int size; } ctx_reg[] = {
3231 + { 0x80, 0x1b, 8 },
3232 + { 0, 0x1c, 16 },
3233 + { 0x820, 0x1e, 16 },
3234 + { 0, 0x1f, 8 }
3235 + }, *cr;
3236 +
3237 + cfg_ctrl = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL);
3238 + cfg_ctrl |= 0x3F000000;
3239 + /* disable the parity interrupt */
3240 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL, cfg_ctrl & PARITY_BIT_MASK);
3241 +
3242 + npe_debg_preexec(npe);
3243 +
3244 + /* clear the FIFOs */
3245 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WFIFO) ==
3246 + IX_NPEDL_MASK_WFIFO_VALID);
3247 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) ==
3248 + IX_NPEDL_MASK_STAT_OFNE)
3249 + {
3250 + u32 reg;
3251 + reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_FIFO);
3252 + printk("%s reset: Read FIFO:=%x\n", npe->plat->name, reg);
3253 + }
3254 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) ==
3255 + IX_NPEDL_MASK_STAT_IFNE) {
3256 + npe_debg_inst_exec(npe, INSTR_RD_FIFO, 0, 0);
3257 + }
3258 +
3259 + /* Reset the mailbox reg */
3260 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_MBST, RESET_MBST_VAL);
3261 + npe_debg_inst_exec(npe, INSTR_RESET_MBOX, 0, 0);
3262 +
3263 + /* Reset the physical registers in the NPE register file */
3264 + for (i=0; i<NPE_PHYS_REG; i++) {
3265 + npe_logical_reg_write16(npe, NPE_REGMAP, i >> 1);
3266 + npe_logical_reg_write32(npe, (i&1) *4, 0);
3267 + }
3268 +
3269 + /* Reset the context store. Iterate over the 16 ctx s */
3270 + for(i=0; i<16; i++) {
3271 + for (reg=0; reg<4; reg++) {
3272 + /* There is no (STEVT) register for Context 0.
3273 + * ignore if register=0 and ctx=0 */
3274 + if (!(reg || i))
3275 + continue;
3276 + /* Context 0 has no STARTPC. Instead, this value is
3277 + * used to set NextPC for Background ECS,
3278 + * to set where NPE starts executing code
3279 + */
3280 + if (!i && reg==1) {
3281 + u32 r;
3282 + r = npe_read_ecs_reg(npe, ECS_BG_CTXT_REG_0);
3283 + r &= ~MASK_ECS_REG_0_NEXTPC;
3284 + r |= (cr->reset << 16) & MASK_ECS_REG_0_NEXTPC;
3285 + continue;
3286 + }
3287 + cr = ctx_reg + reg;
3288 + switch (cr->size) {
3289 + case 8:
3290 + npe_logical_reg_write8(npe, cr->addr,
3291 + cr->reset);
3292 + break;
3293 + case 16:
3294 + npe_logical_reg_write16(npe, cr->addr,
3295 + cr->reset);
3296 + }
3297 + }
3298 + }
3299 + npe_debg_postexec(npe);
3300 +
3301 + for (i=0; i< ARRAY_SIZE(ecs_reset); i++) {
3302 + npe_write_ecs_reg(npe, ecs_reset[i].reg, ecs_reset[i].val);
3303 + }
3304 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT);
3305 +
3306 + for (i=IX_NPEDL_REG_OFFSET_EXCT; i<=IX_NPEDL_REG_OFFSET_AP3; i+=4) {
3307 + npe_reg_write(npe, i, 0);
3308 + }
3309 +
3310 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_WC, 0);
3311 +
3312 + reg = *IXP4XX_EXP_CFG2;
3313 + reg |= 0x800 << npe->plat->id; /* IX_FUSE_NPE[ABC] */
3314 + *IXP4XX_EXP_CFG2 = reg;
3315 + reg &= ~(0x800 << npe->plat->id); /* IX_FUSE_NPE[ABC] */
3316 + *IXP4XX_EXP_CFG2 = reg;
3317 +
3318 + npe_stop(npe);
3319 +
3320 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL,
3321 + cfg_ctrl & CONFIG_CTRL_REG_MASK);
3322 + npe->loaded = 0;
3323 +}
3324 +
3325 +
3326 +void npe_stop(struct npe_info *npe)
3327 +{
3328 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STOP);
3329 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3330 +}
3331 +
3332 +static void npe_reset_active(struct npe_info *npe, u32 reg)
3333 +{
3334 + u32 regval;
3335 +
3336 + regval = npe_read_ecs_reg(npe, reg);
3337 + regval &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
3338 + npe_write_ecs_reg(npe, reg, regval);
3339 +}
3340 +
3341 +void npe_start(struct npe_info *npe)
3342 +{
3343 + npe_reset_active(npe, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
3344 + npe_reset_active(npe, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
3345 + npe_reset_active(npe, IX_NPEDL_ECS_DBG_CTXT_REG_0);
3346 +
3347 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3348 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_START);
3349 +}
3350 +
3351 +EXPORT_SYMBOL(npe_stop);
3352 +EXPORT_SYMBOL(npe_start);
3353 +EXPORT_SYMBOL(npe_reset);
3354 Index: linux-2.6.21.7/drivers/net/ixp4xx/npe_mh.c
3355 ===================================================================
3356 --- /dev/null
3357 +++ linux-2.6.21.7/drivers/net/ixp4xx/npe_mh.c
3358 @@ -0,0 +1,170 @@
3359 +/*
3360 + * npe_mh.c - NPE message handler.
3361 + *
3362 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3363 + *
3364 + * This file is released under the GPLv2
3365 + */
3366 +
3367 +#include <linux/ixp_npe.h>
3368 +#include <linux/slab.h>
3369 +
3370 +#define MAX_RETRY 200
3371 +
3372 +struct npe_mh_msg {
3373 + union {
3374 +		u8 byte[8];	/* Very descriptive name, I know ... */
3375 + u32 data[2];
3376 + } u;
3377 +};
3378 +
3379 +/*
3380 + * The whole code in this function must be reworked.
3381 + * It is in a state that works but is not rock solid
3382 + */
3383 +static int send_message(struct npe_info *npe, struct npe_mh_msg *msg)
3384 +{
3385 + int i,j;
3386 + u32 send[2], recv[2];
3387 +
3388 + for (i=0; i<2; i++)
3389 + send[i] = be32_to_cpu(msg->u.data[i]);
3390 +
3391 + if ((npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3392 + IX_NPEMH_NPE_STAT_IFNE))
3393 + return -1;
3394 +
3395 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[0]);
3396 + for(i=0; i<MAX_RETRY; i++) {
3397 + /* if the IFNF status bit is unset then the inFIFO is full */
3398 + if (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3399 + IX_NPEMH_NPE_STAT_IFNF)
3400 + break;
3401 + }
3402 + if (i>=MAX_RETRY)
3403 + return -1;
3404 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[1]);
3405 + i=0;
3406 + while (!(npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3407 + IX_NPEMH_NPE_STAT_OFNE)) {
3408 + if (i++>MAX_RETRY) {
3409 + printk("Waiting for Output FIFO NotEmpty failed\n");
3410 + return -1;
3411 + }
3412 + }
3413 + //printk("Output FIFO Not Empty. Loops: %d\n", i);
3414 + j=0;
3415 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3416 + IX_NPEMH_NPE_STAT_OFNE) {
3417 + recv[j&1] = npe_reg_read(npe,IX_NPEDL_REG_OFFSET_FIFO);
3418 + j++;
3419 + }
3420 + if ((recv[0] != send[0]) || (recv[1] != send[1])) {
3421 + if (send[0] || send[1]) {
3422 + /* all CMDs return the complete message as answer,
3423 + * only GETSTATUS returns the ImageID of the NPE
3424 + */
3425 + printk("Unexpected answer: "
3426 + "Send %08x:%08x Ret %08x:%08x\n",
3427 + send[0], send[1], recv[0], recv[1]);
3428 + }
3429 + }
3430 + return 0;
3431 +}
3432 +
3433 +#define CMD 0
3434 +#define PORT 1
3435 +#define MAC 2
3436 +
3437 +#define IX_ETHNPE_NPE_GETSTATUS 0x00
3438 +#define IX_ETHNPE_EDB_SETPORTADDRESS 0x01
3439 +#define IX_ETHNPE_GETSTATS 0x04
3440 +#define IX_ETHNPE_RESETSTATS 0x05
3441 +#define IX_ETHNPE_FW_SETFIREWALLMODE 0x0E
3442 +#define IX_ETHNPE_VLAN_SETRXQOSENTRY 0x0B
3443 +#define IX_ETHNPE_SETLOOPBACK_MODE 0x12
3444 +
3445 +#define logical_id(mp) (((mp)->npe_id << 4) | ((mp)->port_id & 0xf))
3446 +
3447 +int npe_mh_status(struct npe_info *npe)
3448 +{
3449 + struct npe_mh_msg msg;
3450 +
3451 + memset(&msg, 0, sizeof(msg));
3452 + msg.u.byte[CMD] = IX_ETHNPE_NPE_GETSTATUS;
3453 + return send_message(npe, &msg);
3454 +}
3455 +
3456 +int npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp,
3457 + u8 *macaddr)
3458 +{
3459 + struct npe_mh_msg msg;
3460 +
3461 + msg.u.byte[CMD] = IX_ETHNPE_EDB_SETPORTADDRESS;
3462 + msg.u.byte[PORT] = mp->eth_id;
3463 + memcpy(msg.u.byte + MAC, macaddr, 6);
3464 +
3465 + return send_message(npe, &msg);
3466 +}
3467 +
3468 +int npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp)
3469 +{
3470 + struct npe_mh_msg msg;
3471 +
3472 + memset(&msg, 0, sizeof(msg));
3473 + msg.u.byte[CMD] = IX_ETHNPE_FW_SETFIREWALLMODE;
3474 + msg.u.byte[PORT] = logical_id(mp);
3475 +
3476 + return send_message(npe, &msg);
3477 +}
3478 +
3479 +int npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp,
3480 + int enable)
3481 +{
3482 + struct npe_mh_msg msg;
3483 +
3484 + memset(&msg, 0, sizeof(msg));
3485 + msg.u.byte[CMD] = IX_ETHNPE_SETLOOPBACK_MODE;
3486 + msg.u.byte[PORT] = logical_id(mp);
3487 + msg.u.byte[3] = enable ? 1 : 0;
3488 +
3489 + return send_message(npe, &msg);
3490 +}
3491 +
3492 +int npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid)
3493 +{
3494 + struct npe_mh_msg msg;
3495 + int i, ret;
3496 +
3497 + memset(&msg, 0, sizeof(msg));
3498 + msg.u.byte[CMD] = IX_ETHNPE_VLAN_SETRXQOSENTRY;
3499 + msg.u.byte[PORT] = logical_id(mp);
3500 + msg.u.byte[5] = qid | 0x80;
3501 + msg.u.byte[7] = qid<<4;
3502 + for(i=0; i<8; i++) {
3503 + msg.u.byte[3] = i;
3504 + if ((ret = send_message(npe, &msg)))
3505 + return ret;
3506 + }
3507 + return 0;
3508 +}
3509 +
3510 +int npe_mh_get_stats(struct npe_info *npe, struct mac_plat_info *mp, u32 phys,
3511 + int reset)
3512 +{
3513 + struct npe_mh_msg msg;
3514 + memset(&msg, 0, sizeof(msg));
3515 + msg.u.byte[CMD] = reset ? IX_ETHNPE_RESETSTATS : IX_ETHNPE_GETSTATS;
3516 + msg.u.byte[PORT] = logical_id(mp);
3517 + msg.u.data[1] = cpu_to_npe32(cpu_to_be32(phys));
3518 +
3519 + return send_message(npe, &msg);
3520 +}
3521 +
3522 +
3523 +EXPORT_SYMBOL(npe_mh_status);
3524 +EXPORT_SYMBOL(npe_mh_setportaddr);
3525 +EXPORT_SYMBOL(npe_mh_disable_firewall);
3526 +EXPORT_SYMBOL(npe_mh_set_rxqid);
3527 +EXPORT_SYMBOL(npe_mh_npe_loopback_mode);
3528 +EXPORT_SYMBOL(npe_mh_get_stats);
3529 Index: linux-2.6.21.7/drivers/net/ixp4xx/phy.c
3530 ===================================================================
3531 --- /dev/null
3532 +++ linux-2.6.21.7/drivers/net/ixp4xx/phy.c
3533 @@ -0,0 +1,113 @@
3534 +/*
3535 + * phy.c - MDIO functions and mii initialisation
3536 + *
3537 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3538 + *
3539 + * This file is released under the GPLv2
3540 + */
3541 +
3542 +
3543 +#include <linux/mutex.h>
3544 +#include "mac.h"
3545 +
3546 +#define MAX_PHYS (1<<5)
3547 +
3548 +/*
3549 + * We must always use the same MAC for accessing the MDIO
3550 + * We may not use each MAC for its PHY :-(
3551 + */
3552 +
3553 +static struct net_device *phy_dev = NULL;
3554 +static struct mutex mtx;
3555 +
3556 +/* here we remember if the PHY is alive, to avoid log dumping */
3557 +static int phy_works[MAX_PHYS];
3558 +
3559 +int mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg)
3560 +{
3561 + struct mac_info *mac;
3562 + u32 cmd, reg;
3563 + int cnt = 0;
3564 +
3565 + if (!phy_dev)
3566 + return 0;
3567 +
3568 + mac = netdev_priv(phy_dev);
3569 + cmd = mdio_cmd(phy_addr, phy_reg);
3570 + mutex_lock_interruptible(&mtx);
3571 + mac_mdio_cmd_write(mac, cmd);
3572 + while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
3573 + if (++cnt >= 100) {
3574 + printk("%s: PHY[%d] access failed\n",
3575 + dev->name, phy_addr);
3576 + break;
3577 + }
3578 + schedule();
3579 + }
3580 + reg = mac_mdio_status_read(mac);
3581 + mutex_unlock(&mtx);
3582 + if (reg & MII_READ_FAIL) {
3583 + if (phy_works[phy_addr]) {
3584 + printk("%s: PHY[%d] unresponsive\n",
3585 + dev->name, phy_addr);
3586 + }
3587 + reg = 0;
3588 + phy_works[phy_addr] = 0;
3589 + } else {
3590 + if ( !phy_works[phy_addr]) {
3591 + printk("%s: PHY[%d] responsive again\n",
3592 + dev->name, phy_addr);
3593 + }
3594 + phy_works[phy_addr] = 1;
3595 + }
3596 + return reg & 0xffff;
3597 +}
3598 +
3599 +void
3600 +mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val)
3601 +{
3602 + struct mac_info *mac;
3603 + u32 cmd;
3604 + int cnt=0;
3605 +
3606 + if (!phy_dev)
3607 + return;
3608 +
3609 + mac = netdev_priv(phy_dev);
3610 + cmd = mdio_cmd(phy_addr, phy_reg) | MII_WRITE | val;
3611 +
3612 + mutex_lock_interruptible(&mtx);
3613 + mac_mdio_cmd_write(mac, cmd);
3614 + while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
3615 + if (++cnt >= 100) {
3616 + printk("%s: PHY[%d] access failed\n",
3617 + dev->name, phy_addr);
3618 + break;
3619 + }
3620 + schedule();
3621 + }
3622 + mutex_unlock(&mtx);
3623 +}
3624 +
3625 +void init_mdio(struct net_device *dev, int phy_id)
3626 +{
3627 + struct mac_info *mac = netdev_priv(dev);
3628 + int i;
3629 +
3630 + /* All phy operations should use the same MAC
3631 + * (my experience)
3632 + */
3633 + if (mac->plat->eth_id == 0) {
3634 + mutex_init(&mtx);
3635 + phy_dev = dev;
3636 + for (i=0; i<MAX_PHYS; i++)
3637 + phy_works[i] = 1;
3638 + }
3639 + mac->mii.dev = dev;
3640 + mac->mii.phy_id = phy_id;
3641 + mac->mii.phy_id_mask = MAX_PHYS - 1;
3642 + mac->mii.reg_num_mask = 0x1f;
3643 + mac->mii.mdio_read = mdio_read_register;
3644 + mac->mii.mdio_write = mdio_write_register;
3645 +}
3646 +
3647 Index: linux-2.6.21.7/drivers/net/ixp4xx/ucode_dl.c
3648 ===================================================================
3649 --- /dev/null
3650 +++ linux-2.6.21.7/drivers/net/ixp4xx/ucode_dl.c
3651 @@ -0,0 +1,479 @@
3652 +/*
3653 + * ucode_dl.c - provide an NPE device and a char-dev for microcode download
3654 + *
3655 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3656 + *
3657 + * This file is released under the GPLv2
3658 + */
3659 +
3660 +#include <linux/kernel.h>
3661 +#include <linux/module.h>
3662 +#include <linux/miscdevice.h>
3663 +#include <linux/platform_device.h>
3664 +#include <linux/fs.h>
3665 +#include <linux/init.h>
3666 +#include <linux/slab.h>
3667 +#include <linux/firmware.h>
3668 +#include <linux/dma-mapping.h>
3669 +#include <linux/byteorder/swab.h>
3670 +#include <asm/uaccess.h>
3671 +#include <asm/io.h>
3672 +
3673 +#include <linux/ixp_npe.h>
3674 +
3675 +#define IXNPE_VERSION "IXP4XX NPE driver Version 0.3.0"
3676 +
3677 +#define DL_MAGIC 0xfeedf00d
3678 +#define DL_MAGIC_SWAP 0x0df0edfe
3679 +
3680 +#define EOF_BLOCK 0xf
3681 +#define IMG_SIZE(image) (((image)->size * sizeof(u32)) + \
3682 + sizeof(struct dl_image))
3683 +
3684 +#define BT_INSTR 0
3685 +#define BT_DATA 1
3686 +
3687 +enum blk_type {
3688 + instruction,
3689 + data,
3690 +};
3691 +
3692 +struct dl_block {
3693 + u32 type;
3694 + u32 offset;
3695 +};
3696 +
3697 +struct dl_image {
3698 + u32 magic;
3699 + u32 id;
3700 + u32 size;
3701 + union {
3702 + u32 data[0];
3703 + struct dl_block block[0];
3704 + } u;
3705 +};
3706 +
3707 +struct dl_codeblock {
3708 + u32 npe_addr;
3709 + u32 size;
3710 + u32 data[0];
3711 +};
3712 +
3713 +static struct platform_driver ixp4xx_npe_driver;
3714 +
3715 +static int match_by_npeid(struct device *dev, void *id)
3716 +{
3717 + struct npe_info *npe = dev_get_drvdata(dev);
3718 + if (!npe->plat)
3719 + return 0;
3720 + return (npe->plat->id == *(int*)id);
3721 +}
3722 +
3723 +struct device *get_npe_by_id(int id)
3724 +{
3725 + struct device *dev = driver_find_device(&ixp4xx_npe_driver.driver,
3726 + NULL, &id, match_by_npeid);
3727 + if (dev) {
3728 + struct npe_info *npe = dev_get_drvdata(dev);
3729 + if (!try_module_get(THIS_MODULE)) {
3730 + put_device(dev);
3731 + return NULL;
3732 + }
3733 + npe->usage++;
3734 + }
3735 + return dev;
3736 +}
3737 +
3738 +void return_npe_dev(struct device *dev)
3739 +{
3740 + struct npe_info *npe = dev_get_drvdata(dev);
3741 + put_device(dev);
3742 + module_put(THIS_MODULE);
3743 + npe->usage--;
3744 +}
3745 +
3746 +static int
3747 +download_block(struct npe_info *npe, struct dl_codeblock *cb, unsigned type)
3748 +{
3749 + int i;
3750 + int cmd;
3751 +
3752 + switch (type) {
3753 + case BT_DATA:
3754 + cmd = IX_NPEDL_EXCTL_CMD_WR_DATA_MEM;
3755 + if (cb->npe_addr + cb->size > npe->plat->data_size) {
3756 + printk(KERN_INFO "Data size too large: %d+%d > %d\n",
3757 + cb->npe_addr, cb->size, npe->plat->data_size);
3758 + return -EIO;
3759 + }
3760 + break;
3761 + case BT_INSTR:
3762 + cmd = IX_NPEDL_EXCTL_CMD_WR_INS_MEM;
3763 + if (cb->npe_addr + cb->size > npe->plat->inst_size) {
3764 + printk(KERN_INFO "Instr size too large: %d+%d > %d\n",
3765 + cb->npe_addr, cb->size, npe->plat->inst_size);
3766 + return -EIO;
3767 + }
3768 + break;
3769 + default:
3770 + printk(KERN_INFO "Unknown CMD: %d\n", type);
3771 + return -EIO;
3772 + }
3773 +
3774 + for (i=0; i < cb->size; i++) {
3775 + npe_write_cmd(npe, cb->npe_addr + i, cb->data[i], cmd);
3776 + }
3777 +
3778 + return 0;
3779 +}
3780 +
3781 +static int store_npe_image(struct dl_image *image, struct device *dev)
3782 +{
3783 + struct dl_block *blk;
3784 + struct dl_codeblock *cb;
3785 + struct npe_info *npe;
3786 + int ret=0;
3787 +
3788 + if (!dev) {
3789 + dev = get_npe_by_id( (image->id >> 24) & 0xf);
3790 + return_npe_dev(dev);
3791 + }
3792 + if (!dev)
3793 + return -ENODEV;
3794 +
3795 + npe = dev_get_drvdata(dev);
3796 + if (npe->loaded && (npe->usage > 0)) {
3797 + printk(KERN_INFO "Cowardly refusing to reload an Image "
3798 + "into the used and running %s\n", npe->plat->name);
3799 + return 0; /* indicate success anyway... */
3800 + }
3801 + if (!cpu_is_ixp46x() && ((image->id >> 28) & 0xf)) {
3802 + printk(KERN_INFO "IXP46x NPE image ignored on IXP42x\n");
3803 + return -EIO;
3804 + }
3805 +
3806 + npe_stop(npe);
3807 + npe_reset(npe);
3808 +
3809 + for (blk = image->u.block; blk->type != EOF_BLOCK; blk++) {
3810 + if (blk->offset > image->size) {
3811 + printk(KERN_INFO "Block offset out of range\n");
3812 + return -EIO;
3813 + }
3814 + cb = (struct dl_codeblock*)&image->u.data[blk->offset];
3815 + if (blk->offset + cb->size + 2 > image->size) {
3816 + printk(KERN_INFO "Codeblock size out of range\n");
3817 + return -EIO;
3818 + }
3819 + if ((ret = download_block(npe, cb, blk->type)))
3820 + return ret;
3821 + }
3822 + *(u32*)npe->img_info = cpu_to_be32(image->id);
3823 + npe_start(npe);
3824 +
3825 + printk(KERN_INFO "Image loaded to %s Func:%x, Rel: %x:%x, Status: %x\n",
3826 + npe->plat->name, npe->img_info[1], npe->img_info[2],
3827 + npe->img_info[3], npe_status(npe));
3828 + if (npe_mh_status(npe)) {
3829 + printk(KERN_ERR "%s not responding\n", npe->plat->name);
3830 + }
3831 + npe->loaded = 1;
3832 + return 0;
3833 +}
3834 +
3835 +static int ucode_open(struct inode *inode, struct file *file)
3836 +{
3837 + file->private_data = kmalloc(sizeof(struct dl_image), GFP_KERNEL);
3838 + if (!file->private_data)
3839 + return -ENOMEM;
3840 + return 0;
3841 +}
3842 +
3843 +static int ucode_close(struct inode *inode, struct file *file)
3844 +{
3845 + kfree(file->private_data);
3846 + return 0;
3847 +}
3848 +
3849 +static ssize_t ucode_write(struct file *file, const char __user *buf,
3850 + size_t count, loff_t *ppos)
3851 +{
3852 + union {
3853 + char *data;
3854 + struct dl_image *image;
3855 + } u;
3856 + const char __user *cbuf = buf;
3857 +
3858 + u.data = file->private_data;
3859 +
3860 + while (count) {
3861 + int len;
3862 + if (*ppos < sizeof(struct dl_image)) {
3863 + len = sizeof(struct dl_image) - *ppos;
3864 + len = len > count ? count : len;
3865 + if (copy_from_user(u.data + *ppos, cbuf, len))
3866 + return -EFAULT;
3867 + count -= len;
3868 + *ppos += len;
3869 + cbuf += len;
3870 + continue;
3871 + } else if (*ppos == sizeof(struct dl_image)) {
3872 + void *data;
3873 + if (u.image->magic == DL_MAGIC_SWAP) {
3874 + printk(KERN_INFO "swapped image found\n");
3875 + u.image->id = swab32(u.image->id);
3876 + u.image->size = swab32(u.image->size);
3877 + } else if (u.image->magic != DL_MAGIC) {
3878 + printk(KERN_INFO "Bad magic:%x\n",
3879 + u.image->magic);
3880 + return -EFAULT;
3881 + }
3882 + len = IMG_SIZE(u.image);
3883 + data = kmalloc(len, GFP_KERNEL);
3884 + if (!data)
3885 + return -ENOMEM;
3886 + memcpy(data, u.data, *ppos);
3887 + kfree(u.data);
3888 + u.data = (char*)data;
3889 + file->private_data = data;
3890 + }
3891 + len = IMG_SIZE(u.image) - *ppos;
3892 + len = len > count ? count : len;
3893 + if (copy_from_user(u.data + *ppos, cbuf, len))
3894 + return -EFAULT;
3895 + count -= len;
3896 + *ppos += len;
3897 + cbuf += len;
3898 + if (*ppos == IMG_SIZE(u.image)) {
3899 + int ret, i;
3900 + *ppos = 0;
3901 + if (u.image->magic == DL_MAGIC_SWAP) {
3902 + for (i=0; i<u.image->size; i++) {
3903 + u.image->u.data[i] =
3904 + swab32(u.image->u.data[i]);
3905 + }
3906 + u.image->magic = swab32(u.image->magic);
3907 + }
3908 + ret = store_npe_image(u.image, NULL);
3909 + if (ret) {
3910 + printk(KERN_INFO "Error in NPE image: %x\n",
3911 + u.image->id);
3912 + return ret;
3913 + }
3914 + }
3915 + }
3916 + return (cbuf-buf);
3917 +}
3918 +
3919 +static void npe_firmware_probe(struct device *dev)
3920 +{
3921 +#if (defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)) \
3922 + && defined(MODULE)
3923 + const struct firmware *fw_entry;
3924 + struct npe_info *npe = dev_get_drvdata(dev);
3925 + struct dl_image *image;
3926 + int ret = -1, i;
3927 +
3928 + if (request_firmware(&fw_entry, npe->plat->name, dev) != 0) {
3929 + return;
3930 + }
3931 + image = (struct dl_image*)fw_entry->data;
3932 + /* Sanity checks */
3933 + if (fw_entry->size < sizeof(struct dl_image)) {
3934 + printk(KERN_ERR "Firmware error: too small\n");
3935 + goto out;
3936 + }
3937 + if (image->magic == DL_MAGIC_SWAP) {
3938 + printk(KERN_INFO "swapped image found\n");
3939 + image->id = swab32(image->id);
3940 + image->size = swab32(image->size);
3941 + } else if (image->magic != DL_MAGIC) {
3942 + printk(KERN_ERR "Bad magic:%x\n", image->magic);
3943 + goto out;
3944 + }
3945 + if (IMG_SIZE(image) != fw_entry->size) {
3946 + printk(KERN_ERR "Firmware error: bad size\n");
3947 + goto out;
3948 + }
3949 + if (((image->id >> 24) & 0xf) != npe->plat->id) {
3950 + printk(KERN_ERR "NPE id missmatch\n");
3951 + goto out;
3952 + }
3953 + if (image->magic == DL_MAGIC_SWAP) {
3954 + for (i=0; i<image->size; i++) {
3955 + image->u.data[i] = swab32(image->u.data[i]);
3956 + }
3957 + image->magic = swab32(image->magic);
3958 + }
3959 +
3960 + ret = store_npe_image(image, dev);
3961 +out:
3962 + if (ret) {
3963 + printk(KERN_ERR "Error downloading Firmware for %s\n",
3964 + npe->plat->name);
3965 + }
3966 + release_firmware(fw_entry);
3967 +#endif
3968 +}
3969 +
3970 +static void disable_npe_irq(struct npe_info *npe)
3971 +{
3972 + u32 reg;
3973 + reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL);
3974 + reg &= ~(IX_NPEMH_NPE_CTL_OFE | IX_NPEMH_NPE_CTL_IFE);
3975 + reg |= IX_NPEMH_NPE_CTL_OFEWE | IX_NPEMH_NPE_CTL_IFEWE;
3976 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL, reg);
3977 +}
3978 +
3979 +static ssize_t show_npe_state(struct device *dev, struct device_attribute *attr,
3980 + char *buf)
3981 +{
3982 + struct npe_info *npe = dev_get_drvdata(dev);
3983 +
3984 + strcpy(buf, npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN ?
3985 + "start\n" : "stop\n");
3986 + return strlen(buf);
3987 +}
3988 +
3989 +static ssize_t set_npe_state(struct device *dev, struct device_attribute *attr,
3990 + const char *buf, size_t count)
3991 +{
3992 + struct npe_info *npe = dev_get_drvdata(dev);
3993 +
3994 + if (npe->usage) {
3995 + printk("%s in use: read-only\n", npe->plat->name);
3996 + return count;
3997 + }
3998 + if (!strncmp(buf, "start", 5)) {
3999 + npe_start(npe);
4000 + }
4001 + if (!strncmp(buf, "stop", 4)) {
4002 + npe_stop(npe);
4003 + }
4004 + if (!strncmp(buf, "reset", 5)) {
4005 + npe_stop(npe);
4006 + npe_reset(npe);
4007 + }
4008 + return count;
4009 +}
4010 +
4011 +static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_npe_state, set_npe_state);
4012 +
4013 +static int npe_probe(struct platform_device *pdev)
4014 +{
4015 + struct resource *res;
4016 + struct npe_info *npe;
4017 + struct npe_plat_data *plat = pdev->dev.platform_data;
4018 + int err, size, ret=0;
4019 +
4020 + if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
4021 + return -EIO;
4022 +
4023 + if (!(npe = kzalloc(sizeof(struct npe_info), GFP_KERNEL)))
4024 + return -ENOMEM;
4025 +
4026 + size = res->end - res->start +1;
4027 + npe->res = request_mem_region(res->start, size, plat->name);
4028 + if (!npe->res) {
4029 + ret = -EBUSY;
4030 + printk(KERN_ERR "Failed to get memregion(%x, %x)\n",
4031 + res->start, size);
4032 + goto out_free;
4033 + }
4034 +
4035 + npe->addr = ioremap(res->start, size);
4036 + if (!npe->addr) {
4037 + ret = -ENOMEM;
4038 + printk(KERN_ERR "Failed to ioremap(%x, %x)\n",
4039 + res->start, size);
4040 + goto out_rel;
4041 + }
4042 +
4043 + pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
4044 +
4045 + platform_set_drvdata(pdev, npe);
4046 +
4047 + err = device_create_file(&pdev->dev, &dev_attr_state);
4048 + if (err)
4049 + goto out_rel;
4050 +
4051 + npe->plat = plat;
4052 + disable_npe_irq(npe);
4053 + npe->usage = 0;
4054 + npe_reset(npe);
4055 + npe_firmware_probe(&pdev->dev);
4056 +
4057 + return 0;
4058 +
4059 +out_rel:
4060 + release_resource(npe->res);
4061 +out_free:
4062 + kfree(npe);
4063 + return ret;
4064 +}
4065 +
4066 +static struct file_operations ucode_dl_fops = {
4067 + .owner = THIS_MODULE,
4068 + .write = ucode_write,
4069 + .open = ucode_open,
4070 + .release = ucode_close,
4071 +};
4072 +
4073 +static struct miscdevice ucode_dl_dev = {
4074 + .minor = MICROCODE_MINOR,
4075 + .name = "ixp4xx_ucode",
4076 + .fops = &ucode_dl_fops,
4077 +};
4078 +
4079 +static int npe_remove(struct platform_device *pdev)
4080 +{
4081 + struct npe_info *npe = platform_get_drvdata(pdev);
4082 +
4083 + device_remove_file(&pdev->dev, &dev_attr_state);
4084 +
4085 + iounmap(npe->addr);
4086 + release_resource(npe->res);
4087 + kfree(npe);
4088 + return 0;
4089 +}
4090 +
4091 +static struct platform_driver ixp4xx_npe_driver = {
4092 + .driver = {
4093 + .name = "ixp4xx_npe",
4094 + .owner = THIS_MODULE,
4095 + },
4096 + .probe = npe_probe,
4097 + .remove = npe_remove,
4098 +};
4099 +
4100 +static int __init init_npedriver(void)
4101 +{
4102 + int ret;
4103 + if ((ret = misc_register(&ucode_dl_dev))){
4104 + printk(KERN_ERR "Failed to register misc device %d\n",
4105 + MICROCODE_MINOR);
4106 + return ret;
4107 + }
4108 + if ((ret = platform_driver_register(&ixp4xx_npe_driver)))
4109 + misc_deregister(&ucode_dl_dev);
4110 + else
4111 + printk(KERN_INFO IXNPE_VERSION " initialized\n");
4112 +
4113 + return ret;
4114 +
4115 +}
4116 +
4117 +static void __exit finish_npedriver(void)
4118 +{
4119 + misc_deregister(&ucode_dl_dev);
4120 + platform_driver_unregister(&ixp4xx_npe_driver);
4121 +}
4122 +
4123 +module_init(init_npedriver);
4124 +module_exit(finish_npedriver);
4125 +
4126 +MODULE_LICENSE("GPL");
4127 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
4128 +
4129 +EXPORT_SYMBOL(get_npe_by_id);
4130 +EXPORT_SYMBOL(return_npe_dev);
4131 Index: linux-2.6.21.7/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h
4132 ===================================================================
4133 --- linux-2.6.21.7.orig/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h
4134 +++ linux-2.6.21.7/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h
4135 @@ -22,6 +22,8 @@
4136 #ifndef _ASM_ARM_IXP4XX_H_
4137 #define _ASM_ARM_IXP4XX_H_
4138
4139 +#include "npe_regs.h"
4140 +
4141 /*
4142 * IXP4xx Linux Memory Map:
4143 *
4144 @@ -44,6 +46,12 @@
4145 */
4146
4147 /*
4148 + * PCI Memory Space
4149 + */
4150 +#define IXP4XX_PCIMEM_BASE_PHYS (0x48000000)
4151 +#define IXP4XX_PCIMEM_REGION_SIZE (0x04000000)
4152 +#define IXP4XX_PCIMEM_BAR_SIZE (0x01000000)
4153 +/*
4154 * Queue Manager
4155 */
4156 #define IXP4XX_QMGR_BASE_PHYS (0x60000000)
4157 @@ -322,7 +330,13 @@
4158 #define PCI_ATPDMA0_LENADDR_OFFSET 0x48
4159 #define PCI_ATPDMA1_AHBADDR_OFFSET 0x4C
4160 #define PCI_ATPDMA1_PCIADDR_OFFSET 0x50
4161 -#define PCI_ATPDMA1_LENADDR_OFFSET 0x54
4162 +#define PCI_ATPDMA1_LENADDR_OFFSET 0x54
4163 +#define PCI_PTADMA0_AHBADDR_OFFSET 0x58
4164 +#define PCI_PTADMA0_PCIADDR_OFFSET 0x5c
4165 +#define PCI_PTADMA0_LENADDR_OFFSET 0x60
4166 +#define PCI_PTADMA1_AHBADDR_OFFSET 0x64
4167 +#define PCI_PTADMA1_PCIADDR_OFFSET 0x68
4168 +#define PCI_PTADMA1_LENADDR_OFFSET 0x6c
4169
4170 /*
4171 * PCI Control/Status Registers
4172 @@ -351,6 +365,12 @@
4173 #define PCI_ATPDMA1_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
4174 #define PCI_ATPDMA1_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
4175 #define PCI_ATPDMA1_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
4176 +#define PCI_PTADMA0_AHBADDR IXP4XX_PCI_CSR(PCI_PTADMA0_AHBADDR_OFFSET)
4177 +#define PCI_PTADMA0_PCIADDR IXP4XX_PCI_CSR(PCI_PTADMA0_PCIADDR_OFFSET)
4178 +#define PCI_PTADMA0_LENADDR IXP4XX_PCI_CSR(PCI_PTADMA0_LENADDR_OFFSET)
4179 +#define PCI_PTADMA1_AHBADDR IXP4XX_PCI_CSR(PCI_PTADMA1_AHBADDR_OFFSET)
4180 +#define PCI_PTADMA1_PCIADDR IXP4XX_PCI_CSR(PCI_PTADMA1_PCIADDR_OFFSET)
4181 +#define PCI_PTADMA1_LENADDR IXP4XX_PCI_CSR(PCI_PTADMA1_LENADDR_OFFSET)
4182
4183 /*
4184 * PCI register values and bit definitions
4185 @@ -607,6 +627,34 @@
4186
4187 #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
4188
4189 +
4190 +/* Fuse Bits of IXP_EXP_CFG2 */
4191 +#define IX_FUSE_RCOMP (1 << 0)
4192 +#define IX_FUSE_USB (1 << 1)
4193 +#define IX_FUSE_HASH (1 << 2)
4194 +#define IX_FUSE_AES (1 << 3)
4195 +#define IX_FUSE_DES (1 << 4)
4196 +#define IX_FUSE_HDLC (1 << 5)
4197 +#define IX_FUSE_AAL (1 << 6)
4198 +#define IX_FUSE_HSS (1 << 7)
4199 +#define IX_FUSE_UTOPIA (1 << 8)
4200 +#define IX_FUSE_ETH0 (1 << 9)
4201 +#define IX_FUSE_ETH1 (1 << 10)
4202 +#define IX_FUSE_NPEA (1 << 11)
4203 +#define IX_FUSE_NPEB (1 << 12)
4204 +#define IX_FUSE_NPEC (1 << 13)
4205 +#define IX_FUSE_PCI (1 << 14)
4206 +#define IX_FUSE_ECC (1 << 15)
4207 +#define IX_FUSE_UTOPIA_PHY_LIMIT (3 << 16)
4208 +#define IX_FUSE_USB_HOST (1 << 18)
4209 +#define IX_FUSE_NPEA_ETH (1 << 19)
4210 +#define IX_FUSE_NPEB_ETH (1 << 20)
4211 +#define IX_FUSE_RSA (1 << 21)
4212 +#define IX_FUSE_XSCALE_MAX_FREQ (3 << 22)
4213 +
4214 +#define IX_FUSE_IXP46X_ONLY (IX_FUSE_XSCALE_MAX_FREQ | IX_FUSE_RSA | \
4215 + IX_FUSE_NPEB_ETH | IX_FUSE_NPEA_ETH | IX_FUSE_USB_HOST | IX_FUSE_ECC)
4216 +
4217 #ifndef __ASSEMBLY__
4218 static inline int cpu_is_ixp46x(void)
4219 {
4220 @@ -620,6 +668,15 @@ static inline int cpu_is_ixp46x(void)
4221 #endif
4222 return 0;
4223 }
4224 +
4225 +static inline u32 ix_fuse(void)
4226 +{
4227 + unsigned int fuses = ~(*IXP4XX_EXP_CFG2);
4228 + if (!cpu_is_ixp46x())
4229 + fuses &= ~IX_FUSE_IXP46X_ONLY;
4230 +
4231 + return fuses;
4232 +}
4233 #endif
4234
4235 #endif
4236 Index: linux-2.6.21.7/include/asm-arm/arch-ixp4xx/npe_regs.h
4237 ===================================================================
4238 --- /dev/null
4239 +++ linux-2.6.21.7/include/asm-arm/arch-ixp4xx/npe_regs.h
4240 @@ -0,0 +1,82 @@
4241 +#ifndef NPE_REGS_H
4242 +#define NPE_REGS_H
4243 +
4244 +/* Execution Address */
4245 +#define IX_NPEDL_REG_OFFSET_EXAD 0x00
4246 +/* Execution Data */
4247 +#define IX_NPEDL_REG_OFFSET_EXDATA 0x04
4248 +/* Execution Control */
4249 +#define IX_NPEDL_REG_OFFSET_EXCTL 0x08
4250 +/* Execution Count */
4251 +#define IX_NPEDL_REG_OFFSET_EXCT 0x0C
4252 +/* Action Point 0 */
4253 +#define IX_NPEDL_REG_OFFSET_AP0 0x10
4254 +/* Action Point 1 */
4255 +#define IX_NPEDL_REG_OFFSET_AP1 0x14
4256 +/* Action Point 2 */
4257 +#define IX_NPEDL_REG_OFFSET_AP2 0x18
4258 +/* Action Point 3 */
4259 +#define IX_NPEDL_REG_OFFSET_AP3 0x1C
4260 +/* Watchpoint FIFO */
4261 +#define IX_NPEDL_REG_OFFSET_WFIFO 0x20
4262 +/* Watch Count */
4263 +#define IX_NPEDL_REG_OFFSET_WC 0x24
4264 +/* Profile Count */
4265 +#define IX_NPEDL_REG_OFFSET_PROFCT 0x28
4266 +
4267 +/* Messaging Status */
4268 +#define IX_NPEDL_REG_OFFSET_STAT 0x2C
4269 +/* Messaging Control */
4270 +#define IX_NPEDL_REG_OFFSET_CTL 0x30
4271 +/* Mailbox Status */
4272 +#define IX_NPEDL_REG_OFFSET_MBST 0x34
4273 +/* messaging in/out FIFO */
4274 +#define IX_NPEDL_REG_OFFSET_FIFO 0x38
4275 +
4276 +
4277 +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IF 0x00100000
4278 +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IE 0x00080000
4279 +#define IX_NPEDL_MASK_ECS_REG_0_ACTIVE 0x80000000
4280 +
4281 +#define IX_NPEDL_EXCTL_CMD_NPE_STEP 0x01
4282 +#define IX_NPEDL_EXCTL_CMD_NPE_START 0x02
4283 +#define IX_NPEDL_EXCTL_CMD_NPE_STOP 0x03
4284 +#define IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE 0x04
4285 +#define IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT 0x0C
4286 +#define IX_NPEDL_EXCTL_CMD_RD_INS_MEM 0x10
4287 +#define IX_NPEDL_EXCTL_CMD_WR_INS_MEM 0x11
4288 +#define IX_NPEDL_EXCTL_CMD_RD_DATA_MEM 0x12
4289 +#define IX_NPEDL_EXCTL_CMD_WR_DATA_MEM 0x13
4290 +#define IX_NPEDL_EXCTL_CMD_RD_ECS_REG 0x14
4291 +#define IX_NPEDL_EXCTL_CMD_WR_ECS_REG 0x15
4292 +
4293 +#define IX_NPEDL_EXCTL_STATUS_RUN 0x80000000
4294 +#define IX_NPEDL_EXCTL_STATUS_STOP 0x40000000
4295 +#define IX_NPEDL_EXCTL_STATUS_CLEAR 0x20000000
4296 +
4297 +#define IX_NPEDL_MASK_WFIFO_VALID 0x80000000
4298 +#define IX_NPEDL_MASK_STAT_OFNE 0x00010000
4299 +#define IX_NPEDL_MASK_STAT_IFNE 0x00080000
4300 +
4301 +#define IX_NPEDL_ECS_DBG_CTXT_REG_0 0x0C
4302 +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_0 0x04
4303 +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_0 0x08
4304 +
4305 +/* NPE control register bit definitions */
4306 +#define IX_NPEMH_NPE_CTL_OFE (1 << 16) /**< OutFifoEnable */
4307 +#define IX_NPEMH_NPE_CTL_IFE (1 << 17) /**< InFifoEnable */
4308 +#define IX_NPEMH_NPE_CTL_OFEWE (1 << 24) /**< OutFifoEnableWriteEnable */
4309 +#define IX_NPEMH_NPE_CTL_IFEWE (1 << 25) /**< InFifoEnableWriteEnable */
4310 +
4311 +/* NPE status register bit definitions */
4312 +#define IX_NPEMH_NPE_STAT_OFNE (1 << 16) /**< OutFifoNotEmpty */
4313 +#define IX_NPEMH_NPE_STAT_IFNF (1 << 17) /**< InFifoNotFull */
4314 +#define IX_NPEMH_NPE_STAT_OFNF (1 << 18) /**< OutFifoNotFull */
4315 +#define IX_NPEMH_NPE_STAT_IFNE (1 << 19) /**< InFifoNotEmpty */
4316 +#define IX_NPEMH_NPE_STAT_MBINT (1 << 20) /**< Mailbox interrupt */
4317 +#define IX_NPEMH_NPE_STAT_IFINT (1 << 21) /**< InFifo interrupt */
4318 +#define IX_NPEMH_NPE_STAT_OFINT (1 << 22) /**< OutFifo interrupt */
4319 +#define IX_NPEMH_NPE_STAT_WFINT (1 << 23) /**< WatchFifo interrupt */
4320 +
4321 +#endif
4322 +
4323 Index: linux-2.6.21.7/include/asm-arm/arch-ixp4xx/platform.h
4324 ===================================================================
4325 --- linux-2.6.21.7.orig/include/asm-arm/arch-ixp4xx/platform.h
4326 +++ linux-2.6.21.7/include/asm-arm/arch-ixp4xx/platform.h
4327 @@ -86,6 +86,25 @@ struct ixp4xx_i2c_pins {
4328 unsigned long scl_pin;
4329 };
4330
4331 +struct npe_plat_data {
4332 + const char *name;
4333 + int data_size;
4334 + int inst_size;
4335 + int id; /* Node ID */
4336 +};
4337 +
4338 +struct mac_plat_info {
4339 + int npe_id; /* Node ID of the NPE for this port */
4340 + int port_id; /* Port ID for NPE-B @ ixp465 */
4341 + int eth_id; /* Physical ID */
4342 + int phy_id; /* ID of the connected PHY (PCB/platform dependent) */
4343 + int rxq_id; /* Queue ID of the RX-free q */
4344 + int rxdoneq_id; /* where incoming packets are returned */
4345 + int txq_id; /* Where to push the outgoing packets */
4346 + unsigned char hwaddr[6]; /* Desired hardware address */
4347 +
4348 +};
4349 +
4350 /*
4351 * This structure provide a means for the board setup code
4352 * to give information to th pata_ixp4xx driver. It is
4353 Index: linux-2.6.21.7/include/linux/ixp_crypto.h
4354 ===================================================================
4355 --- /dev/null
4356 +++ linux-2.6.21.7/include/linux/ixp_crypto.h
4357 @@ -0,0 +1,192 @@
4358 +
4359 +#ifndef IX_CRYPTO_H
4360 +#define IX_CRYPTO_H
4361 +
4362 +#define MAX_KEYLEN 64
4363 +#define NPE_CTX_LEN 80
4364 +#define AES_BLOCK128 16
4365 +
4366 +#define NPE_OP_HASH_GEN_ICV 0x50
4367 +#define NPE_OP_ENC_GEN_KEY 0xc9
4368 +
4369 +
4370 +#define NPE_OP_HASH_VERIFY 0x01
4371 +#define NPE_OP_CCM_ENABLE 0x04
4372 +#define NPE_OP_CRYPT_ENABLE 0x08
4373 +#define NPE_OP_HASH_ENABLE 0x10
4374 +#define NPE_OP_NOT_IN_PLACE 0x20
4375 +#define NPE_OP_HMAC_DISABLE 0x40
4376 +#define NPE_OP_CRYPT_ENCRYPT 0x80
4377 +
4378 +#define MOD_ECB 0x0000
4379 +#define MOD_CTR 0x1000
4380 +#define MOD_CBC_ENC 0x2000
4381 +#define MOD_CBC_DEC 0x3000
4382 +#define MOD_CCM_ENC 0x4000
4383 +#define MOD_CCM_DEC 0x5000
4384 +
4385 +#define ALGO_AES 0x0800
4386 +#define CIPH_DECR 0x0000
4387 +#define CIPH_ENCR 0x0400
4388 +
4389 +#define MOD_DES 0x0000
4390 +#define MOD_TDEA2 0x0100
4391 +#define MOD_TDEA3 0x0200
4392 +#define MOD_AES128 0x0000
4393 +#define MOD_AES192 0x0100
4394 +#define MOD_AES256 0x0200
4395 +
4396 +#define KEYLEN_128 4
4397 +#define KEYLEN_192 6
4398 +#define KEYLEN_256 8
4399 +
4400 +#define CIPHER_TYPE_NULL 0
4401 +#define CIPHER_TYPE_DES 1
4402 +#define CIPHER_TYPE_3DES 2
4403 +#define CIPHER_TYPE_AES 3
4404 +
4405 +#define CIPHER_MODE_ECB 1
4406 +#define CIPHER_MODE_CTR 2
4407 +#define CIPHER_MODE_CBC 3
4408 +#define CIPHER_MODE_CCM 4
4409 +
4410 +#define HASH_TYPE_NULL 0
4411 +#define HASH_TYPE_MD5 1
4412 +#define HASH_TYPE_SHA1 2
4413 +#define HASH_TYPE_CBCMAC 3
4414 +
4415 +#define OP_REG_DONE 1
4416 +#define OP_REGISTER 2
4417 +#define OP_PERFORM 3
4418 +
4419 +#define STATE_UNREGISTERED 0
4420 +#define STATE_REGISTERED 1
4421 +#define STATE_UNLOADING 2
4422 +
4423 +struct crypt_ctl {
4424 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4425 + u8 mode; /* NPE operation */
4426 + u8 init_len;
4427 + u16 reserved;
4428 +#else
4429 + u16 reserved;
4430 + u8 init_len;
4431 + u8 mode; /* NPE operation */
4432 +#endif
4433 + u8 iv[16]; /* IV for CBC mode or CTR IV for CTR mode */
4434 + union {
4435 + u32 icv;
4436 + u32 rev_aes;
4437 + } addr;
4438 + u32 src_buf;
4439 + u32 dest_buf;
4440 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4441 + u16 auth_offs; /* Authentication start offset */
4442 + u16 auth_len; /* Authentication data length */
4443 + u16 crypt_offs; /* Cryption start offset */
4444 + u16 crypt_len; /* Cryption data length */
4445 +#else
4446 + u16 auth_len; /* Authentication data length */
4447 + u16 auth_offs; /* Authentication start offset */
4448 + u16 crypt_len; /* Cryption data length */
4449 + u16 crypt_offs; /* Cryption start offset */
4450 +#endif
4451 + u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
4452 + u32 crypto_ctx; /* NPE Crypto Param structure address */
4453 +
4454 + /* Used by Host */
4455 + struct ix_sa_ctx *sa_ctx;
4456 + int oper_type;
4457 +};
4458 +
4459 +struct npe_crypt_cont {
4460 + union {
4461 + struct crypt_ctl crypt;
4462 + u8 rev_aes_key[NPE_CTX_LEN];
4463 + } ctl;
4464 + struct npe_crypt_cont *next;
4465 + struct npe_crypt_cont *virt;
4466 + dma_addr_t phys;
4467 +};
4468 +
4469 +struct ix_hash_algo {
4470 + char *name;
4471 + u32 cfgword;
4472 + int digest_len;
4473 + int aad_len;
4474 + unsigned char *icv;
4475 + int type;
4476 +};
4477 +
4478 +struct ix_cipher_algo {
4479 + char *name;
4480 + u32 cfgword_enc;
4481 + u32 cfgword_dec;
4482 + int block_len;
4483 + int iv_len;
4484 + int type;
4485 + int mode;
4486 +};
4487 +
4488 +struct ix_key {
4489 + u8 key[MAX_KEYLEN];
4490 + int len;
4491 +};
4492 +
4493 +struct ix_sa_master {
4494 + struct device *npe_dev;
4495 + struct qm_queue *sendq;
4496 + struct qm_queue *recvq;
4497 + struct dma_pool *dmapool;
4498 + struct npe_crypt_cont *pool;
4499 + int pool_size;
4500 + rwlock_t lock;
4501 +};
4502 +
4503 +struct ix_sa_dir {
4504 + unsigned char *npe_ctx;
4505 + dma_addr_t npe_ctx_phys;
4506 + int npe_ctx_idx;
4507 + u8 npe_mode;
4508 +};
4509 +
4510 +struct ix_sa_ctx {
4511 + struct list_head list;
4512 + struct ix_sa_master *master;
4513 +
4514 + const struct ix_hash_algo *h_algo;
4515 + const struct ix_cipher_algo *c_algo;
4516 + struct ix_key c_key;
4517 + struct ix_key h_key;
4518 +
4519 + int digest_len;
4520 +
4521 + struct ix_sa_dir encrypt;
4522 + struct ix_sa_dir decrypt;
4523 +
4524 + struct npe_crypt_cont *rev_aes;
4525 + gfp_t gfp_flags;
4526 +
4527 + int state;
4528 + void *priv;
4529 +
4530 + void(*reg_cb)(struct ix_sa_ctx*, int);
4531 + void(*perf_cb)(struct ix_sa_ctx*, void*, int);
4532 + atomic_t use_cnt;
4533 +};
4534 +
4535 +const struct ix_hash_algo *ix_hash_by_id(int type);
4536 +const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode);
4537 +
4538 +struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags);
4539 +void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx);
4540 +
4541 +int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr,
4542 + int datalen, int c_offs, int c_len, int a_offs, int a_len,
4543 + int hmac, char *iv, int encrypt);
4544 +
4545 +int ix_sa_ctx_setup_cipher_auth(struct ix_sa_ctx *sa_ctx,
4546 + const struct ix_cipher_algo *cipher,
4547 + const struct ix_hash_algo *auth, int len);
4548 +
4549 +#endif
4550 Index: linux-2.6.21.7/include/linux/ixp_npe.h
4551 ===================================================================
4552 --- /dev/null
4553 +++ linux-2.6.21.7/include/linux/ixp_npe.h
4554 @@ -0,0 +1,117 @@
4555 +/*
4556 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
4557 + *
4558 + * This file is released under the GPLv2
4559 + */
4560 +
4561 +#ifndef NPE_DEVICE_H
4562 +#define NPE_DEVICE_H
4563 +
4564 +#include <linux/miscdevice.h>
4565 +#include <asm/hardware.h>
4566 +
4567 +#ifdef __ARMEB__
4568 +#undef CONFIG_NPE_ADDRESS_COHERENT
4569 +#else
4570 +#define CONFIG_NPE_ADDRESS_COHERENT
4571 +#endif
4572 +
4573 +#if defined(__ARMEB__) || defined (CONFIG_NPE_ADDRESS_COHERENT)
4574 +#define npe_to_cpu32(x) (x)
4575 +#define npe_to_cpu16(x) (x)
4576 +#define cpu_to_npe32(x) (x)
4577 +#define cpu_to_npe16(x) (x)
4578 +#else
4579 +#error NPE_DATA_COHERENT
4580 +#define NPE_DATA_COHERENT
4581 +#define npe_to_cpu32(x) be32_to_cpu(x)
4582 +#define npe_to_cpu16(x) be16_to_cpu(x)
4583 +#define cpu_to_npe32(x) cpu_to_be32(x)
4584 +#define cpu_to_npe16(x) cpu_to_be16(x)
4585 +#endif
4586 +
4587 +
4588 +struct npe_info {
4589 + struct resource *res;
4590 + void __iomem *addr;
4591 + struct npe_plat_data *plat;
4592 + u8 img_info[4];
4593 + int usage;
4594 + int loaded;
4595 + u32 exec_count;
4596 + u32 ctx_reg2;
4597 +};
4598 +
4599 +
4600 +static inline void npe_reg_write(struct npe_info *npe, u32 reg, u32 val)
4601 +{
4602 + *(volatile u32*)((u8*)(npe->addr) + reg) = val;
4603 +}
4604 +
4605 +static inline u32 npe_reg_read(struct npe_info *npe, u32 reg)
4606 +{
4607 + return *(volatile u32*)((u8*)(npe->addr) + reg);
4608 +}
4609 +
4610 +static inline u32 npe_status(struct npe_info *npe)
4611 +{
4612 + return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCTL);
4613 +}
4614 +
4615 +/* ixNpeDlNpeMgrCommandIssue */
4616 +static inline void npe_write_exctl(struct npe_info *npe, u32 cmd)
4617 +{
4618 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4619 +}
4620 +/* ixNpeDlNpeMgrWriteCommandIssue */
4621 +static inline void
4622 +npe_write_cmd(struct npe_info *npe, u32 addr, u32 data, int cmd)
4623 +{
4624 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXDATA, data);
4625 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
4626 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4627 +}
4628 +/* ixNpeDlNpeMgrReadCommandIssue */
4629 +static inline u32
4630 +npe_read_cmd(struct npe_info *npe, u32 addr, int cmd)
4631 +{
4632 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
4633 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4634 + /* Intel reads the data twice - so do we... */
4635 + npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
4636 + return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
4637 +}
4638 +
4639 +/* ixNpeDlNpeMgrExecAccRegWrite */
4640 +static inline void npe_write_ecs_reg(struct npe_info *npe, u32 addr, u32 data)
4641 +{
4642 + npe_write_cmd(npe, addr, data, IX_NPEDL_EXCTL_CMD_WR_ECS_REG);
4643 +}
4644 +/* ixNpeDlNpeMgrExecAccRegRead */
4645 +static inline u32 npe_read_ecs_reg(struct npe_info *npe, u32 addr)
4646 +{
4647 + return npe_read_cmd(npe, addr, IX_NPEDL_EXCTL_CMD_RD_ECS_REG);
4648 +}
4649 +
4650 +extern void npe_stop(struct npe_info *npe);
4651 +extern void npe_start(struct npe_info *npe);
4652 +extern void npe_reset(struct npe_info *npe);
4653 +
4654 +extern struct device *get_npe_by_id(int id);
4655 +extern void return_npe_dev(struct device *dev);
4656 +
4657 +/* NPE Messages */
4658 +extern int
4659 +npe_mh_status(struct npe_info *npe);
4660 +extern int
4661 +npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp, u8 *macaddr);
4662 +extern int
4663 +npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp);
4664 +extern int
4665 +npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid);
4666 +extern int
4667 +npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp, int enable);
4668 +extern int
4669 +npe_mh_get_stats(struct npe_info *npe, struct mac_plat_info *mp, u32 phys, int reset);
4670 +
4671 +#endif
4672 Index: linux-2.6.21.7/include/linux/ixp_qmgr.h
4673 ===================================================================
4674 --- /dev/null
4675 +++ linux-2.6.21.7/include/linux/ixp_qmgr.h
4676 @@ -0,0 +1,202 @@
4677 +/*
4678 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
4679 + *
4680 + * This file is released under the GPLv2
4681 + */
4682 +
4683 +#ifndef IX_QMGR_H
4684 +#define IX_QMGR_H
4685 +
4686 +#include <linux/skbuff.h>
4687 +#include <linux/list.h>
4688 +#include <linux/if_ether.h>
4689 +#include <linux/spinlock.h>
4690 +#include <linux/platform_device.h>
4691 +#include <linux/ixp_npe.h>
4692 +#include <asm/atomic.h>
4693 +
4694 +/* All offsets are in 32bit words */
4695 +#define QUE_LOW_STAT0 0x100 /* 4x Status of the 32 lower queues 0-31 */
4696 +#define QUE_UO_STAT0 0x104 /* 2x Underflow/Overflow status bits*/
4697 +#define QUE_UPP_STAT0 0x106 /* 2x Status of the 32 upper queues 32-63 */
4698 +#define INT0_SRC_SELREG0 0x108 /* 4x */
4699 +#define QUE_IE_REG0 0x10c /* 2x */
4700 +#define QUE_INT_REG0 0x10e /* 2x IRQ reg, write 1 to reset IRQ */
4701 +
4702 +#define IX_QMGR_QCFG_BASE 0x800
4703 +#define IX_QMGR_QCFG_SIZE 0x40
4704 +#define IX_QMGR_SRAM_SPACE (IX_QMGR_QCFG_BASE + IX_QMGR_QCFG_SIZE)
4705 +
4706 +#define MAX_QUEUES 32 /* first, we only support the lower 32 queues */
4707 +#define MAX_NPES 3
4708 +
4709 +enum {
4710 + Q_IRQ_ID_E = 0, /* Queue Empty due to last read */
4711 + Q_IRQ_ID_NE, /* Queue Nearly Empty due to last read */
4712 + Q_IRQ_ID_NF, /* Queue Nearly Full due to last write */
4713 + Q_IRQ_ID_F, /* Queue Full due to last write */
4714 + Q_IRQ_ID_NOT_E, /* Queue Not Empty due to last write */
4715 + Q_IRQ_ID_NOT_NE, /* Queue Not Nearly Empty due to last write */
4716 + Q_IRQ_ID_NOT_NF, /* Queue Not Nearly Full due to last read */
4717 + Q_IRQ_ID_NOT_F /* Queue Not Full due to last read */
4718 +};
4719 +
4720 +extern struct qm_queue *request_queue(int qid, int len);
4721 +extern void release_queue(struct qm_queue *queue);
4722 +extern int queue_set_irq_src(struct qm_queue *queue, int flag);
4723 +extern void queue_set_watermarks(struct qm_queue *, unsigned ne, unsigned nf);
4724 +extern int queue_len(struct qm_queue *queue);
4725 +
4726 +struct qm_qmgr;
4727 +struct qm_queue;
4728 +
4729 +typedef void(*queue_cb)(struct qm_queue *);
4730 +
4731 +struct qm_queue {
4732 + int addr; /* word offset from IX_QMGR_SRAM_SPACE */
4733 + int len; /* size in words */
4734 + int id; /* Q Id */
4735 + u32 __iomem *acc_reg;
4736 + struct device *dev;
4737 + atomic_t use;
4738 + queue_cb irq_cb;
4739 + void *cb_data;
4740 +};
4741 +
4742 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4743 +struct eth_ctl {
4744 + u32 next;
4745 + u16 buf_len;
4746 + u16 pkt_len;
4747 + u32 phys_addr;
4748 + u8 dest_id;
4749 + u8 src_id;
4750 + u16 flags;
4751 + u8 qos;
4752 + u8 padlen;
4753 + u16 vlan_tci;
4754 + u8 dest_mac[ETH_ALEN];
4755 + u8 src_mac[ETH_ALEN];
4756 +};
4757 +
4758 +#else
4759 +struct eth_ctl {
4760 + u32 next;
4761 + u16 pkt_len;
4762 + u16 buf_len;
4763 + u32 phys_addr;
4764 + u16 flags;
4765 + u8 src_id;
4766 + u8 dest_id;
4767 + u16 vlan_tci;
4768 + u8 padlen;
4769 + u8 qos;
4770 + u8 dest_mac[ETH_ALEN];
4771 + u8 src_mac[ETH_ALEN];
4772 +};
4773 +#endif
4774 +
4775 +struct npe_cont {
4776 + struct eth_ctl eth;
4777 + void *data;
4778 + struct npe_cont *next;
4779 + struct npe_cont *virt;
4780 + dma_addr_t phys;
4781 +};
4782 +
4783 +struct qm_qmgr {
4784 + u32 __iomem *addr;
4785 + struct resource *res;
4786 + struct qm_queue *queues[MAX_QUEUES];
4787 + rwlock_t lock;
4788 + struct npe_cont *pool;
4789 + struct dma_pool *dmapool;
4790 + int irq;
4791 +};
4792 +
4793 +static inline void queue_write_cfg_reg(struct qm_queue *queue, u32 val)
4794 +{
4795 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4796 + *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id) = val;
4797 +}
4798 +static inline u32 queue_read_cfg_reg(struct qm_queue *queue)
4799 +{
4800 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4801 + return *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
4802 +}
4803 +
4804 +static inline void queue_ack_irq(struct qm_queue *queue)
4805 +{
4806 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4807 + *(qmgr->addr + QUE_INT_REG0) = 1 << queue->id;
4808 +}
4809 +
4810 +static inline void queue_enable_irq(struct qm_queue *queue)
4811 +{
4812 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4813 + *(qmgr->addr + QUE_IE_REG0) |= 1 << queue->id;
4814 +}
4815 +
4816 +static inline void queue_disable_irq(struct qm_queue *queue)
4817 +{
4818 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4819 + *(qmgr->addr + QUE_IE_REG0) &= ~(1 << queue->id);
4820 +}
4821 +
4822 +static inline void queue_put_entry(struct qm_queue *queue, u32 entry)
4823 +{
4824 + *(queue->acc_reg) = cpu_to_npe32(entry);
4825 +}
4826 +
4827 +static inline u32 queue_get_entry(struct qm_queue *queue)
4828 +{
4829 + return npe_to_cpu32(*queue->acc_reg);
4830 +}
4831 +
4832 +static inline struct npe_cont *qmgr_get_cont(struct qm_qmgr *qmgr)
4833 +{
4834 + unsigned long flags;
4835 + struct npe_cont *cont;
4836 +
4837 + if (!qmgr->pool)
4838 + return NULL;
4839 + write_lock_irqsave(&qmgr->lock, flags);
4840 + cont = qmgr->pool;
4841 + qmgr->pool = cont->next;
4842 + write_unlock_irqrestore(&qmgr->lock, flags);
4843 + return cont;
4844 +}
4845 +
4846 +static inline void qmgr_return_cont(struct qm_qmgr *qmgr,struct npe_cont *cont)
4847 +{
4848 + unsigned long flags;
4849 +
4850 + write_lock_irqsave(&qmgr->lock, flags);
4851 + cont->next = qmgr->pool;
4852 + qmgr->pool = cont;
4853 + write_unlock_irqrestore(&qmgr->lock, flags);
4854 +}
4855 +
4856 +static inline int queue_stat(struct qm_queue *queue)
4857 +{
4858 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4859 + u32 reg = *(qmgr->addr + QUE_UO_STAT0 + (queue->id >> 4));
4860 + return (reg >> ((queue->id & 0xf) << 1)) & 3;
4861 +}
4862 +
4863 +/* Prints the queue state, which is very, very helpful for debugging */
4864 +static inline void queue_state(struct qm_queue *queue)
4865 +{
4866 + u32 val=0, lstat=0;
4867 + int offs;
4868 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4869 +
4870 + offs = queue->id/8 + QUE_LOW_STAT0;
4871 + val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
4872 + lstat = (*(qmgr->addr + offs) >> ((queue->id % 8)*4)) & 0x0f;
4873 +
4874 + printk("Qid[%02d]: Wptr=%4x, Rptr=%4x, diff=%4x, Stat:%x\n", queue->id,
4875 + val&0x7f, (val>>7) &0x7f, (val - (val >> 7)) & 0x7f, lstat);
4876 +}
4877 +
4878 +#endif
This page took 0.277797 seconds and 5 git commands to generate.