1 diff --git a/Documentation/networking/ixp4xx/IxNpeMicrocode.h b/Documentation/networking/ixp4xx/IxNpeMicrocode.h
4 Index: linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/IxNpeMicrocode.h
5 ===================================================================
6 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
7 +++ linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/IxNpeMicrocode.h 2007-02-21 02:24:35.000000000 -0800
10 + * IxNpeMicrocode.h - Header file for compiling the Intel microcode C file
12 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
14 + * This file is released under the GPLv2
19 + * gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode
21 + * Executing the resulting binary on your build-host creates the
22 + * "NPE-[ABC].xxxxxxxx" files containing the selected microcode
24 + * fetch the IxNpeMicrocode.c from the Intel Access Library.
25 + * It will include this header.
27 + * select Images for every NPE from the following
28 + * (used C++ comments for easy uncommenting ....)
31 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
32 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
33 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
34 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_TSLOT_SWITCH
35 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
36 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
37 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL
38 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_2_PORT
39 +// #define IX_NPEDL_NPEIMAGE_NPEA_DMA
40 +// #define IX_NPEDL_NPEIMAGE_NPEA_ATM_MPHY_12_PORT
41 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_MPHY_1_PORT
42 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_SPHY_1_PORT
43 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0
44 +// #define IX_NPEDL_NPEIMAGE_NPEA_WEP
47 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
48 +//#define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
49 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
50 +// #define IX_NPEDL_NPEIMAGE_NPEB_DMA
51 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
52 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
53 + #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL
56 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
57 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
58 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
59 +// #define IX_NPEDL_NPEIMAGE_NPEC_DMA
60 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_SPAN
61 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_FIREWALL
62 + #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_CCM_ETH
63 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_ETH_LEARN_FILTER_SPAN_FIREWALL
64 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
65 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
66 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL
72 +#include <netinet/in.h>
73 +#include <sys/types.h>
74 +#include <sys/stat.h>
78 +#include <byteswap.h>
81 +#if __BYTE_ORDER == __LITTLE_ENDIAN
82 +#define to_le32(x) (x)
83 +#define to_be32(x) bswap_32(x)
85 +#define to_be32(x) (x)
86 +#define to_le32(x) bswap_32(x)
96 +const unsigned IxNpeMicrocode_array[];
98 +int main(int argc, char *argv[])
100 + struct dl_image *image = (struct dl_image *)IxNpeMicrocode_array;
101 + int imgsiz, i, fd, cnt;
102 + const unsigned *arrayptr = IxNpeMicrocode_array;
103 + const char *names[] = { "IXP425", "IXP465", "unknown" };
107 + if (!strcmp(argv[1], "-le"))
109 + else if (!strcmp(argv[1], "-be"))
112 + printf("Usage: %s <-le|-be>\n", argv[0]);
113 + return EXIT_FAILURE;
117 + for (image = (struct dl_image *)arrayptr, cnt=0;
118 + (image->id != 0xfeedf00d) && (image->magic == 0xfeedf00d);
119 + image = (struct dl_image *)(arrayptr), cnt++)
121 + unsigned char field[4];
122 + imgsiz = image->size + 3;
123 + *(unsigned*)field = to_be32(image->id);
124 + char filename[40], slnk[10];
126 + sprintf(filename, "NPE-%c.%08x", (field[0] & 0xf) + 'A',
128 + sprintf(slnk, "NPE-%c", (field[0] & 0xf) + 'A');
129 + printf("Writing image: %s.NPE_%c Func: %2x Rev: %02x.%02x "
130 + "Size: %5d to: '%s'\n",
131 + names[field[0] >> 4], (field[0] & 0xf) + 'A',
132 + field[1], field[2], field[3], imgsiz*4, filename);
133 + fd = open(filename, O_CREAT | O_RDWR | O_TRUNC, 0644);
135 + for (i=0; i<imgsiz; i++) {
136 + *(unsigned*)field = bigendian ?
137 + to_be32(arrayptr[i]) :
138 + to_le32(arrayptr[i]);
139 + write(fd, field, sizeof(field));
143 + symlink(filename, slnk);
147 + arrayptr += imgsiz;
152 Index: linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/README
153 ===================================================================
154 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
155 +++ linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/README 2007-02-21 02:24:35.000000000 -0800
157 +Information about the networking driver using the IXP4XX CPU internal NPEs
160 +If this driver is used, the IAL (Intel Access Library) must not be loaded.
161 +However, the IAL may be loaded, if these modules are unloaded:
162 + ixp4xx_npe.ko, ixp4xx_qmgr.ko ixp4xx_mac.ko
164 +This also means that HW crypto acceleration does NOT work when using this
165 +driver, unless I have finished my crypto driver for NPE-C
168 +Adapting to your custom board:
169 +------------------------------
170 +use "arch/arm/mach-ixp4xx/ixdp425-setup.c" as template:
172 +in "static struct mac_plat_info" adapt the entry "phy_id" to your needs
173 +(Ask your hardware designer about the PHY id)
175 +The order of "&mac0" and "&mac1" in the "struct platform_device"
176 +determines which of them becomes eth0 and eth1
182 + Configure "CONFIG_HOTPLUG" and "CONFIG_FW_LOADER" and configure
183 + IXP4XX_NPE as module.
184 + The default hotplug script will load the Firmware from
185 + /usr/lib/hotplug/firmware/NPE-[ABC]
186 + see Documentation/firmware_class/hotplug-script
188 + You should take care that $ACTION is "add" and $SUBSYSTEM is "firmware"
189 + to avoid unnecessary calls:
190 + test $ACTION = "remove" -o $SUBSYSTEM != "firmware" && exit
193 + create a char-dev: "mknod /dev/misc/npe c 10 184"
194 + cat the Microcode into it:
195 + cat /usr/lib/hotplug/firmware/NPE-* > /dev/misc/npe
196 + This also works if the driver is linked to the kernel
198 + Having a mix of both (e.g. solution 1 for NPE-B and solution 2 for NPE-C)
199 + is perfectly ok and works.
201 + The state of the NPEs can be seen and changed at:
202 + /sys/bus/platform/devices/ixp4xx_npe.X/state
205 +Obtaining the Microcode:
206 +------------------------
207 +1) IxNpeMicrocode.h in this directory:
208 + Download IPL_IXP400NPELIBRARYWITHCRYPTO-2_1.ZIP from Intel
209 + It unpacks the Microcode IxNpeMicrocode.c
211 + Compile it with "gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode" on your host.
212 + The resulting images can be moved to "/usr/lib/hotplug/firmware"
214 +2) mc_grab.c in this directory:
215 + Compile and execute it either on the host or on the target
216 + to grab the microcode from a binary image like the RedBoot bootloader.
219 Index: linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/mc_grab.c
220 ===================================================================
221 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
222 +++ linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/mc_grab.c 2007-02-21 02:24:35.000000000 -0800
225 + * mc_grab.c - grabs IXP4XX microcode from a binary datastream
226 + * e.g. The redboot bootloader....
228 + * usage: mc_grab 1010200 2010200 < /dev/mtd/0 > /dev/misc/npe
230 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
232 + * This file is released under the GPLv2
239 +#include <netinet/in.h>
240 +#include <sys/types.h>
241 +#include <sys/stat.h>
248 +static void print_mc_info(unsigned id, int siz)
250 + unsigned char buf[sizeof(unsigned)];
251 + *(unsigned*)buf = id;
253 + const char *names[] = { "IXP425", "IXP465", "unknown" };
255 + idx = (buf[0] >> 4) < 2 ? (buf[0] >> 4) : 2;
257 + fprintf(stderr, "Device: %s:NPE_%c Func: %2x Rev: %02x.%02x "
258 + "Size: %5d bytes ID:%08x\n", names[idx], (buf[0] & 0xf)+'A',
259 + buf[1], buf[2], buf[3], siz*4, ntohl(id));
262 +int main(int argc, char *argv[])
265 + unsigned char buf[sizeof(unsigned)];
266 + unsigned magic = htonl(0xfeedf00d);
267 + unsigned id, my_ids[MAX_IMG+1], siz, sizbe;
268 + int ret=1, verbose=0;
270 + for (i=0, j=0; i<argc-1 && j<MAX_IMG; i++) {
271 + if (!strcmp(argv[i+1], "-v"))
274 + my_ids[j++] = htonl(strtoul(argv[i+1], NULL, 16));
277 + if (my_ids[0] == 0 && !verbose) {
278 + fprintf(stderr, "Usage: %s <-v> [ID1] [ID2] [IDn]\n", argv[0]);
282 + while ((ret=read(0, buf, sizeof(unsigned))) == sizeof(unsigned)) {
283 + if (*(unsigned*)buf != magic)
285 + if ((ret=read(0, buf, sizeof(unsigned))) != sizeof(unsigned) )
287 + id = *(unsigned*)buf;
289 + if (read(0, buf, sizeof(siz)) != sizeof(siz) )
291 + sizbe = *(unsigned*)buf;
292 + siz = ntohl(sizbe);
295 + print_mc_info(id, siz);
297 + for(i=0; my_ids[i]; i++)
298 + if (id == my_ids[i])
304 + print_mc_info(id, siz);
306 + write(1, &magic, sizeof(magic));
307 + write(1, &id, sizeof(id));
308 + write(1, &sizbe, sizeof(sizbe));
309 + for (i=0; i<siz; i++) {
310 + if (read(0, buf, sizeof(unsigned)) != sizeof(unsigned))
312 + write(1, buf, sizeof(unsigned));
318 + fprintf(stderr, "Error reading Microcode\n");
321 Index: linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/common.c
322 ===================================================================
323 --- linux-2.6.21-rc1-arm.orig/arch/arm/mach-ixp4xx/common.c 2007-02-21 02:24:18.000000000 -0800
324 +++ linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/common.c 2007-02-21 02:24:35.000000000 -0800
326 &ixp46x_i2c_controller
329 +static struct npe_plat_data npea = {
331 + .data_size = 0x800,
332 + .inst_size = 0x1000,
336 +static struct npe_plat_data npeb = {
338 + .data_size = 0x800,
339 + .inst_size = 0x800,
343 +static struct npe_plat_data npec = {
345 + .data_size = 0x800,
346 + .inst_size = 0x800,
350 +static struct resource res_npea = {
351 + .start = IXP4XX_NPEA_BASE_PHYS,
352 + .end = IXP4XX_NPEA_BASE_PHYS + 0xfff,
353 + .flags = IORESOURCE_MEM,
356 +static struct resource res_npeb = {
357 + .start = IXP4XX_NPEB_BASE_PHYS,
358 + .end = IXP4XX_NPEB_BASE_PHYS + 0xfff,
359 + .flags = IORESOURCE_MEM,
362 +static struct resource res_npec = {
363 + .start = IXP4XX_NPEC_BASE_PHYS,
364 + .end = IXP4XX_NPEC_BASE_PHYS + 0xfff,
365 + .flags = IORESOURCE_MEM,
368 +static struct platform_device dev_npea = {
369 + .name = "ixp4xx_npe",
371 + .dev.platform_data = &npea,
372 + .num_resources = 1,
373 + .resource = &res_npea,
376 +static struct platform_device dev_npeb = {
377 + .name = "ixp4xx_npe",
379 + .dev.platform_data = &npeb,
380 + .num_resources = 1,
381 + .resource = &res_npeb,
384 +static struct platform_device dev_npec = {
385 + .name = "ixp4xx_npe",
387 + .dev.platform_data = &npec,
388 + .num_resources = 1,
389 + .resource = &res_npec,
393 +static struct resource res_qmgr[] = {
395 + .start = IXP4XX_QMGR_BASE_PHYS,
396 + .end = IXP4XX_QMGR_BASE_PHYS + IXP4XX_QMGR_REGION_SIZE -1,
397 + .flags = IORESOURCE_MEM,
399 + .start = IRQ_IXP4XX_QM1,
400 + .flags = IORESOURCE_IRQ,
403 +static struct platform_device qmgr = {
404 + .name = "ixp4xx_qmgr",
407 + .coherent_dma_mask = DMA_32BIT_MASK,
409 + .num_resources = ARRAY_SIZE(res_qmgr),
410 + .resource = res_qmgr,
413 unsigned long ixp4xx_exp_bus_size;
414 EXPORT_SYMBOL(ixp4xx_exp_bus_size);
420 + npeb.inst_size = 0x1000;
421 + npec.inst_size = 0x1000;
424 + platform_device_register(&qmgr);
426 + if (ix_fuse() & IX_FUSE_NPEA)
427 + platform_device_register(&dev_npea);
428 + if (ix_fuse() & IX_FUSE_NPEB)
429 + platform_device_register(&dev_npeb);
430 + if (ix_fuse() & IX_FUSE_NPEC)
431 + platform_device_register(&dev_npec);
433 printk("IXP4xx: Using %luMiB expansion bus window size\n",
434 ixp4xx_exp_bus_size >> 20);
436 Index: linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/ixdp425-setup.c
437 ===================================================================
438 --- linux-2.6.21-rc1-arm.orig/arch/arm/mach-ixp4xx/ixdp425-setup.c 2007-02-21 02:24:18.000000000 -0800
439 +++ linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/ixdp425-setup.c 2007-02-21 02:24:35.000000000 -0800
440 @@ -101,10 +101,59 @@
441 .resource = ixdp425_uart_resources
445 +static struct resource res_mac0 = {
446 + .start = IXP4XX_EthB_BASE_PHYS,
447 + .end = IXP4XX_EthB_BASE_PHYS + 0x1ff,
448 + .flags = IORESOURCE_MEM,
451 +static struct resource res_mac1 = {
452 + .start = IXP4XX_EthC_BASE_PHYS,
453 + .end = IXP4XX_EthC_BASE_PHYS + 0x1ff,
454 + .flags = IORESOURCE_MEM,
457 +static struct mac_plat_info plat_mac0 = {
466 +static struct mac_plat_info plat_mac1 = {
475 +static struct platform_device mac0 = {
476 + .name = "ixp4xx_mac",
478 + .dev.platform_data = &plat_mac0,
479 + .num_resources = 1,
480 + .resource = &res_mac0,
483 +static struct platform_device mac1 = {
484 + .name = "ixp4xx_mac",
486 + .dev.platform_data = &plat_mac1,
487 + .num_resources = 1,
488 + .resource = &res_mac1,
491 static struct platform_device *ixdp425_devices[] __initdata = {
492 &ixdp425_i2c_controller,
500 static void __init ixdp425_init(void)
501 Index: linux-2.6.21-rc1-arm/drivers/net/Kconfig
502 ===================================================================
503 --- linux-2.6.21-rc1-arm.orig/drivers/net/Kconfig 2007-02-21 02:24:18.000000000 -0800
504 +++ linux-2.6.21-rc1-arm/drivers/net/Kconfig 2007-02-21 02:24:35.000000000 -0800
507 source "drivers/net/arm/Kconfig"
509 +source "drivers/net/ixp4xx/Kconfig"
512 tristate "MACE (Power Mac ethernet) support"
513 depends on NET_ETHERNET && PPC_PMAC && PPC32
514 Index: linux-2.6.21-rc1-arm/drivers/net/Makefile
515 ===================================================================
516 --- linux-2.6.21-rc1-arm.orig/drivers/net/Makefile 2007-02-21 02:24:18.000000000 -0800
517 +++ linux-2.6.21-rc1-arm/drivers/net/Makefile 2007-02-21 02:24:35.000000000 -0800
519 obj-$(CONFIG_IRDA) += irda/
520 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
521 obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
522 +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx/
524 obj-$(CONFIG_NETCONSOLE) += netconsole.o
526 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Kconfig
527 ===================================================================
528 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
529 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Kconfig 2007-02-21 02:24:35.000000000 -0800
532 + tristate "IXP4xx Queue Manager support"
533 + depends on ARCH_IXP4XX
534 + depends on NET_ETHERNET
536 + The IXP4XX Queue manager is a configurable hardware ringbuffer.
537 + It is used by the NPEs to exchange data from and to the CPU.
538 + You can either use this OR the Intel Access Library (IAL)
541 + tristate "IXP4xx NPE support"
542 + depends on ARCH_IXP4XX
543 + depends on NET_ETHERNET
545 + The IXP4XX NPE driver supports the 3 CPU co-processors called
546 + "Network Processing Engines" (NPE). It adds support for downloading
547 + the Microcode (firmware) via Hotplug or character-special-device.
548 + More about this at: Documentation/networking/ixp4xx/README.
549 + You can either use this OR the Intel Access Library (IAL)
551 +config IXP4XX_FW_LOAD
552 + bool "Use Firmware hotplug for Microcode download"
553 + depends on IXP4XX_NPE
557 + The default hotplug script will load the Firmware from
558 + /usr/lib/hotplug/firmware/NPE-[ABC]
559 + see Documentation/firmware_class/hotplug-script
562 + tristate "IXP4xx MAC support"
563 + depends on IXP4XX_NPE
564 + depends on IXP4XX_QMGR
565 + depends on NET_ETHERNET
568 + The IXP4XX MAC driver supports the MACs on the IXP4XX CPUs.
569 + There are 2 on ixp425 and up to 5 on ixdp465.
570 + You can either use this OR the Intel Access Library (IAL)
572 +config IXP4XX_CRYPTO
573 + tristate "IXP4xx crypto support"
574 + depends on IXP4XX_NPE
575 + depends on IXP4XX_QMGR
577 + This driver is a generic NPE-crypto access layer.
578 + You need additional code in OCF for example.
579 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Makefile
580 ===================================================================
581 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
582 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Makefile 2007-02-21 02:24:35.000000000 -0800
584 +obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx_qmgr.o
585 +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx_npe.o
586 +obj-$(CONFIG_IXP4XX_MAC) += ixp4xx_mac.o
587 +obj-$(CONFIG_IXP4XX_CRYPTO) += ixp4xx_crypto.o
589 +ixp4xx_npe-objs := ucode_dl.o npe_mh.o npe.o
590 +ixp4xx_mac-objs := mac_driver.o phy.o
591 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_crypto.c
592 ===================================================================
593 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
594 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_crypto.c 2007-02-21 02:24:35.000000000 -0800
597 + * ixp4xx_crypto.c - interface to the HW crypto
599 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
601 + * This file is released under the GPLv2
604 +#include <linux/ixp_qmgr.h>
605 +#include <linux/ixp_npe.h>
606 +#include <linux/dma-mapping.h>
607 +#include <linux/dmapool.h>
608 +#include <linux/device.h>
609 +#include <linux/delay.h>
610 +#include <linux/slab.h>
611 +#include <linux/kernel.h>
612 +#include <linux/ixp_crypto.h>
617 +#define NPE_ID 2 /* NPE C */
619 +#define QUEUE_SIZE 64
620 +#define MY_VERSION "0.0.1"
622 +/* local head for all sa_ctx */
623 +static struct ix_sa_master sa_master;
625 +static const struct ix_hash_algo _hash_algos[] = {
628 + .cfgword = 0xAA010004,
630 + .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
631 + "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
632 + .type = HASH_TYPE_MD5,
635 + .cfgword = 0x00000005,
637 + .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
638 + "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
639 + .type = HASH_TYPE_SHA1,
645 + .type = HASH_TYPE_CBCMAC,
649 +static const struct ix_cipher_algo _cipher_algos[] = {
652 + .cfgword_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
653 + .cfgword_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
655 + .type = CIPHER_TYPE_DES,
656 + .mode = CIPHER_MODE_ECB,
659 + .cfgword_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
660 + .cfgword_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
663 + .type = CIPHER_TYPE_DES,
664 + .mode = CIPHER_MODE_CBC,
666 + .name = "3DES ECB",
667 + .cfgword_enc = CIPH_ENCR | MOD_TDEA3 | MOD_ECB | KEYLEN_192,
668 + .cfgword_dec = CIPH_DECR | MOD_TDEA3 | MOD_ECB | KEYLEN_192,
670 + .type = CIPHER_TYPE_3DES,
671 + .mode = CIPHER_MODE_ECB,
673 + .name = "3DES CBC",
674 + .cfgword_enc = CIPH_ENCR | MOD_TDEA3 | MOD_CBC_ENC | KEYLEN_192,
675 + .cfgword_dec = CIPH_DECR | MOD_TDEA3 | MOD_CBC_DEC | KEYLEN_192,
678 + .type = CIPHER_TYPE_3DES,
679 + .mode = CIPHER_MODE_CBC,
682 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_ECB,
683 + .cfgword_dec = CIPH_DECR | ALGO_AES | MOD_ECB,
685 + .type = CIPHER_TYPE_AES,
686 + .mode = CIPHER_MODE_ECB,
689 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CBC_ENC,
690 + .cfgword_dec = CIPH_DECR | ALGO_AES | MOD_CBC_DEC,
693 + .type = CIPHER_TYPE_AES,
694 + .mode = CIPHER_MODE_CBC,
697 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CTR,
698 + .cfgword_dec = CIPH_ENCR | ALGO_AES | MOD_CTR,
701 + .type = CIPHER_TYPE_AES,
702 + .mode = CIPHER_MODE_CTR,
706 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CCM_ENC,
707 + .cfgword_dec = CIPH_ENCR | ALGO_AES | MOD_CCM_DEC,
710 + .type = CIPHER_TYPE_AES,
711 + .mode = CIPHER_MODE_CCM,
715 +const struct ix_hash_algo *ix_hash_by_id(int type)
719 + for(i=0; i<ARRAY_SIZE(_hash_algos); i++) {
720 + if (_hash_algos[i].type == type)
721 + return _hash_algos + i;
726 +const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode)
730 + for(i=0; i<ARRAY_SIZE(_cipher_algos); i++) {
731 + if (_cipher_algos[i].type==type && _cipher_algos[i].mode==mode)
732 + return _cipher_algos + i;
737 +static void irqcb_recv(struct qm_queue *queue);
739 +static int init_sa_master(struct ix_sa_master *master)
741 + struct npe_info *npe;
744 + if (! (ix_fuse() & (IX_FUSE_HASH | IX_FUSE_AES | IX_FUSE_DES))) {
745 + printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
748 + memset(master, 0, sizeof(struct ix_sa_master));
749 + master->npe_dev = get_npe_by_id(NPE_ID);
750 + if (! master->npe_dev)
753 + npe = dev_get_drvdata(master->npe_dev);
755 + if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
756 + switch (npe->img_info[1]) {
758 + printk(KERN_INFO "Crypto AES avaialable\n");
761 + printk(KERN_INFO "Crypto AES and CCM avaialable\n");
764 + printk(KERN_WARNING "Current microcode for %s has no"
765 + " crypto capabilities\n", npe->plat->name);
769 + rwlock_init(&master->lock);
770 + master->dmapool = dma_pool_create("ixp4xx_crypto", master->npe_dev,
771 + sizeof(struct npe_crypt_cont), 32, 0);
772 + if (!master->dmapool) {
776 + master->sendq = request_queue(SEND_QID, QUEUE_SIZE);
777 + if (IS_ERR(master->sendq)) {
778 + printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
780 + ret = PTR_ERR(master->sendq);
783 + master->recvq = request_queue(RECV_QID, QUEUE_SIZE);
784 + if (IS_ERR(master->recvq)) {
785 + printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
787 + ret = PTR_ERR(master->recvq);
788 + release_queue(master->sendq);
792 + master->recvq->irq_cb = irqcb_recv;
793 + queue_set_watermarks(master->recvq, 0, 0);
794 + queue_set_irq_src(master->recvq, Q_IRQ_ID_NOT_E);
795 + queue_enable_irq(master->recvq);
796 + printk(KERN_INFO "ixp4xx_crypto " MY_VERSION " registered successfully\n");
800 + if (master->dmapool)
801 + dma_pool_destroy(master->dmapool);
802 + if (! master->npe_dev)
803 + put_device(master->npe_dev);
808 +static void release_sa_master(struct ix_sa_master *master)
810 + struct npe_crypt_cont *cont;
811 + unsigned long flags;
813 + write_lock_irqsave(&master->lock, flags);
814 + while (master->pool) {
815 + cont = master->pool;
816 + master->pool = cont->next;
817 + dma_pool_free(master->dmapool, cont, cont->phys);
818 + master->pool_size--;
820 + write_unlock_irqrestore(&master->lock, flags);
821 + if (master->pool_size) {
822 + printk(KERN_ERR "ixp4xx_crypto: %d items lost from DMA pool\n",
823 + master->pool_size);
826 + dma_pool_destroy(master->dmapool);
827 + release_queue(master->sendq);
828 + release_queue(master->recvq);
829 + return_npe_dev(master->npe_dev);
832 +static struct npe_crypt_cont *ix_sa_get_cont(struct ix_sa_master *master)
834 + unsigned long flags;
835 + struct npe_crypt_cont *cont;
838 + write_lock_irqsave(&master->lock, flags);
839 + if (!master->pool) {
840 + cont = dma_pool_alloc(master->dmapool, GFP_ATOMIC, &handle);
842 + master->pool_size++;
843 + cont->phys = handle;
847 + cont = master->pool;
848 + master->pool = cont->next;
850 + write_unlock_irqrestore(&master->lock, flags);
855 +ix_sa_return_cont(struct ix_sa_master *master,struct npe_crypt_cont *cont)
857 + unsigned long flags;
859 + write_lock_irqsave(&master->lock, flags);
860 + cont->next = master->pool;
861 + master->pool = cont;
862 + write_unlock_irqrestore(&master->lock, flags);
865 +static void free_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
867 + memset(dir->npe_ctx, 0, NPE_CTX_LEN);
868 + dma_pool_free(sa_ctx->master->dmapool, dir->npe_ctx,
869 + dir->npe_ctx_phys);
872 +static void ix_sa_ctx_destroy(struct ix_sa_ctx *sa_ctx)
874 + BUG_ON(sa_ctx->state != STATE_UNLOADING);
875 + free_sa_dir(sa_ctx, &sa_ctx->encrypt);
876 + free_sa_dir(sa_ctx, &sa_ctx->decrypt);
878 + module_put(THIS_MODULE);
881 +static void recv_pack(struct qm_queue *queue, u32 phys)
883 + struct ix_sa_ctx *sa_ctx;
884 + struct npe_crypt_cont *cr_cont;
885 + struct npe_cont *cont;
888 + failed = phys & 0x1;
891 + cr_cont = dma_to_virt(queue->dev, phys);
892 + cr_cont = cr_cont->virt;
893 + sa_ctx = cr_cont->ctl.crypt.sa_ctx;
895 + phys = npe_to_cpu32(cr_cont->ctl.crypt.src_buf);
897 + cont = dma_to_virt(queue->dev, phys);
902 + if (cr_cont->ctl.crypt.oper_type == OP_PERFORM) {
903 + dma_unmap_single(sa_ctx->master->npe_dev,
904 + cont->eth.phys_addr,
906 + DMA_BIDIRECTIONAL);
907 + if (sa_ctx->perf_cb)
908 + sa_ctx->perf_cb(sa_ctx, cont->data, failed);
909 + qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
910 + ix_sa_return_cont(sa_ctx->master, cr_cont);
911 + if (atomic_dec_and_test(&sa_ctx->use_cnt))
912 + ix_sa_ctx_destroy(sa_ctx);
916 + /* We are registering */
917 + switch (cr_cont->ctl.crypt.mode) {
918 + case NPE_OP_HASH_GEN_ICV:
919 + /* 1 out of 2 HMAC preparation operations completed */
920 + dma_unmap_single(sa_ctx->master->npe_dev,
921 + cont->eth.phys_addr,
925 + qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
927 + case NPE_OP_ENC_GEN_KEY:
928 + memcpy(sa_ctx->decrypt.npe_ctx + sizeof(u32),
929 + sa_ctx->rev_aes->ctl.rev_aes_key + sizeof(u32),
930 + sa_ctx->c_key.len);
931 + /* REV AES data not needed anymore, free it */
932 + ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
933 + sa_ctx->rev_aes = NULL;
936 + printk(KERN_ERR "Unknown crypt-register mode: %x\n",
937 + cr_cont->ctl.crypt.mode);
940 + if (cr_cont->ctl.crypt.oper_type == OP_REG_DONE) {
941 + if (sa_ctx->state == STATE_UNREGISTERED)
942 + sa_ctx->state = STATE_REGISTERED;
943 + if (sa_ctx->reg_cb)
944 + sa_ctx->reg_cb(sa_ctx, failed);
946 + ix_sa_return_cont(sa_ctx->master, cr_cont);
947 + if (atomic_dec_and_test(&sa_ctx->use_cnt))
948 + ix_sa_ctx_destroy(sa_ctx);
951 +static void irqcb_recv(struct qm_queue *queue)
955 + queue_ack_irq(queue);
956 + while ((phys = queue_get_entry(queue)))
957 + recv_pack(queue, phys);
960 +static int init_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
962 + dir->npe_ctx = dma_pool_alloc(sa_ctx->master->dmapool,
963 + sa_ctx->gfp_flags, &dir->npe_ctx_phys);
964 + if (!dir->npe_ctx) {
967 + memset(dir->npe_ctx, 0, NPE_CTX_LEN);
971 +struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags)
973 + struct ix_sa_ctx *sa_ctx;
974 + struct ix_sa_master *master = &sa_master;
975 + struct npe_info *npe = dev_get_drvdata(master->npe_dev);
977 + /* first check if Microcode was downloaded into this NPE */
978 + if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
979 + printk(KERN_ERR "%s not running\n", npe->plat->name);
982 + switch (npe->img_info[1]) {
987 + /* No crypto Microcode */
990 + if (!try_module_get(THIS_MODULE)) {
994 + sa_ctx = kzalloc(sizeof(struct ix_sa_ctx) + priv_len, flags);
999 + sa_ctx->master = master;
1000 + sa_ctx->gfp_flags = flags;
1002 + if (init_sa_dir(sa_ctx, &sa_ctx->encrypt))
1004 + if (init_sa_dir(sa_ctx, &sa_ctx->decrypt)) {
1005 + free_sa_dir(sa_ctx, &sa_ctx->encrypt);
1009 + sa_ctx->priv = sa_ctx + 1;
1011 + atomic_set(&sa_ctx->use_cnt, 1);
1017 + module_put(THIS_MODULE);
1021 +void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx)
1023 + sa_ctx->state = STATE_UNLOADING;
1024 + if (atomic_dec_and_test(&sa_ctx->use_cnt))
1025 + ix_sa_ctx_destroy(sa_ctx);
1027 + printk("ix_sa_ctx_free -> delayed: %p %d\n",
1028 + sa_ctx, atomic_read(&sa_ctx->use_cnt));
1031 +/* http://www.ietf.org/rfc/rfc2104.txt */
1032 +#define HMAC_IPAD_VALUE 0x36
1033 +#define HMAC_OPAD_VALUE 0x5C
1034 +#define PAD_BLOCKLEN 64
1036 +static int register_chain_var(struct ix_sa_ctx *sa_ctx,
1037 + unsigned char *pad, u32 target, int init_len, u32 ctx_addr, int oper)
1039 + struct npe_crypt_cont *cr_cont;
1040 + struct npe_cont *cont;
1042 + cr_cont = ix_sa_get_cont(sa_ctx->master);
1046 + cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1047 + cr_cont->ctl.crypt.auth_offs = 0;
1048 + cr_cont->ctl.crypt.auth_len =cpu_to_npe16(PAD_BLOCKLEN);
1049 + cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(ctx_addr);
1051 + cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev));
1053 + ix_sa_return_cont(sa_ctx->master, cr_cont);
1058 + cont->eth.next = 0;
1059 + cont->eth.buf_len = cpu_to_npe16(PAD_BLOCKLEN);
1060 + cont->eth.pkt_len = 0;
1062 + cont->eth.phys_addr = cpu_to_npe32(dma_map_single(
1063 + sa_ctx->master->npe_dev, pad, PAD_BLOCKLEN, DMA_TO_DEVICE));
1065 + cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys);
1066 + cr_cont->ctl.crypt.oper_type = oper;
1068 + cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(target);
1069 + cr_cont->ctl.crypt.mode = NPE_OP_HASH_GEN_ICV;
1070 + cr_cont->ctl.crypt.init_len = init_len;
1072 + atomic_inc(&sa_ctx->use_cnt);
1073 + queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1074 + if (queue_stat(sa_ctx->master->sendq) == 2) { /* overflow */
1075 + atomic_dec(&sa_ctx->use_cnt);
1076 + qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev),
1078 + ix_sa_return_cont(sa_ctx->master, cr_cont);
1085 + * 0 if nothing registered,
1086 + * 1 if something registered and
1089 +static int ix_sa_ctx_setup_auth(struct ix_sa_ctx *sa_ctx,
1090 + const struct ix_hash_algo *algo, int len, int oper, int encrypt)
1092 + unsigned char *ipad, *opad;
1093 + u32 itarget, otarget, ctx_addr;
1094 + unsigned char *cinfo;
1095 + int init_len, i, ret = 0;
1096 + struct qm_qmgr *qmgr;
1097 + struct ix_sa_dir *dir;
1100 + dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1101 + cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1103 + qmgr = dev_get_drvdata(sa_ctx->master->sendq->dev);
1105 + cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1106 + sa_ctx->h_algo = algo;
1109 + dir->npe_mode |= NPE_OP_HMAC_DISABLE;
1112 + if (algo->type == HASH_TYPE_CBCMAC) {
1113 + dir->npe_mode |= NPE_OP_CCM_ENABLE | NPE_OP_HMAC_DISABLE;
1116 + if (sa_ctx->h_key.len > 64 || sa_ctx->h_key.len < algo->digest_len)
1118 + if (len > algo->digest_len || (len % 4))
1121 + len = algo->digest_len;
1123 + sa_ctx->digest_len = len;
1125 + /* write cfg word to cryptinfo */
1126 + cfgword = algo->cfgword | ((len/4) << 8);
1127 + *(u32*)cinfo = cpu_to_be32(cfgword);
1128 + cinfo += sizeof(cfgword);
1130 + /* write ICV to cryptinfo */
1131 + memcpy(cinfo, algo->icv, algo->digest_len);
1132 + cinfo += algo->digest_len;
1134 + itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
1135 + + sizeof(algo->cfgword);
1136 + otarget = itarget + algo->digest_len;
1138 + opad = kzalloc(PAD_BLOCKLEN, sa_ctx->gfp_flags | GFP_DMA);
1142 + ipad = kzalloc(PAD_BLOCKLEN, sa_ctx->gfp_flags | GFP_DMA);
1147 + memcpy(ipad, sa_ctx->h_key.key, sa_ctx->h_key.len);
1148 + memcpy(opad, sa_ctx->h_key.key, sa_ctx->h_key.len);
1149 + for (i = 0; i < PAD_BLOCKLEN; i++) {
1150 + ipad[i] ^= HMAC_IPAD_VALUE;
1151 + opad[i] ^= HMAC_OPAD_VALUE;
1153 + init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
1154 + ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
1156 + dir->npe_ctx_idx += init_len;
1157 + dir->npe_mode |= NPE_OP_HASH_ENABLE;
1160 + dir->npe_mode |= NPE_OP_HASH_VERIFY;
1162 + /* register first chainvar */
1163 + ret = register_chain_var(sa_ctx, opad, otarget,
1164 + init_len, ctx_addr, OP_REGISTER);
1171 + /* register second chainvar */
1172 + ret = register_chain_var(sa_ctx, ipad, itarget,
1173 + init_len, ctx_addr, oper);
1182 +static int gen_rev_aes_key(struct ix_sa_ctx *sa_ctx,
1183 + u32 keylen_cfg, int cipher_op)
1185 + unsigned char *cinfo;
1186 + struct npe_crypt_cont *cr_cont;
1188 + keylen_cfg |= CIPH_ENCR | ALGO_AES | MOD_ECB;
1189 + sa_ctx->rev_aes = ix_sa_get_cont(sa_ctx->master);
1190 + if (!sa_ctx->rev_aes)
1193 + cinfo = sa_ctx->rev_aes->ctl.rev_aes_key;
1194 + *(u32*)cinfo = cpu_to_be32(keylen_cfg);
1195 + cinfo += sizeof(keylen_cfg);
1197 + memcpy(cinfo, sa_ctx->c_key.key, sa_ctx->c_key.len);
1199 + cr_cont = ix_sa_get_cont(sa_ctx->master);
1201 + ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
1202 + sa_ctx->rev_aes = NULL;
1205 + cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1206 + cr_cont->ctl.crypt.oper_type = cipher_op;
1208 + cr_cont->ctl.crypt.crypt_offs = 0;
1209 + cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(AES_BLOCK128);
1210 + cr_cont->ctl.crypt.addr.rev_aes = cpu_to_npe32(
1211 + sa_ctx->rev_aes->phys + sizeof(keylen_cfg));
1213 + cr_cont->ctl.crypt.src_buf = 0;
1214 + cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(sa_ctx->rev_aes->phys);
1215 + cr_cont->ctl.crypt.mode = NPE_OP_ENC_GEN_KEY;
1216 + cr_cont->ctl.crypt.init_len = sa_ctx->decrypt.npe_ctx_idx;
1218 + atomic_inc(&sa_ctx->use_cnt);
1219 + queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1220 + if (queue_stat(sa_ctx->master->sendq) == 2) { /* overflow */
1221 + atomic_dec(&sa_ctx->use_cnt);
1222 + ix_sa_return_cont(sa_ctx->master, cr_cont);
1223 + ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
1224 + sa_ctx->rev_aes = NULL;
1232 + * 0 if nothing registered,
1233 + * 1 if something registered and
1236 +static int ix_sa_ctx_setup_cipher(struct ix_sa_ctx *sa_ctx,
1237 + const struct ix_cipher_algo *algo, int cipher_op, int encrypt)
1239 + unsigned char *cinfo;
1240 + int keylen, init_len;
1242 + u32 keylen_cfg = 0;
1243 + struct ix_sa_dir *dir;
1245 + dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1246 + cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1248 + sa_ctx->c_algo = algo;
1253 + if (algo->type == CIPHER_TYPE_DES && sa_ctx->c_key.len != 8)
1256 + if (algo->type == CIPHER_TYPE_3DES && sa_ctx->c_key.len != 24)
1262 + cipher_cfg = algo->cfgword_enc;
1263 + dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
1265 + cipher_cfg = algo->cfgword_dec;
1267 + if (algo->type == CIPHER_TYPE_AES) {
1268 + switch (sa_ctx->c_key.len) {
1269 + case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
1270 + case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
1271 + case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
1272 + default: return -EINVAL;
1274 + keylen = sa_ctx->c_key.len;
1275 + cipher_cfg |= keylen_cfg;
1278 + /* write cfg word to cryptinfo */
1279 + *(u32*)cinfo = cpu_to_be32(cipher_cfg);
1280 + cinfo += sizeof(cipher_cfg);
1282 + /* write cipher key to cryptinfo */
1283 + memcpy(cinfo, sa_ctx->c_key.key, sa_ctx->c_key.len);
1286 + init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
1287 + dir->npe_ctx_idx += init_len;
1289 + dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
1291 + if (algo->type == CIPHER_TYPE_AES && !encrypt) {
1292 + return gen_rev_aes_key(sa_ctx, keylen_cfg, cipher_op);
1298 +/* returns 0 on OK, <0 on error and 1 on overflow */
1299 +int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr,
1300 + int datalen, int c_offs, int c_len, int a_offs, int a_len,
1301 + int hmac, char *iv, int encrypt)
1303 + struct npe_crypt_cont *cr_cont;
1304 + struct npe_cont *cont;
1306 + int ret = -ENOMEM;
1307 + struct ix_sa_dir *dir;
1309 + dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1311 + if (sa_ctx->state != STATE_REGISTERED)
1314 + cr_cont = ix_sa_get_cont(sa_ctx->master);
1318 + cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1319 + cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(dir->npe_ctx_phys);
1320 + cr_cont->ctl.crypt.oper_type = OP_PERFORM;
1321 + cr_cont->ctl.crypt.mode = dir->npe_mode;
1322 + cr_cont->ctl.crypt.init_len = dir->npe_ctx_idx;
1324 + if (sa_ctx->c_algo) {
1325 + cr_cont->ctl.crypt.crypt_offs = cpu_to_npe16(c_offs);
1326 + cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(c_len);
1327 + if (sa_ctx->c_algo->iv_len) {
1332 + memcpy(cr_cont->ctl.crypt.iv, iv,
1333 + sa_ctx->c_algo->iv_len);
1337 + if (sa_ctx->h_algo) {
1338 + /* prepare hashing */
1339 + cr_cont->ctl.crypt.auth_offs = cpu_to_npe16(a_offs);
1340 + cr_cont->ctl.crypt.auth_len = cpu_to_npe16(a_len);
1343 + data_phys = dma_map_single(sa_ctx->master->npe_dev,
1344 + data, datalen, DMA_BIDIRECTIONAL);
1346 + cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(data_phys + hmac);
1348 + /* Prepare the data ptr */
1349 + cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev));
1355 + cont->eth.next = 0;
1356 + cont->eth.buf_len = cpu_to_npe16(datalen);
1357 + cont->eth.pkt_len = 0;
1359 + cont->eth.phys_addr = cpu_to_npe32(data_phys);
1360 + cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys);
1362 + atomic_inc(&sa_ctx->use_cnt);
1363 + queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1364 + if (queue_stat(sa_ctx->master->sendq) != 2) {
1369 + printk("%s: Overflow\n", __FUNCTION__);
1371 + atomic_dec(&sa_ctx->use_cnt);
1372 + qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev), cont);
1375 + dma_unmap_single(sa_ctx->master->npe_dev, data_phys, datalen,
1376 + DMA_BIDIRECTIONAL);
1378 + ix_sa_return_cont(sa_ctx->master, cr_cont);
1383 +int ix_sa_ctx_setup_cipher_auth(struct ix_sa_ctx *sa_ctx,
1384 + const struct ix_cipher_algo *cipher,
1385 + const struct ix_hash_algo *auth, int len)
1387 + int ret = 0, sum = 0;
1390 + if (sa_ctx->state != STATE_UNREGISTERED)
1393 + atomic_inc(&sa_ctx->use_cnt);
1395 + cipher_op = auth ? OP_REGISTER : OP_REG_DONE;
1396 + if ((ret = ix_sa_ctx_setup_cipher(sa_ctx, cipher, OP_REGISTER, 1)) < 0)
1399 + if ((ret = ix_sa_ctx_setup_cipher(sa_ctx, cipher, cipher_op, 0)) < 0)
1402 + if ((ret = ix_sa_ctx_setup_auth(sa_ctx, auth, len, OP_REGISTER, 1)) < 0)
1405 + if ((ret = ix_sa_ctx_setup_auth(sa_ctx, auth, len, OP_REG_DONE, 0)) < 0)
1409 + /* Nothing registered ?
1410 + * Ok, then we are done and call the callback here.
1413 + if (sa_ctx->state == STATE_UNREGISTERED)
1414 + sa_ctx->state = STATE_REGISTERED;
1415 + if (sa_ctx->reg_cb)
1416 + sa_ctx->reg_cb(sa_ctx, 0);
1419 + atomic_dec(&sa_ctx->use_cnt);
1423 +static int __init init_crypto(void)
1425 + return init_sa_master(&sa_master);
1428 +static void __exit finish_crypto(void)
1430 + release_sa_master(&sa_master);
1433 +MODULE_LICENSE("GPL");
1434 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1436 +EXPORT_SYMBOL(ix_hash_by_id);
1437 +EXPORT_SYMBOL(ix_cipher_by_id);
1439 +EXPORT_SYMBOL(ix_sa_ctx_new);
1440 +EXPORT_SYMBOL(ix_sa_ctx_free);
1441 +EXPORT_SYMBOL(ix_sa_ctx_setup_cipher_auth);
1442 +EXPORT_SYMBOL(ix_sa_crypto_perform);
1444 +module_init(init_crypto);
1445 +module_exit(finish_crypto);
1447 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_qmgr.c
1448 ===================================================================
1449 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
1450 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_qmgr.c 2007-02-21 02:24:35.000000000 -0800
1453 + * qmgr.c - reimplementation of the queue configuration interface.
1455 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1457 + * This file is released under the GPLv2
1460 +#include <linux/kernel.h>
1461 +#include <linux/module.h>
1462 +#include <linux/platform_device.h>
1463 +#include <linux/fs.h>
1464 +#include <linux/init.h>
1465 +#include <linux/slab.h>
1466 +#include <linux/dmapool.h>
1467 +#include <linux/interrupt.h>
1468 +#include <linux/err.h>
1469 +#include <linux/delay.h>
1470 +#include <asm/uaccess.h>
1471 +#include <asm/io.h>
1473 +#include <linux/ixp_qmgr.h>
1474 +#include <linux/ixp_npe.h>
1476 +#define IXQMGR_VERSION "IXP4XX Q Manager 0.2.1"
1478 +static struct device *qmgr_dev = NULL;
1480 +static int poll_freq = 4000;
1481 +static int poll_enable = 0;
1482 +static u32 timer_countup_ticks;
1484 +module_param(poll_freq, int, 0644);
1485 +module_param(poll_enable, int, 0644);
1487 +int queue_len(struct qm_queue *queue)
1489 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1493 + offs = queue->id/8 + QUE_LOW_STAT0;
1494 + val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
1496 + diff = (val - (val >> 7)) & 0x7f;
1498 + /* diff == 0 means either empty or full, must look at STAT0 */
1499 + if ((*(qmgr->addr + offs) >> ((queue->id % 8)*4)) & 0x04)
1500 + diff = queue->len;
1505 +static int request_pool(struct device *dev, int count)
1508 + struct npe_cont *cont;
1509 + struct qm_qmgr *qmgr = dev_get_drvdata(dev);
1510 + dma_addr_t handle;
1512 + for (i=0; i<count; i++) {
1513 + cont = dma_pool_alloc(qmgr->dmapool, GFP_KERNEL, &handle);
1517 + cont->phys = handle;
1518 + cont->virt = cont;
1519 + write_lock(&qmgr->lock);
1520 + cont->next = qmgr->pool;
1521 + qmgr->pool = cont;
1522 + write_unlock(&qmgr->lock);
1527 +static int free_pool(struct device *dev, int count)
1530 + struct npe_cont *cont;
1531 + struct qm_qmgr *qmgr = dev_get_drvdata(dev);
1533 + for (i=0; i<count; i++) {
1534 + write_lock(&qmgr->lock);
1535 + cont = qmgr->pool;
1537 + write_unlock(&qmgr->lock);
1540 + qmgr->pool = cont->next;
1541 + write_unlock(&qmgr->lock);
1542 + dma_pool_free(qmgr->dmapool, cont, cont->phys);
1547 +static int get_free_qspace(struct qm_qmgr *qmgr, int len)
1549 + int words = (qmgr->res->end - qmgr->res->start + 1) / 4 -
1550 + IX_QMGR_SRAM_SPACE;
1553 + for (i=0; i<words; i+=len) {
1554 + for (q=0; q<MAX_QUEUES; q++) {
1555 + struct qm_queue *qu = qmgr->queues[q];
1558 + if ((qu->addr + qu->len > i) && (qu->addr < i + len))
1561 + if (q == MAX_QUEUES) {
1562 + /* we have a free address */
1569 +static inline int _log2(int x)
1578 + * 32bit Config registers at IX_QMGR_QUECONFIG_BASE_OFFSET[Qid]
1579 + * 0 - 6 WRPTR Word offset to baseaddr (index 0 .. BSIZE-1)
1581 + * 14 -21 BADDR baseaddr = (offset to IX_QMGR_QUEBUFFER_SPACE_OFFSET) >> 6
1582 + * 22 -23 ESIZE entrySizeInWords (always 00 because entrySizeInWords==1)
1583 + * 24 -25 BSIZE qSizeInWords 00=16,01=32,10=64,11=128
1584 + * 26 -28 NE nearly empty
1585 + * 29 -31 NF nearly full
1587 +static int conf_q_regs(struct qm_queue *queue)
1589 + int bsize = _log2(queue->len/16);
1590 + int baddr = queue->addr + IX_QMGR_QCFG_SIZE;
1592 + /* +2, because baddr is in words and not in bytes */
1593 + queue_write_cfg_reg(queue, (bsize << 24) | (baddr<<(14-6+2)) );
1598 +static void pmu_timer_restart(void)
1600 + unsigned long flags;
1602 + local_irq_save(flags);
1604 + __asm__(" mcr p14,0,%0,c1,c1,0\n" /* write current counter */
1605 + : : "r" (timer_countup_ticks));
1607 + __asm__(" mrc p14,0,r1,c4,c1,0; " /* get int enable register */
1609 + " mcr p14,0,r1,c5,c1,0; " /* clear overflow */
1610 + " mcr p14,0,r1,c4,c1,0\n" /* enable interrupts */
1613 + local_irq_restore(flags);
1616 +static void pmu_timer_init(void)
1618 + u32 controlRegisterMask =
1619 + BIT(0) | /* enable counters */
1620 + BIT(2); /* reset clock counter; */
1623 + * Compute the number of xscale cycles needed between each
1624 + * PMU IRQ. This is done from the result of an OS calibration loop.
1626 + * For 533MHz CPU, 533000000 tick/s / 4000 times/sec = 138250
1627 + * 4000 times/sec = 37 mbufs/interrupt at line rate
1628 + * The pmu timer is reset to -138250 = 0xfffde3f6, to trigger an IRQ
1629 + * when this up counter overflows.
1631 + * The multiplication gives a number of instructions per second.
1632 + * which is close to the processor frequency, and then close to the
1635 + * 2 is the number of instructions per loop
1639 + timer_countup_ticks = - ((loops_per_jiffy * HZ * 2) / poll_freq);
1641 + /* enable the CCNT (clock count) timer from the PMU */
1642 + __asm__(" mcr p14,0,%0,c0,c1,0\n"
1643 + : : "r" (controlRegisterMask));
1646 +static void pmu_timer_disable(void)
1648 + unsigned long flags;
1650 + local_irq_save(flags);
1652 + __asm__(" mrc p14,0,r1,c4,c1,0; " /* get int enable register */
1653 + " and r1,r1,#0x1e; "
1654 + " mcr p14,0,r1,c4,c1,0\n" /* disable interrupts */
1656 + local_irq_restore(flags);
1659 +void queue_set_watermarks(struct qm_queue *queue, unsigned ne, unsigned nf)
1662 + /* calculate the register values
1663 + * 0->0, 1->1, 2->2, 4->3, 8->4, 16->5...*/
1664 + ne = _log2(ne<<1) & 0x7;
1665 + nf = _log2(nf<<1) & 0x7;
1667 + /* Mask out old watermarks */
1668 + val = queue_read_cfg_reg(queue) & ~0xfc000000;
1669 + queue_write_cfg_reg(queue, val | (ne << 26) | (nf << 29));
1672 +int queue_set_irq_src(struct qm_queue *queue, int flag)
1674 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1676 + int offs, bitoffs;
1678 + /* Q 0-7 are in REG0, 8-15 are in REG1, etc. They occupy 4 bits/Q */
1679 + offs = queue->id/8 + INT0_SRC_SELREG0;
1680 + bitoffs = (queue->id % 8)*4;
1682 + reg = *(qmgr->addr + offs) & ~(0xf << bitoffs);
1683 + *(qmgr->addr + offs) = reg | (flag << bitoffs);
1688 +static irqreturn_t irq_qm1(int irq, void *dev_id)
1690 + struct qm_qmgr *qmgr = dev_id;
1692 + struct qm_queue *queue;
1695 + pmu_timer_restart();
1697 + reg = *(qmgr->addr + QUE_INT_REG0);
1700 + * count leading zeros. "offs" gets
1701 + * the amount of leading 0 in "reg"
1703 + asm ("clz %0, %1;" : "=r"(offs) : "r"(reg));
1705 + reg &= ~(1 << offs);
1706 + queue = qmgr->queues[offs];
1707 + if (likely(queue)) {
1708 + if (likely(queue->irq_cb)) {
1709 + queue->irq_cb(queue);
1711 + printk(KERN_ERR "Missing callback for Q %d\n",
1715 + printk(KERN_ERR "IRQ for unregistered Q %d\n", offs);
1718 + return IRQ_HANDLED;
1721 +struct qm_queue *request_queue(int qid, int len)
1724 + struct qm_qmgr *qmgr;
1725 + struct qm_queue *queue;
1728 + return ERR_PTR(-ENODEV);
1730 + if ((qid < 0) || (qid > MAX_QUEUES))
1731 + return ERR_PTR(-ERANGE);
1738 + default : return ERR_PTR(-EINVAL);
1741 + qmgr = dev_get_drvdata(qmgr_dev);
1743 + if (qmgr->queues[qid]) {
1744 + /* not an error, just in use already */
1747 + if ((ram = get_free_qspace(qmgr, len)) < 0) {
1748 + printk(KERN_ERR "No free SRAM space for this queue\n");
1749 + return ERR_PTR(-ENOMEM);
1751 + if (!(queue = kzalloc(sizeof(struct qm_queue), GFP_KERNEL)))
1752 + return ERR_PTR(-ENOMEM);
1754 + if (!try_module_get(THIS_MODULE)) {
1756 + return ERR_PTR(-ENODEV);
1759 + queue->addr = ram;
1762 + queue->dev = get_device(qmgr_dev);
1763 + queue->acc_reg = qmgr->addr + (4 * qid);
1764 + qmgr->queues[qid] = queue;
1765 + if (request_pool(qmgr_dev, len)) {
1766 + printk(KERN_ERR "Failed to request DMA pool of Q %d\n", qid);
1769 + conf_q_regs(queue);
1773 +void release_queue(struct qm_queue *queue)
1775 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1777 + BUG_ON(qmgr->queues[queue->id] != queue);
1778 + qmgr->queues[queue->id] = NULL;
1780 + if (free_pool(queue->dev, queue->len)) {
1781 + printk(KERN_ERR "Failed to release DMA pool of Q %d\n",
1784 + queue_disable_irq(queue);
1785 + queue_write_cfg_reg(queue, 0);
1787 + module_put(THIS_MODULE);
1788 + put_device(queue->dev);
1795 +static int qmgr_probe(struct platform_device *pdev)
1797 + struct resource *res;
1798 + struct qm_qmgr *qmgr;
1799 + int size, ret=0, i;
1801 + if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
1804 + if ((i = platform_get_irq(pdev, 0)) < 0)
1807 + if (!(qmgr = kzalloc(sizeof(struct qm_qmgr), GFP_KERNEL)))
1811 + size = res->end - res->start +1;
1812 + qmgr->res = request_mem_region(res->start, size, "ixp_qmgr");
1818 + qmgr->addr = ioremap(res->start, size);
1819 + if (!qmgr->addr) {
1824 + /* Reset Q registers */
1825 + for (i=0; i<4; i++)
1826 + *(qmgr->addr + QUE_LOW_STAT0 +i) = 0x33333333;
1827 + for (i=0; i<10; i++)
1828 + *(qmgr->addr + QUE_UO_STAT0 +i) = 0x0;
1829 + for (i=0; i<4; i++)
1830 + *(qmgr->addr + INT0_SRC_SELREG0 +i) = 0x0;
1831 + for (i=0; i<2; i++) {
1832 + *(qmgr->addr + QUE_IE_REG0 +i) = 0x00;
1833 + *(qmgr->addr + QUE_INT_REG0 +i) = 0xffffffff;
1835 + for (i=0; i<64; i++) {
1836 + *(qmgr->addr + IX_QMGR_QCFG_BASE + i) = 0x0;
1839 + if (poll_enable) {
1841 + qmgr->irq = IRQ_IXP4XX_XSCALE_PMU;
1843 + ret = request_irq(qmgr->irq, irq_qm1, SA_SHIRQ | SA_INTERRUPT,
1846 + printk(KERN_ERR "Failed to request IRQ(%d)\n", qmgr->irq);
1851 + pmu_timer_restart();
1853 + rwlock_init(&qmgr->lock);
1854 + qmgr->dmapool = dma_pool_create("qmgr", &pdev->dev,
1855 + sizeof(struct npe_cont), 32, 0);
1856 + platform_set_drvdata(pdev, qmgr);
1858 + qmgr_dev = &pdev->dev;
1860 + printk(KERN_INFO IXQMGR_VERSION " initialized.\n");
1865 + release_resource(qmgr->res);
1871 +static int qmgr_remove(struct platform_device *pdev)
1873 + struct qm_qmgr *qmgr = platform_get_drvdata(pdev);
1876 + for (i=0; i<MAX_QUEUES; i++) {
1877 + if (qmgr->queues[i]) {
1878 + printk(KERN_ERR "WARNING Unreleased Q: %d\n", i);
1879 + release_queue(qmgr->queues[i]);
1884 + pmu_timer_disable();
1886 + synchronize_irq (qmgr->irq);
1887 + free_irq(qmgr->irq, qmgr);
1889 + dma_pool_destroy(qmgr->dmapool);
1890 + iounmap(qmgr->addr);
1891 + release_resource(qmgr->res);
1892 + platform_set_drvdata(pdev, NULL);
1898 +static struct platform_driver ixp4xx_qmgr = {
1899 + .driver.name = "ixp4xx_qmgr",
1900 + .probe = qmgr_probe,
1901 + .remove = qmgr_remove,
1905 +static int __init init_qmgr(void)
1907 + return platform_driver_register(&ixp4xx_qmgr);
1910 +static void __exit finish_qmgr(void)
1912 + platform_driver_unregister(&ixp4xx_qmgr);
1915 +module_init(init_qmgr);
1916 +module_exit(finish_qmgr);
1918 +MODULE_LICENSE("GPL");
1919 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1921 +EXPORT_SYMBOL(request_queue);
1922 +EXPORT_SYMBOL(release_queue);
1923 +EXPORT_SYMBOL(queue_set_irq_src);
1924 +EXPORT_SYMBOL(queue_set_watermarks);
1925 +EXPORT_SYMBOL(queue_len);
1926 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac.h
1927 ===================================================================
1928 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
1929 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac.h 2007-02-21 02:24:35.000000000 -0800
1932 + * Copyright (C) 2002-2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1934 + * This file is released under the GPLv2
1937 +#include <linux/resource.h>
1938 +#include <linux/netdevice.h>
1939 +#include <linux/io.h>
1940 +#include <linux/mii.h>
1941 +#include <linux/workqueue.h>
1942 +#include <asm/hardware.h>
1943 +#include <linux/ixp_qmgr.h>
1945 +/* 32 bit offsets to be added to u32 *pointers */
1946 +#define MAC_TX_CNTRL1 0x00 // 0x000
1947 +#define MAC_TX_CNTRL2 0x01 // 0x004
1948 +#define MAC_RX_CNTRL1 0x04 // 0x010
1949 +#define MAC_RX_CNTRL2 0x05 // 0x014
1950 +#define MAC_RANDOM_SEED 0x08 // 0x020
1951 +#define MAC_THRESH_P_EMPTY 0x0c // 0x030
1952 +#define MAC_THRESH_P_FULL 0x0e // 0x038
1953 +#define MAC_BUF_SIZE_TX 0x10 // 0x040
1954 +#define MAC_TX_DEFER 0x14 // 0x050
1955 +#define MAC_RX_DEFER 0x15 // 0x054
1956 +#define MAC_TX_TWO_DEFER_1 0x18 // 0x060
1957 +#define MAC_TX_TWO_DEFER_2 0x19 // 0x064
1958 +#define MAC_SLOT_TIME 0x1c // 0x070
1959 +#define MAC_MDIO_CMD 0x20 // 0x080 4 registers 0x20 - 0x23
1960 +#define MAC_MDIO_STS 0x24 // 0x090 4 registers 0x24 - 0x27
1961 +#define MAC_ADDR_MASK 0x28 // 0x0A0 6 registers 0x28 - 0x2d
1962 +#define MAC_ADDR 0x30 // 0x0C0 6 registers 0x30 - 0x35
1963 +#define MAC_INT_CLK_THRESH 0x38 // 0x0E0 1 register
1964 +#define MAC_UNI_ADDR 0x3c // 0x0F0 6 registers 0x3c - 0x41
1965 +#define MAC_CORE_CNTRL 0x7f // 0x1fC
1967 +/* TX Control Register 1 */
1969 +#define TX_CNTRL1_TX_EN BIT(0)
1970 +#define TX_CNTRL1_DUPLEX BIT(1)
1971 +#define TX_CNTRL1_RETRY BIT(2)
1972 +#define TX_CNTRL1_PAD_EN BIT(3)
1973 +#define TX_CNTRL1_FCS_EN BIT(4)
1974 +#define TX_CNTRL1_2DEFER BIT(5)
1975 +#define TX_CNTRL1_RMII BIT(6)
1977 +/* TX Control Register 2 */
1978 +#define TX_CNTRL2_RETRIES_MASK 0xf
1980 +/* RX Control Register 1 */
1981 +#define RX_CNTRL1_RX_EN BIT(0)
1982 +#define RX_CNTRL1_PADSTRIP_EN BIT(1)
1983 +#define RX_CNTRL1_CRC_EN BIT(2)
1984 +#define RX_CNTRL1_PAUSE_EN BIT(3)
1985 +#define RX_CNTRL1_LOOP_EN BIT(4)
1986 +#define RX_CNTRL1_ADDR_FLTR_EN BIT(5)
1987 +#define RX_CNTRL1_RX_RUNT_EN BIT(6)
1988 +#define RX_CNTRL1_BCAST_DIS BIT(7)
1990 +/* RX Control Register 2 */
1991 +#define RX_CNTRL2_DEFER_EN BIT(0)
1993 +/* Core Control Register */
1994 +#define CORE_RESET BIT(0)
1995 +#define CORE_RX_FIFO_FLUSH BIT(1)
1996 +#define CORE_TX_FIFO_FLUSH BIT(2)
1997 +#define CORE_SEND_JAM BIT(3)
1998 +#define CORE_MDC_EN BIT(4)
2000 +/* Definitions for MII access routines*/
2002 +#define MII_REG_SHL 16
2003 +#define MII_ADDR_SHL 21
2005 +#define MII_GO BIT(31)
2006 +#define MII_WRITE BIT(26)
2007 +#define MII_READ_FAIL BIT(31)
2009 +#define MII_TIMEOUT_10TH_SECS 5
2010 +#define MII_10TH_SEC_IN_MILLIS 100
2018 +#define MAC_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
2020 +#define MAC_TX_CNTRL1_DEFAULT (\
2021 + TX_CNTRL1_TX_EN | \
2022 + TX_CNTRL1_RETRY | \
2023 + TX_CNTRL1_FCS_EN | \
2024 + TX_CNTRL1_2DEFER | \
2025 + TX_CNTRL1_PAD_EN )
2027 +#define MAC_TX_MAX_RETRIES_DEFAULT 0x0f
2029 +#define MAC_RX_CNTRL1_DEFAULT ( \
2030 + RX_CNTRL1_PADSTRIP_EN | \
2031 + RX_CNTRL1_CRC_EN | \
2034 +#define MAC_RX_CNTRL2_DEFAULT 0x0
2035 +#define MAC_TX_CNTRL2_DEFAULT TX_CNTRL2_RETRIES_MASK
2037 +/* Thresholds determined by NPE firmware FS */
2038 +#define MAC_THRESH_P_EMPTY_DEFAULT 0x12
2039 +#define MAC_THRESH_P_FULL_DEFAULT 0x30
2041 +/* Number of bytes that must be in the tx fifo before
2042 + * transmission commences */
2043 +#define MAC_BUF_SIZE_TX_DEFAULT 0x8
2045 +/* One-part deferral values */
2046 +#define MAC_TX_DEFER_DEFAULT 0x15
2047 +#define MAC_RX_DEFER_DEFAULT 0x16
2049 +/* Two-part deferral values... */
2050 +#define MAC_TX_TWO_DEFER_1_DEFAULT 0x08
2051 +#define MAC_TX_TWO_DEFER_2_DEFAULT 0x07
2053 +/* This value applies to MII */
2054 +#define MAC_SLOT_TIME_DEFAULT 0x80
2056 +/* This value applies to RMII */
2057 +#define MAC_SLOT_TIME_RMII_DEFAULT 0xFF
2059 +#define MAC_ADDR_MASK_DEFAULT 0xFF
2061 +#define MAC_INT_CLK_THRESH_DEFAULT 0x1
2062 +/* The following is a value chosen at random */
2063 +#define MAC_RANDOM_SEED_DEFAULT 0x8
2065 +/* By default we must configure the MAC to generate the MDC clock*/
2066 +#define CORE_DEFAULT (CORE_MDC_EN)
2068 +/* End of Intel provided register information */
2071 +mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg);
2073 +mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val);
2074 +extern void init_mdio(struct net_device *dev, int phy_id);
2077 + u32 __iomem *addr;
2078 + struct resource *res;
2079 + struct device *npe_dev;
2080 + struct net_device *netdev;
2081 + struct qm_qmgr *qmgr;
2082 + struct qm_queue *rxq;
2083 + struct qm_queue *txq;
2084 + struct qm_queue *rxdoneq;
2086 + struct net_device_stats stat;
2087 + struct mii_if_info mii;
2088 + struct delayed_work mdio_thread;
2092 + struct mac_plat_info *plat;
2094 + spinlock_t rx_lock;
2098 +static inline void mac_write_reg(struct mac_info *mac, int offset, u32 val)
2100 + *(mac->addr + offset) = val;
2102 +static inline u32 mac_read_reg(struct mac_info *mac, int offset)
2104 + return *(mac->addr + offset);
2106 +static inline void mac_set_regbit(struct mac_info *mac, int offset, u32 bit)
2108 + mac_write_reg(mac, offset, mac_read_reg(mac, offset) | bit);
2110 +static inline void mac_reset_regbit(struct mac_info *mac, int offset, u32 bit)
2112 + mac_write_reg(mac, offset, mac_read_reg(mac, offset) & ~bit);
2115 +static inline void mac_mdio_cmd_write(struct mac_info *mac, u32 cmd)
2118 + for(i=0; i<4; i++) {
2119 + mac_write_reg(mac, MAC_MDIO_CMD + i, cmd & 0xff);
2124 +#define mac_mdio_cmd_read(mac) mac_mdio_read((mac), MAC_MDIO_CMD)
2125 +#define mac_mdio_status_read(mac) mac_mdio_read((mac), MAC_MDIO_STS)
2126 +static inline u32 mac_mdio_read(struct mac_info *mac, int offset)
2130 + for(i=0; i<4; i++) {
2131 + data |= (mac_read_reg(mac, offset + i) & 0xff) << (i*8);
2136 +static inline u32 mdio_cmd(int phy_addr, int phy_reg)
2138 + return phy_addr << MII_ADDR_SHL |
2139 + phy_reg << MII_REG_SHL |
2143 +#define MAC_REG_LIST { \
2144 + MAC_TX_CNTRL1, MAC_TX_CNTRL2, \
2145 + MAC_RX_CNTRL1, MAC_RX_CNTRL2, \
2146 + MAC_RANDOM_SEED, MAC_THRESH_P_EMPTY, MAC_THRESH_P_FULL, \
2147 + MAC_BUF_SIZE_TX, MAC_TX_DEFER, MAC_RX_DEFER, \
2148 + MAC_TX_TWO_DEFER_1, MAC_TX_TWO_DEFER_2, MAC_SLOT_TIME, \
2149 + MAC_ADDR_MASK +0, MAC_ADDR_MASK +1, MAC_ADDR_MASK +2, \
2150 + MAC_ADDR_MASK +3, MAC_ADDR_MASK +4, MAC_ADDR_MASK +5, \
2151 + MAC_ADDR +0, MAC_ADDR +1, MAC_ADDR +2, \
2152 + MAC_ADDR +3, MAC_ADDR +4, MAC_ADDR +5, \
2153 + MAC_INT_CLK_THRESH, \
2154 + MAC_UNI_ADDR +0, MAC_UNI_ADDR +1, MAC_UNI_ADDR +2, \
2155 + MAC_UNI_ADDR +3, MAC_UNI_ADDR +4, MAC_UNI_ADDR +5, \
2159 +#define NPE_STAT_NUM 34
2160 +#define NPE_STAT_NUM_BASE 22
2161 +#define NPE_Q_STAT_NUM 4
2163 +#define NPE_Q_STAT_STRINGS \
2164 + {"RX ready to use queue len "}, \
2165 + {"RX received queue len "}, \
2166 + {"TX to be send queue len "}, \
2167 + {"TX done queue len "},
2169 +#define NPE_STAT_STRINGS \
2170 + {"StatsAlignmentErrors "}, \
2171 + {"StatsFCSErrors "}, \
2172 + {"StatsInternalMacReceiveErrors "}, \
2173 + {"RxOverrunDiscards "}, \
2174 + {"RxLearnedEntryDiscards "}, \
2175 + {"RxLargeFramesDiscards "}, \
2176 + {"RxSTPBlockedDiscards "}, \
2177 + {"RxVLANTypeFilterDiscards "}, \
2178 + {"RxVLANIdFilterDiscards "}, \
2179 + {"RxInvalidSourceDiscards "}, \
2180 + {"RxBlackListDiscards "}, \
2181 + {"RxWhiteListDiscards "}, \
2182 + {"RxUnderflowEntryDiscards "}, \
2183 + {"StatsSingleCollisionFrames "}, \
2184 + {"StatsMultipleCollisionFrames "}, \
2185 + {"StatsDeferredTransmissions "}, \
2186 + {"StatsLateCollisions "}, \
2187 + {"StatsExcessiveCollsions "}, \
2188 + {"StatsInternalMacTransmitErrors"}, \
2189 + {"StatsCarrierSenseErrors "}, \
2190 + {"TxLargeFrameDiscards "}, \
2191 + {"TxVLANIdFilterDiscards "}, \
2193 + {"RxValidFramesTotalOctets "}, \
2194 + {"RxUcastPkts "}, \
2195 + {"RxBcastPkts "}, \
2196 + {"RxMcastPkts "}, \
2197 + {"RxPkts64Octets "}, \
2198 + {"RxPkts65to127Octets "}, \
2199 + {"RxPkts128to255Octets "}, \
2200 + {"RxPkts256to511Octets "}, \
2201 + {"RxPkts512to1023Octets "}, \
2202 + {"RxPkts1024to1518Octets "}, \
2203 + {"RxInternalNPEReceiveErrors "}, \
2204 + {"TxInternalNPETransmitErrors "}
2206 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac_driver.c
2207 ===================================================================
2208 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
2209 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac_driver.c 2007-02-21 02:24:46.000000000 -0800
2212 + * mac_driver.c - provide a network interface for each MAC
2214 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
2216 + * This file is released under the GPLv2
2219 +#include <linux/kernel.h>
2220 +#include <linux/module.h>
2221 +#include <linux/platform_device.h>
2222 +#include <linux/netdevice.h>
2223 +#include <linux/etherdevice.h>
2224 +#include <linux/ethtool.h>
2225 +#include <linux/slab.h>
2226 +#include <linux/delay.h>
2227 +#include <linux/err.h>
2228 +#include <linux/dma-mapping.h>
2229 +#include <linux/workqueue.h>
2230 +#include <asm/io.h>
2231 +#include <asm/irq.h>
2234 +#include <linux/ixp_qmgr.h>
2235 +#include <linux/ixp_npe.h>
2238 +#define MDIO_INTERVAL (3*HZ)
2239 +#define RX_QUEUE_PREFILL 64
2240 +#define TX_QUEUE_PREFILL 16
2242 +#define IXMAC_NAME "ixp4xx_mac"
2243 +#define IXMAC_VERSION "0.3.1"
2245 +#define MAC_DEFAULT_REG(mac, name) \
2246 + mac_write_reg(mac, MAC_ ## name, MAC_ ## name ## _DEFAULT)
2248 +#define TX_DONE_QID 31
2250 +#define DMA_ALLOC_SIZE 2048
2251 +#define DMA_HDR_SIZE (sizeof(struct npe_cont))
2252 +#define DMA_BUF_SIZE (DMA_ALLOC_SIZE - DMA_HDR_SIZE)
2254 +/* Since the NPEs use 1 Return Q for sent frames, we need a device
2255 + * independent return Q. We call it tx_doneq.
2256 + * It will be initialized during module load and uninitialized
2257 + * during module unload. Evil hack, but there is no choice :-(
2260 +static struct qm_queue *tx_doneq = NULL;
2261 +static int debug = -1;
2262 +module_param(debug, int, 0);
2264 +static int init_buffer(struct qm_queue *queue, int count)
2267 + struct npe_cont *cont;
2269 + for (i=0; i<count; i++) {
2270 + cont = kmalloc(DMA_ALLOC_SIZE, GFP_KERNEL | GFP_DMA);
2274 + cont->phys = dma_map_single(queue->dev, cont, DMA_ALLOC_SIZE,
2275 + DMA_BIDIRECTIONAL);
2276 + if (dma_mapping_error(cont->phys))
2279 + cont->data = cont+1;
2280 + /* now the buffer is on a 32 bit boundary.
2281 + * we add 2 bytes for good alignment to SKB */
2283 + cont->eth.next = 0;
2284 + cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2285 + cont->eth.pkt_len = 0;
2286 + /* also add 2 alignment bytes from cont->data*/
2287 + cont->eth.phys_addr = cpu_to_npe32(cont->phys+ DMA_HDR_SIZE+ 2);
2289 + dma_sync_single(queue->dev, cont->phys, DMA_HDR_SIZE,
2292 + queue_put_entry(queue, cont->phys);
2293 + if (queue_stat(queue) == 2) { /* overflow */
2294 + dma_unmap_single(queue->dev, cont->phys, DMA_ALLOC_SIZE,
2295 + DMA_BIDIRECTIONAL);
2306 +static int destroy_buffer(struct qm_queue *queue, int count)
2310 + struct npe_cont *cont;
2312 + for (i=0; i<count; i++) {
2313 + phys = queue_get_entry(queue) & ~0xf;
2316 + dma_unmap_single(queue->dev, phys, DMA_ALLOC_SIZE,
2317 + DMA_BIDIRECTIONAL);
2318 + cont = dma_to_virt(queue->dev, phys);
2324 +static void mac_init(struct mac_info *mac)
2326 + MAC_DEFAULT_REG(mac, TX_CNTRL2);
2327 + MAC_DEFAULT_REG(mac, RANDOM_SEED);
2328 + MAC_DEFAULT_REG(mac, THRESH_P_EMPTY);
2329 + MAC_DEFAULT_REG(mac, THRESH_P_FULL);
2330 + MAC_DEFAULT_REG(mac, TX_DEFER);
2331 + MAC_DEFAULT_REG(mac, TX_TWO_DEFER_1);
2332 + MAC_DEFAULT_REG(mac, TX_TWO_DEFER_2);
2333 + MAC_DEFAULT_REG(mac, SLOT_TIME);
2334 + MAC_DEFAULT_REG(mac, INT_CLK_THRESH);
2335 + MAC_DEFAULT_REG(mac, BUF_SIZE_TX);
2336 + MAC_DEFAULT_REG(mac, TX_CNTRL1);
2337 + MAC_DEFAULT_REG(mac, RX_CNTRL1);
2340 +static void mac_set_uniaddr(struct net_device *dev)
2343 + struct mac_info *mac = netdev_priv(dev);
2344 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2346 + /* check for multicast */
2347 + if (dev->dev_addr[0] & 1)
2350 + npe_mh_setportaddr(npe, mac->plat, dev->dev_addr);
2351 + npe_mh_disable_firewall(npe, mac->plat);
2352 + for (i=0; i<dev->addr_len; i++)
2353 + mac_write_reg(mac, MAC_UNI_ADDR + i, dev->dev_addr[i]);
2356 +static void update_duplex_mode(struct net_device *dev)
2358 + struct mac_info *mac = netdev_priv(dev);
2359 + if (netif_msg_link(mac)) {
2360 + printk(KERN_DEBUG "Link of %s is %s-duplex\n", dev->name,
2361 + mac->mii.full_duplex ? "full" : "half");
2363 + if (mac->mii.full_duplex) {
2364 + mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
2366 + mac_set_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
2370 +static int media_check(struct net_device *dev, int init)
2372 + struct mac_info *mac = netdev_priv(dev);
2374 + if (mii_check_media(&mac->mii, netif_msg_link(mac), init)) {
2375 + update_duplex_mode(dev);
2381 +static void get_npe_stats(struct mac_info *mac, u32 *buf, int len, int reset)
2383 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2386 + memset(buf, len, 0);
2387 + phys = dma_map_single(mac->npe_dev, buf, len, DMA_BIDIRECTIONAL);
2388 + npe_mh_get_stats(npe, mac->plat, phys, reset);
2389 + dma_unmap_single(mac->npe_dev, phys, len, DMA_BIDIRECTIONAL);
2392 +static void irqcb_recv(struct qm_queue *queue)
2394 + struct net_device *dev = queue->cb_data;
2396 + queue_ack_irq(queue);
2397 + queue_disable_irq(queue);
2398 + if (netif_running(dev))
2399 + netif_rx_schedule(dev);
2402 +int ix_recv(struct net_device *dev, int *budget, struct qm_queue *queue)
2404 + struct mac_info *mac = netdev_priv(dev);
2405 + struct sk_buff *skb;
2407 + struct npe_cont *cont;
2409 + while (*budget > 0 && netif_running(dev) ) {
2411 + phys = queue_get_entry(queue) & ~0xf;
2414 + dma_sync_single(queue->dev, phys, DMA_HDR_SIZE,
2416 + cont = dma_to_virt(queue->dev, phys);
2417 + len = npe_to_cpu16(cont->eth.pkt_len) -4; /* strip FCS */
2419 + if (unlikely(netif_msg_rx_status(mac))) {
2420 + printk(KERN_DEBUG "%s: RX packet size: %u\n",
2422 + queue_state(mac->rxq);
2423 + queue_state(mac->rxdoneq);
2425 + skb = dev_alloc_skb(len + 2);
2426 + if (likely(skb)) {
2428 + skb_reserve(skb, 2);
2429 + dma_sync_single(queue->dev, cont->eth.phys_addr, len,
2431 +#ifdef CONFIG_NPE_ADDRESS_COHERENT
2432 + /* swap the payload of the SKB */
2434 + u32 *t = (u32*)(skb->data-2);
2435 + u32 *s = (u32*)(cont->data-2);
2436 + int i, j = (len+5)/4;
2437 + for (i=0; i<j; i++)
2438 + t[i] = cpu_to_be32(s[i]);
2441 + eth_copy_and_sum(skb, cont->data, len, 0);
2443 + skb_put(skb, len);
2444 + skb->protocol = eth_type_trans(skb, dev);
2445 + dev->last_rx = jiffies;
2446 + netif_receive_skb(skb);
2447 + mac->stat.rx_packets++;
2448 + mac->stat.rx_bytes += skb->len;
2450 + mac->stat.rx_dropped++;
2452 + cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2453 + cont->eth.pkt_len = 0;
2454 + dma_sync_single(queue->dev, phys, DMA_HDR_SIZE, DMA_TO_DEVICE);
2455 + queue_put_entry(mac->rxq, phys);
2463 +static int ix_poll(struct net_device *dev, int *budget)
2465 + struct mac_info *mac = netdev_priv(dev);
2466 + struct qm_queue *queue = mac->rxdoneq;
2469 + if (ix_recv(dev, budget, queue))
2471 + netif_rx_complete(dev);
2472 + queue_enable_irq(queue);
2473 + if (!queue_len(queue))
2475 + queue_disable_irq(queue);
2476 + if (netif_rx_reschedule(dev, 0))
2482 +static void ixmac_set_rx_mode (struct net_device *dev)
2484 + struct mac_info *mac = netdev_priv(dev);
2485 + struct dev_mc_list *mclist;
2486 + u8 aset[dev->addr_len], aclear[dev->addr_len];
2489 + if (dev->flags & IFF_PROMISC) {
2490 + mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
2492 + mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
2494 + mclist = dev->mc_list;
2495 + memset(aset, 0xff, dev->addr_len);
2496 + memset(aclear, 0x00, dev->addr_len);
2497 + for (i = 0; mclist && i < dev->mc_count; i++) {
2498 + for (j=0; j< dev->addr_len; j++) {
2499 + aset[j] &= mclist->dmi_addr[j];
2500 + aclear[j] |= mclist->dmi_addr[j];
2502 + mclist = mclist->next;
2504 + for (j=0; j< dev->addr_len; j++) {
2505 + aclear[j] = aset[j] | ~aclear[j];
2507 + for (i=0; i<dev->addr_len; i++) {
2508 + mac_write_reg(mac, MAC_ADDR + i, aset[i]);
2509 + mac_write_reg(mac, MAC_ADDR_MASK + i, aclear[i]);
2514 +static int ixmac_open (struct net_device *dev)
2516 + struct mac_info *mac = netdev_priv(dev);
2517 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2518 + u32 buf[NPE_STAT_NUM];
2522 + /* first check if the NPE is up and running */
2523 + if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
2524 + printk(KERN_ERR "%s: %s not running\n", dev->name,
2528 + if (npe_mh_status(npe)) {
2529 + printk(KERN_ERR "%s: %s not responding\n", dev->name,
2533 + mac->txq_pkt += init_buffer(mac->txq, TX_QUEUE_PREFILL - mac->txq_pkt);
2534 + mac->rxq_pkt += init_buffer(mac->rxq, RX_QUEUE_PREFILL - mac->rxq_pkt);
2536 + queue_enable_irq(mac->rxdoneq);
2538 +	/* drain all buffers from the RX-done-q to make the IRQ happen */
2539 + while ((phys = queue_get_entry(mac->rxdoneq) & ~0xf)) {
2540 + struct npe_cont *cont;
2541 + cont = dma_to_virt(mac->rxdoneq->dev, phys);
2542 + cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2543 + cont->eth.pkt_len = 0;
2544 + dma_sync_single(mac->rxdoneq->dev, phys, DMA_HDR_SIZE,
2546 + queue_put_entry(mac->rxq, phys);
2549 + npe_mh_set_rxqid(npe, mac->plat, mac->plat->rxdoneq_id);
2550 + get_npe_stats(mac, buf, sizeof(buf), 1); /* reset stats */
2551 + get_npe_stats(mac, buf, sizeof(buf), 0);
2553 + * if the extended stats contain random values
2554 +	 * the NPE image lacks extended statistics counters
2556 + for (i=NPE_STAT_NUM_BASE; i<NPE_STAT_NUM; i++) {
2557 + if (buf[i] >10000)
2560 + mac->npe_stat_num = i<NPE_STAT_NUM ? NPE_STAT_NUM_BASE : NPE_STAT_NUM;
2561 + mac->npe_stat_num += NPE_Q_STAT_NUM;
2563 + mac_set_uniaddr(dev);
2564 + media_check(dev, 1);
2565 + ixmac_set_rx_mode(dev);
2566 + netif_start_queue(dev);
2567 + schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
2568 + if (netif_msg_ifup(mac)) {
2569 + printk(KERN_DEBUG "%s: open " IXMAC_NAME
2570 + " RX queue %d bufs, TX queue %d bufs\n",
2571 + dev->name, mac->rxq_pkt, mac->txq_pkt);
2576 +static int ixmac_start_xmit (struct sk_buff *skb, struct net_device *dev)
2578 + struct mac_info *mac = netdev_priv(dev);
2579 + struct npe_cont *cont;
2581 + struct qm_queue *queue = mac->txq;
2583 + if (unlikely(skb->len > DMA_BUF_SIZE)) {
2584 + dev_kfree_skb(skb);
2585 + mac->stat.tx_errors++;
2586 + return NETDEV_TX_OK;
2588 + phys = queue_get_entry(tx_doneq) & ~0xf;
2591 + cont = dma_to_virt(queue->dev, phys);
2592 +#ifdef CONFIG_NPE_ADDRESS_COHERENT
2593 + /* swap the payload of the SKB */
2595 + u32 *s = (u32*)(skb->data-2);
2596 + u32 *t = (u32*)(cont->data-2);
2597 + int i,j = (skb->len+5) / 4;
2598 + for (i=0; i<j; i++)
2599 + t[i] = cpu_to_be32(s[i]);
2602 + //skb_copy_and_csum_dev(skb, cont->data);
2603 + memcpy(cont->data, skb->data, skb->len);
2605 + cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2606 + cont->eth.pkt_len = cpu_to_npe16(skb->len);
2607 + /* disable VLAN functions in NPE image for now */
2608 + cont->eth.flags = 0;
2609 + dma_sync_single(queue->dev, phys, skb->len + DMA_HDR_SIZE,
2611 + queue_put_entry(queue, phys);
2612 + if (queue_stat(queue) == 2) { /* overflow */
2613 + queue_put_entry(tx_doneq, phys);
2616 +	mac->stat.tx_packets++;
2618 +	mac->stat.tx_bytes += skb->len;
2619 +	dev->trans_start = jiffies;
2620 +	if (netif_msg_tx_queued(mac)) {
2621 +		printk(KERN_DEBUG "%s: TX packet size %u\n",
2622 +			dev->name, skb->len);
2623 +		queue_state(mac->txq);
2624 +		queue_state(tx_doneq);
2625 +	}
2626 +	/* free last: skb->len is still read above; the skb is dead after this */
2626 +	dev_kfree_skb(skb);
2627 +	return NETDEV_TX_OK;
2629 + return NETDEV_TX_BUSY;
2632 +static int ixmac_close (struct net_device *dev)
2634 + struct mac_info *mac = netdev_priv(dev);
2636 + netif_stop_queue (dev);
2637 + queue_disable_irq(mac->rxdoneq);
2639 + mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt);
2640 + mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt);
2642 + cancel_rearming_delayed_work(&(mac->mdio_thread));
2644 + if (netif_msg_ifdown(mac)) {
2645 + printk(KERN_DEBUG "%s: close " IXMAC_NAME
2646 + " RX queue %d bufs, TX queue %d bufs\n",
2647 + dev->name, mac->rxq_pkt, mac->txq_pkt);
2652 +static int ixmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2654 + struct mac_info *mac = netdev_priv(dev);
2655 + int rc, duplex_changed;
2657 + if (!netif_running(dev))
2659 + if (!try_module_get(THIS_MODULE))
2661 + rc = generic_mii_ioctl(&mac->mii, if_mii(rq), cmd, &duplex_changed);
2662 + module_put(THIS_MODULE);
2663 + if (duplex_changed)
2664 + update_duplex_mode(dev);
2668 +static struct net_device_stats *ixmac_stats (struct net_device *dev)
2670 + struct mac_info *mac = netdev_priv(dev);
2671 + return &mac->stat;
2674 +static void ixmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2676 + struct mac_info *mac = netdev_priv(dev);
2677 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2679 + strcpy(info->driver, IXMAC_NAME);
2680 + strcpy(info->version, IXMAC_VERSION);
2681 + if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
2682 + snprintf(info->fw_version, 32, "%d.%d func [%d]",
2683 + npe->img_info[2], npe->img_info[3], npe->img_info[1]);
2685 + strncpy(info->bus_info, npe->plat->name, ETHTOOL_BUSINFO_LEN);
2688 +static int ixmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2690 + struct mac_info *mac = netdev_priv(dev);
2691 + mii_ethtool_gset(&mac->mii, cmd);
2695 +static int ixmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2697 + struct mac_info *mac = netdev_priv(dev);
2699 + rc = mii_ethtool_sset(&mac->mii, cmd);
2703 +static int ixmac_nway_reset(struct net_device *dev)
2705 + struct mac_info *mac = netdev_priv(dev);
2706 + return mii_nway_restart(&mac->mii);
2709 +static u32 ixmac_get_link(struct net_device *dev)
2711 + struct mac_info *mac = netdev_priv(dev);
2712 + return mii_link_ok(&mac->mii);
2715 +static const int mac_reg_list[] = MAC_REG_LIST;
2717 +static int ixmac_get_regs_len(struct net_device *dev)
2719 + return ARRAY_SIZE(mac_reg_list);
2723 +ixmac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
2726 + struct mac_info *mac = netdev_priv(dev);
2729 + for (i=0; i<regs->len; i++) {
2730 + buf[i] = mac_read_reg(mac, mac_reg_list[i]);
2735 + const char str[ETH_GSTRING_LEN];
2736 +} ethtool_stats_keys[NPE_STAT_NUM + NPE_Q_STAT_NUM] = {
2737 + NPE_Q_STAT_STRINGS
2741 +static void ixmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2743 + struct mac_info *mac = netdev_priv(dev);
2744 + memcpy(data, ethtool_stats_keys, mac->npe_stat_num * ETH_GSTRING_LEN);
2747 +static int ixmac_get_stats_count(struct net_device *dev)
2749 + struct mac_info *mac = netdev_priv(dev);
2750 + return mac->npe_stat_num;
2753 +static u32 ixmac_get_msglevel(struct net_device *dev)
2755 + struct mac_info *mac = netdev_priv(dev);
2756 + return mac->msg_enable;
2759 +static void ixmac_set_msglevel(struct net_device *dev, u32 datum)
2761 + struct mac_info *mac = netdev_priv(dev);
2762 + mac->msg_enable = datum;
2765 +static void ixmac_get_ethtool_stats(struct net_device *dev,
2766 + struct ethtool_stats *stats, u64 *data)
2769 + struct mac_info *mac = netdev_priv(dev);
2770 + u32 buf[NPE_STAT_NUM];
2772 + data[0] = queue_len(mac->rxq);
2773 + data[1] = queue_len(mac->rxdoneq);
2774 + data[2] = queue_len(mac->txq);
2775 + data[3] = queue_len(tx_doneq);
2777 + get_npe_stats(mac, buf, sizeof(buf), 0);
2779 + for (i=0; i<stats->n_stats-4; i++) {
2780 + data[i+4] = npe_to_cpu32(buf[i]);
2784 +static struct ethtool_ops ixmac_ethtool_ops = {
2785 + .get_drvinfo = ixmac_get_drvinfo,
2786 + .get_settings = ixmac_get_settings,
2787 + .set_settings = ixmac_set_settings,
2788 + .nway_reset = ixmac_nway_reset,
2789 + .get_link = ixmac_get_link,
2790 + .get_msglevel = ixmac_get_msglevel,
2791 + .set_msglevel = ixmac_set_msglevel,
2792 + .get_regs_len = ixmac_get_regs_len,
2793 + .get_regs = ixmac_get_regs,
2794 + .get_perm_addr = ethtool_op_get_perm_addr,
2795 + .get_strings = ixmac_get_strings,
2796 + .get_stats_count = ixmac_get_stats_count,
2797 + .get_ethtool_stats = ixmac_get_ethtool_stats,
2800 +static void mac_mdio_thread(struct work_struct *work)
2802 + struct mac_info *mac = container_of(work, struct mac_info,
2803 + mdio_thread.work);
2804 + struct net_device *dev = mac->netdev;
2806 + media_check(dev, 0);
2807 + schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
2810 +static int mac_probe(struct platform_device *pdev)
2812 + struct resource *res;
2813 + struct mac_info *mac;
2814 + struct net_device *dev;
2815 + struct npe_info *npe;
2816 + struct mac_plat_info *plat = pdev->dev.platform_data;
2819 + if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0))) {
2822 + if (!(dev = alloc_etherdev (sizeof(struct mac_info)))) {
2825 + SET_MODULE_OWNER(dev);
2826 + SET_NETDEV_DEV(dev, &pdev->dev);
2827 + mac = netdev_priv(dev);
2828 + mac->netdev = dev;
2830 + size = res->end - res->start +1;
2831 + mac->res = request_mem_region(res->start, size, IXMAC_NAME);
2837 + mac->addr = ioremap(res->start, size);
2843 + dev->open = ixmac_open;
2844 + dev->hard_start_xmit = ixmac_start_xmit;
2845 + dev->poll = ix_poll;
2846 + dev->stop = ixmac_close;
2847 + dev->get_stats = ixmac_stats;
2848 + dev->do_ioctl = ixmac_ioctl;
2849 + dev->set_multicast_list = ixmac_set_rx_mode;
2850 + dev->ethtool_ops = &ixmac_ethtool_ops;
2853 + dev->tx_queue_len = 100;
2855 + mac->npe_dev = get_npe_by_id(plat->npe_id);
2856 + if (!mac->npe_dev) {
2860 + npe = dev_get_drvdata(mac->npe_dev);
2862 + mac->rxq = request_queue(plat->rxq_id, 128);
2863 + if (IS_ERR(mac->rxq)) {
2864 + printk(KERN_ERR "Error requesting Q: %d\n", plat->rxq_id);
2868 + mac->txq = request_queue(plat->txq_id, 128);
2869 + if (IS_ERR(mac->txq)) {
2870 + printk(KERN_ERR "Error requesting Q: %d\n", plat->txq_id);
2874 + mac->rxdoneq = request_queue(plat->rxdoneq_id, 128);
2875 + if (IS_ERR(mac->rxdoneq)) {
2876 + printk(KERN_ERR "Error requesting Q: %d\n", plat->rxdoneq_id);
2880 + mac->rxdoneq->irq_cb = irqcb_recv;
2881 + mac->rxdoneq->cb_data = dev;
2882 + queue_set_watermarks(mac->rxdoneq, 0, 0);
2883 + queue_set_irq_src(mac->rxdoneq, Q_IRQ_ID_NOT_E);
2885 + mac->qmgr = dev_get_drvdata(mac->rxq->dev);
2886 + if (register_netdev (dev)) {
2892 + mac->npe_stat_num = NPE_STAT_NUM_BASE;
2893 + mac->msg_enable = netif_msg_init(debug, MAC_DEF_MSG_ENABLE);
2895 + platform_set_drvdata(pdev, dev);
2897 + mac_write_reg(mac, MAC_CORE_CNTRL, CORE_RESET);
2899 + mac_write_reg(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
2901 + init_mdio(dev, plat->phy_id);
2903 + INIT_DELAYED_WORK(&mac->mdio_thread, mac_mdio_thread);
2905 + /* The place of the MAC address is very system dependent.
2906 + * Here we use a random one to be replaced by one of the
2907 + * following commands:
2908 + * "ip link set address 02:03:04:04:04:01 dev eth0"
2909 + * "ifconfig eth0 hw ether 02:03:04:04:04:07"
2912 + if (is_zero_ether_addr(plat->hwaddr)) {
2913 + random_ether_addr(dev->dev_addr);
2914 + dev->dev_addr[5] = plat->phy_id;
2917 + memcpy(dev->dev_addr, plat->hwaddr, 6);
2919 + printk(KERN_INFO IXMAC_NAME " driver " IXMAC_VERSION
2920 + ": %s on %s with PHY[%d] initialized\n",
2921 + dev->name, npe->plat->name, plat->phy_id);
2927 + release_queue(mac->rxq);
2929 + release_queue(mac->txq);
2931 + release_queue(mac->rxdoneq);
2932 + module_put(mac->npe_dev->driver->owner);
2934 + iounmap(mac->addr);
2936 + release_resource(mac->res);
2942 +static void drain_npe(struct mac_info *mac)
2944 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2945 + struct npe_cont *cont;
2949 + /* Now there are some skb hold by the NPE.
2950 + * We switch the MAC in loopback mode and send a pseudo packet
2951 + * that will be returned by the NPE in its last SKB.
2952 + * We will also try to isolate the PHY to keep the packets internal.
2955 + if (mac->txq_pkt <2)
2956 + mac->txq_pkt += init_buffer(tx_doneq, 5);
2958 + if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
2959 + mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
2960 + mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN);
2962 + npe_mh_npe_loopback_mode(npe, mac->plat, 1);
2965 + while (mac->rxq_pkt && loop++ < 2000 ) {
2966 + phys = queue_get_entry(tx_doneq) & ~0xf;
2969 + cont = dma_to_virt(queue->dev, phys);
2970 + /* actually the packets should never leave the system,
2971 + * but if they do, they shall contain 0s instead of
2972 +		 * interesting random data....
2974 + memset(cont->data, 0, 64);
2975 +		cont->eth.pkt_len = cpu_to_npe16(64);	/* keep NPE byte order, matching ixmac_start_xmit */
2976 + dma_sync_single(mac->txq->dev, phys, 64 + DMA_HDR_SIZE,
2978 + queue_put_entry(mac->txq, phys);
2979 + if (queue_stat(mac->txq) == 2) { /* overflow */
2980 + queue_put_entry(tx_doneq, phys);
2984 + mac->rxq_pkt -= destroy_buffer(mac->rxdoneq,
2987 + npe_mh_npe_loopback_mode(npe, mac->plat, 0);
2989 + /* Flush MAC TX fifo to drain the bogus packages */
2990 + mac_set_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2991 + mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_RX_EN);
2992 + mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_TX_EN);
2993 + mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN);
2994 + mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2995 + mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2998 +static int mac_remove(struct platform_device *pdev)
3000 + struct net_device* dev = platform_get_drvdata(pdev);
3001 + struct mac_info *mac = netdev_priv(dev);
3003 + unregister_netdev(dev);
3005 + mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt);
3009 + mac->txq_pkt -= destroy_buffer(mac->txq, mac->txq_pkt);
3010 + mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt);
3012 +	if (mac->rxq_pkt || mac->txq_pkt)
3013 +		printk(KERN_ERR "Buffers lost in NPE: RX:%d, TX:%d\n",
3014 +			mac->rxq_pkt, mac->txq_pkt);
3016 + release_queue(mac->txq);
3017 + release_queue(mac->rxq);
3018 + release_queue(mac->rxdoneq);
3020 + flush_scheduled_work();
3021 + return_npe_dev(mac->npe_dev);
3023 + iounmap(mac->addr);
3024 + release_resource(mac->res);
3025 + platform_set_drvdata(pdev, NULL);
3030 +static struct platform_driver ixp4xx_mac = {
3031 + .driver.name = IXMAC_NAME,
3032 + .probe = mac_probe,
3033 + .remove = mac_remove,
3036 +static int __init init_mac(void)
3038 + /* The TX done Queue handles skbs sent out by the NPE */
3039 + tx_doneq = request_queue(TX_DONE_QID, 128);
3040 + if (IS_ERR(tx_doneq)) {
3041 + printk(KERN_ERR "Error requesting Q: %d\n", TX_DONE_QID);
3044 + return platform_driver_register(&ixp4xx_mac);
3047 +static void __exit finish_mac(void)
3049 + platform_driver_unregister(&ixp4xx_mac);
3051 + release_queue(tx_doneq);
3055 +module_init(init_mac);
3056 +module_exit(finish_mac);
3058 +MODULE_LICENSE("GPL");
3059 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
3061 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe.c
3062 ===================================================================
3063 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
3064 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe.c 2007-02-21 02:24:35.000000000 -0800
3067 +#include <linux/ixp_npe.h>
3068 +#include <asm/hardware.h>
3070 +#define RESET_NPE_PARITY 0x0800
3071 +#define PARITY_BIT_MASK 0x3F00FFFF
3072 +#define CONFIG_CTRL_REG_MASK 0x3F3FFFFF
3073 +#define MAX_RETRIES 1000000
3074 +#define NPE_PHYS_REG 32
3075 +#define RESET_MBST_VAL 0x0000F0F0
3076 +#define NPE_REGMAP 0x0000001E
3077 +#define INSTR_WR_REG_SHORT 0x0000C000
3078 +#define INSTR_WR_REG_BYTE 0x00004000
3079 +#define MASK_ECS_REG_0_NEXTPC 0x1FFF0000
3081 +#define INSTR_RD_FIFO 0x0F888220
3082 +#define INSTR_RESET_MBOX 0x0FAC8210
3084 +#define ECS_REG_0_LDUR 8
3085 +#define ECS_REG_1_CCTXT 16
3086 +#define ECS_REG_1_SELCTXT 0
3088 +#define ECS_BG_CTXT_REG_0 0x00
3089 +#define ECS_BG_CTXT_REG_1 0x01
3090 +#define ECS_BG_CTXT_REG_2 0x02
3091 +#define ECS_PRI_1_CTXT_REG_0 0x04
3092 +#define ECS_PRI_1_CTXT_REG_1 0x05
3093 +#define ECS_PRI_1_CTXT_REG_2 0x06
3094 +#define ECS_PRI_2_CTXT_REG_0 0x08
3095 +#define ECS_PRI_2_CTXT_REG_1 0x09
3096 +#define ECS_PRI_2_CTXT_REG_2 0x0A
3097 +#define ECS_DBG_CTXT_REG_0 0x0C
3098 +#define ECS_DBG_CTXT_REG_1 0x0D
3099 +#define ECS_DBG_CTXT_REG_2 0x0E
3100 +#define ECS_INSTRUCT_REG 0x11
3102 +#define ECS_BG_CTXT_REG_0_RESET 0xA0000000
3103 +#define ECS_BG_CTXT_REG_1_RESET 0x01000000
3104 +#define ECS_BG_CTXT_REG_2_RESET 0x00008000
3105 +#define ECS_PRI_1_CTXT_REG_0_RESET 0x20000080
3106 +#define ECS_PRI_1_CTXT_REG_1_RESET 0x01000000
3107 +#define ECS_PRI_1_CTXT_REG_2_RESET 0x00008000
3108 +#define ECS_PRI_2_CTXT_REG_0_RESET 0x20000080
3109 +#define ECS_PRI_2_CTXT_REG_1_RESET 0x01000000
3110 +#define ECS_PRI_2_CTXT_REG_2_RESET 0x00008000
3111 +#define ECS_DBG_CTXT_REG_0_RESET 0x20000000
3112 +#define ECS_DBG_CTXT_REG_1_RESET 0x00000000
3113 +#define ECS_DBG_CTXT_REG_2_RESET 0x001E0000
3114 +#define ECS_INSTRUCT_REG_RESET 0x1003C00F
3116 +static struct { u32 reg; u32 val; } ecs_reset[] =
3118 + { ECS_BG_CTXT_REG_0, ECS_BG_CTXT_REG_0_RESET },
3119 + { ECS_BG_CTXT_REG_1, ECS_BG_CTXT_REG_1_RESET },
3120 + { ECS_BG_CTXT_REG_2, ECS_BG_CTXT_REG_2_RESET },
3121 + { ECS_PRI_1_CTXT_REG_0, ECS_PRI_1_CTXT_REG_0_RESET },
3122 + { ECS_PRI_1_CTXT_REG_1, ECS_PRI_1_CTXT_REG_1_RESET },
3123 + { ECS_PRI_1_CTXT_REG_2, ECS_PRI_1_CTXT_REG_2_RESET },
3124 + { ECS_PRI_2_CTXT_REG_0, ECS_PRI_2_CTXT_REG_0_RESET },
3125 + { ECS_PRI_2_CTXT_REG_1, ECS_PRI_2_CTXT_REG_1_RESET },
3126 + { ECS_PRI_2_CTXT_REG_2, ECS_PRI_2_CTXT_REG_2_RESET },
3127 + { ECS_DBG_CTXT_REG_0, ECS_DBG_CTXT_REG_0_RESET },
3128 + { ECS_DBG_CTXT_REG_1, ECS_DBG_CTXT_REG_1_RESET },
3129 + { ECS_DBG_CTXT_REG_2, ECS_DBG_CTXT_REG_2_RESET },
3130 + { ECS_INSTRUCT_REG, ECS_INSTRUCT_REG_RESET }
3133 +/* actually I have no idea what I'm doing here !!
3134 + * I only rewrite the "reset" sequence the way Intel does it.
3137 +static void npe_debg_preexec(struct npe_info *npe)
3139 + u32 r = IX_NPEDL_MASK_ECS_DBG_REG_2_IF | IX_NPEDL_MASK_ECS_DBG_REG_2_IE;
3141 + npe->exec_count = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCT);
3142 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCT, 0);
3143 + npe->ctx_reg2 = npe_read_ecs_reg(npe, ECS_DBG_CTXT_REG_2);
3144 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_2, npe->ctx_reg2 | r);
3147 +static void npe_debg_postexec(struct npe_info *npe)
3149 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_0, 0);
3150 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3151 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCT, npe->exec_count);
3152 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_2, npe->ctx_reg2);
3156 +npe_debg_inst_exec(struct npe_info *npe, u32 instr, u32 ctx, u32 ldur)
3161 + regval = IX_NPEDL_MASK_ECS_REG_0_ACTIVE |
3162 + (ldur << ECS_REG_0_LDUR);
3163 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_0 , regval);
3164 + /* set CCTXT at ECS DEBUG L3 to specify in which context
3165 + * to execute the instruction
3167 + regval = (ctx << ECS_REG_1_CCTXT) |
3168 + (ctx << ECS_REG_1_SELCTXT);
3169 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_1, regval);
3171 + /* clear the pipeline */
3172 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3174 + /* load NPE instruction into the instruction register */
3175 + npe_write_ecs_reg(npe, ECS_INSTRUCT_REG, instr);
3176 + /* we need this value later to wait for
3177 + * completion of NPE execution step
3179 + wc = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WC);
3180 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STEP);
3182 + /* Watch Count register increments when NPE completes an instruction */
3183 + while (wc == npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WC) &&
3184 + ++c < MAX_RETRIES);
3186 + if (c >= MAX_RETRIES) {
3187 + printk(KERN_ERR "%s reset:npe_debg_inst_exec(): Timeout\n",
3194 +static int npe_logical_reg_write8(struct npe_info *npe, u32 addr, u32 val)
3198 + /* here we build the NPE assembler instruction:
3200 + instr = INSTR_WR_REG_BYTE | /* OpCode */
3201 + addr << 9 | /* base Operand */
3202 + (val & 0x1f) << 4 | /* lower 5 bits to immediate data */
3203 + (val & ~0x1f) << (18-5);/* higher 3 bits to CoProc instr. */
3204 + /* and execute it */
3205 + return npe_debg_inst_exec(npe, instr, 0, 1);
3208 +static int npe_logical_reg_write16(struct npe_info *npe, u32 addr, u32 val)
3211 + /* here we build the NPE assembler instruction:
3214 + instr = INSTR_WR_REG_SHORT | /* OpCode */
3215 + addr << 9 | /* base Operand */
3216 + (val & 0x1f) << 4 | /* lower 5 bits to immediate data */
3217 + (val & ~0x1f) << (18-5);/* higher 11 bits to CoProc instr. */
3218 + /* and execute it */
3219 + return npe_debg_inst_exec(npe, instr, 0, 1);
3222 +static int npe_logical_reg_write32(struct npe_info *npe, u32 addr, u32 val)
3224 + /* write in 16 bit steps first the high and then the low value */
3225 + npe_logical_reg_write16(npe, addr, val >> 16);
3226 + return npe_logical_reg_write16(npe, addr+2, val & 0xffff);
3229 +void npe_reset(struct npe_info *npe)
3231 + u32 reg, cfg_ctrl;
3233 + struct { u32 reset; int addr; int size; } ctx_reg[] = {
3234 + { 0x80, 0x1b, 8 },
3236 + { 0x820, 0x1e, 16 },
3240 + cfg_ctrl = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL);
3241 + cfg_ctrl |= 0x3F000000;
3242 + /* disable the parity interrupt */
3243 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL, cfg_ctrl & PARITY_BIT_MASK);
3245 + npe_debg_preexec(npe);
3247 + /* clear the FIFOs */
3248 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WFIFO) ==
3249 + IX_NPEDL_MASK_WFIFO_VALID);
3250 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) ==
3251 + IX_NPEDL_MASK_STAT_OFNE)
3254 + reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_FIFO);
3255 +		printk(KERN_DEBUG "%s reset: Read FIFO:=%x\n", npe->plat->name, reg);
3257 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) ==
3258 + IX_NPEDL_MASK_STAT_IFNE) {
3259 + npe_debg_inst_exec(npe, INSTR_RD_FIFO, 0, 0);
3262 + /* Reset the mailbox reg */
3263 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_MBST, RESET_MBST_VAL);
3264 + npe_debg_inst_exec(npe, INSTR_RESET_MBOX, 0, 0);
3266 + /* Reset the physical registers in the NPE register file */
3267 + for (i=0; i<NPE_PHYS_REG; i++) {
3268 + npe_logical_reg_write16(npe, NPE_REGMAP, i >> 1);
3269 + npe_logical_reg_write32(npe, (i&1) *4, 0);
3272 + /* Reset the context store. Iterate over the 16 ctx s */
3273 + for(i=0; i<16; i++) {
3274 + for (reg=0; reg<4; reg++) {
3275 + /* There is no (STEVT) register for Context 0.
3276 + * ignore if register=0 and ctx=0 */
3279 + /* Context 0 has no STARTPC. Instead, this value is
3280 + * used to set NextPC for Background ECS,
3281 + * to set where NPE starts executing code
3283 + if (!i && reg==1) {
3285 + r = npe_read_ecs_reg(npe, ECS_BG_CTXT_REG_0);
3286 + r &= ~MASK_ECS_REG_0_NEXTPC;
3287 + r |= (cr->reset << 16) & MASK_ECS_REG_0_NEXTPC;
3290 + cr = ctx_reg + reg;
3291 + switch (cr->size) {
3293 + npe_logical_reg_write8(npe, cr->addr,
3297 + npe_logical_reg_write16(npe, cr->addr,
3302 + npe_debg_postexec(npe);
3304 + for (i=0; i< ARRAY_SIZE(ecs_reset); i++) {
3305 + npe_write_ecs_reg(npe, ecs_reset[i].reg, ecs_reset[i].val);
3307 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT);
3309 + for (i=IX_NPEDL_REG_OFFSET_EXCT; i<=IX_NPEDL_REG_OFFSET_AP3; i+=4) {
3310 + npe_reg_write(npe, i, 0);
3313 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_WC, 0);
3315 + reg = *IXP4XX_EXP_CFG2;
3316 + reg |= 0x800 << npe->plat->id; /* IX_FUSE_NPE[ABC] */
3317 + *IXP4XX_EXP_CFG2 = reg;
3318 + reg &= ~(0x800 << npe->plat->id); /* IX_FUSE_NPE[ABC] */
3319 + *IXP4XX_EXP_CFG2 = reg;
3323 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL,
3324 + cfg_ctrl & CONFIG_CTRL_REG_MASK);
3329 +void npe_stop(struct npe_info *npe)
3331 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STOP);
3332 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3335 +static void npe_reset_active(struct npe_info *npe, u32 reg)
3339 + regval = npe_read_ecs_reg(npe, reg);
3340 + regval &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
3341 + npe_write_ecs_reg(npe, reg, regval);
3344 +void npe_start(struct npe_info *npe)
3346 + npe_reset_active(npe, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
3347 + npe_reset_active(npe, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
3348 + npe_reset_active(npe, IX_NPEDL_ECS_DBG_CTXT_REG_0);
3350 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3351 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_START);
3354 +EXPORT_SYMBOL(npe_stop);
3355 +EXPORT_SYMBOL(npe_start);
3356 +EXPORT_SYMBOL(npe_reset);
3357 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe_mh.c
3358 ===================================================================
3359 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
3360 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe_mh.c 2007-02-21 02:24:35.000000000 -0800
3363 + * npe_mh.c - NPE message handler.
3365 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3367 + * This file is released under the GPLv2
3370 +#include <linux/ixp_npe.h>
3371 +#include <linux/slab.h>
3373 +#define MAX_RETRY 200
3375 +struct npe_mh_msg {
3377 +	u8 byte[8]; /* Very descriptive name, I know ... */
3383 + * The whole code in this function must be reworked.
3384 + * It is in a state that works but is not rock solid
3386 +static int send_message(struct npe_info *npe, struct npe_mh_msg *msg)
3389 + u32 send[2], recv[2];
3391 + for (i=0; i<2; i++)
3392 + send[i] = be32_to_cpu(msg->u.data[i]);
3394 + if ((npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3395 + IX_NPEMH_NPE_STAT_IFNE))
3398 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[0]);
3399 + for(i=0; i<MAX_RETRY; i++) {
3400 + /* if the IFNF status bit is unset then the inFIFO is full */
3401 + if (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3402 + IX_NPEMH_NPE_STAT_IFNF)
3407 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[1]);
3409 + while (!(npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3410 + IX_NPEMH_NPE_STAT_OFNE)) {
3411 + if (i++>MAX_RETRY) {
3412 +			printk(KERN_ERR "Waiting for Output FIFO NotEmpty failed\n");
3416 + //printk("Output FIFO Not Empty. Loops: %d\n", i);
3418 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3419 + IX_NPEMH_NPE_STAT_OFNE) {
3420 + recv[j&1] = npe_reg_read(npe,IX_NPEDL_REG_OFFSET_FIFO);
3423 + if ((recv[0] != send[0]) || (recv[1] != send[1])) {
3424 + if (send[0] || send[1]) {
3425 + /* all CMDs return the complete message as answer,
3426 + * only GETSTATUS returns the ImageID of the NPE
3428 + printk("Unexpected answer: "
3429 + "Send %08x:%08x Ret %08x:%08x\n",
3430 + send[0], send[1], recv[0], recv[1]);
3440 +#define IX_ETHNPE_NPE_GETSTATUS 0x00
3441 +#define IX_ETHNPE_EDB_SETPORTADDRESS 0x01
3442 +#define IX_ETHNPE_GETSTATS 0x04
3443 +#define IX_ETHNPE_RESETSTATS 0x05
3444 +#define IX_ETHNPE_FW_SETFIREWALLMODE 0x0E
3445 +#define IX_ETHNPE_VLAN_SETRXQOSENTRY 0x0B
3446 +#define IX_ETHNPE_SETLOOPBACK_MODE 0x12
3448 +#define logical_id(mp) (((mp)->npe_id << 4) | ((mp)->port_id & 0xf))
3450 +int npe_mh_status(struct npe_info *npe)
3452 + struct npe_mh_msg msg;
3454 + memset(&msg, 0, sizeof(msg));
3455 + msg.u.byte[CMD] = IX_ETHNPE_NPE_GETSTATUS;
3456 + return send_message(npe, &msg);
3459 +int npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp,
3462 + struct npe_mh_msg msg;
3464 + msg.u.byte[CMD] = IX_ETHNPE_EDB_SETPORTADDRESS;
3465 + msg.u.byte[PORT] = mp->eth_id;
3466 + memcpy(msg.u.byte + MAC, macaddr, 6);
3468 + return send_message(npe, &msg);
3471 +int npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp)
3473 + struct npe_mh_msg msg;
3475 + memset(&msg, 0, sizeof(msg));
3476 + msg.u.byte[CMD] = IX_ETHNPE_FW_SETFIREWALLMODE;
3477 + msg.u.byte[PORT] = logical_id(mp);
3479 + return send_message(npe, &msg);
3482 +int npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp,
3485 + struct npe_mh_msg msg;
3487 + memset(&msg, 0, sizeof(msg));
3488 + msg.u.byte[CMD] = IX_ETHNPE_SETLOOPBACK_MODE;
3489 + msg.u.byte[PORT] = logical_id(mp);
3490 + msg.u.byte[3] = enable ? 1 : 0;
3492 + return send_message(npe, &msg);
3495 +int npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid)
3497 + struct npe_mh_msg msg;
3500 + memset(&msg, 0, sizeof(msg));
3501 + msg.u.byte[CMD] = IX_ETHNPE_VLAN_SETRXQOSENTRY;
3502 + msg.u.byte[PORT] = logical_id(mp);
3503 + msg.u.byte[5] = qid | 0x80;
3504 + msg.u.byte[7] = qid<<4;
3505 + for(i=0; i<8; i++) {
3506 + msg.u.byte[3] = i;
3507 + if ((ret = send_message(npe, &msg)))
3513 +int npe_mh_get_stats(struct npe_info *npe, struct mac_plat_info *mp, u32 phys,
3516 + struct npe_mh_msg msg;
3517 + memset(&msg, 0, sizeof(msg));
3518 + msg.u.byte[CMD] = reset ? IX_ETHNPE_RESETSTATS : IX_ETHNPE_GETSTATS;
3519 + msg.u.byte[PORT] = logical_id(mp);
3520 + msg.u.data[1] = cpu_to_npe32(cpu_to_be32(phys));
3522 + return send_message(npe, &msg);
3526 +EXPORT_SYMBOL(npe_mh_status);
3527 +EXPORT_SYMBOL(npe_mh_setportaddr);
3528 +EXPORT_SYMBOL(npe_mh_disable_firewall);
3529 +EXPORT_SYMBOL(npe_mh_set_rxqid);
3530 +EXPORT_SYMBOL(npe_mh_npe_loopback_mode);
3531 +EXPORT_SYMBOL(npe_mh_get_stats);
3532 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/phy.c
3533 ===================================================================
3534 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
3535 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/phy.c 2007-02-21 02:24:35.000000000 -0800
3538 + * phy.c - MDIO functions and mii initialisation
3540 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3542 + * This file is released under the GPLv2
3546 +#include <linux/mutex.h>
3549 +#define MAX_PHYS (1<<5)
3552 + * We must always use the same MAC for accessing the MDIO
3553 + * We may not use each MAC for its PHY :-(
3556 +static struct net_device *phy_dev = NULL;
3557 +static struct mutex mtx;
3559 +/* here we remember if the PHY is alive, to avoid log dumping */
3560 +static int phy_works[MAX_PHYS];
3562 +int mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg)
3564 + struct mac_info *mac;
3571 + mac = netdev_priv(phy_dev);
3572 + cmd = mdio_cmd(phy_addr, phy_reg);
3573 +	mutex_lock(&mtx);	/* _interruptible's return was ignored; on -EINTR the later unlock would release an unheld mutex */
3574 + mac_mdio_cmd_write(mac, cmd);
3575 + while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
3576 + if (++cnt >= 100) {
3577 + printk("%s: PHY[%d] access failed\n",
3578 + dev->name, phy_addr);
3583 + reg = mac_mdio_status_read(mac);
3584 + mutex_unlock(&mtx);
3585 + if (reg & MII_READ_FAIL) {
3586 + if (phy_works[phy_addr]) {
3587 + printk("%s: PHY[%d] unresponsive\n",
3588 + dev->name, phy_addr);
3591 + phy_works[phy_addr] = 0;
3593 + if ( !phy_works[phy_addr]) {
3594 + printk("%s: PHY[%d] responsive again\n",
3595 + dev->name, phy_addr);
3597 + phy_works[phy_addr] = 1;
3599 + return reg & 0xffff;
3603 +mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val)
3605 + struct mac_info *mac;
3612 + mac = netdev_priv(phy_dev);
3613 + cmd = mdio_cmd(phy_addr, phy_reg) | MII_WRITE | val;
3615 +	mutex_lock(&mtx);	/* _interruptible's return was ignored; on -EINTR the later unlock would release an unheld mutex */
3616 + mac_mdio_cmd_write(mac, cmd);
3617 + while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
3618 + if (++cnt >= 100) {
3619 + printk("%s: PHY[%d] access failed\n",
3620 + dev->name, phy_addr);
3625 + mutex_unlock(&mtx);
3628 +void init_mdio(struct net_device *dev, int phy_id)
3630 + struct mac_info *mac = netdev_priv(dev);
3633 + /* All phy operations should use the same MAC
3636 + if (mac->plat->eth_id == 0) {
3639 + for (i=0; i<MAX_PHYS; i++)
3642 + mac->mii.dev = dev;
3643 + mac->mii.phy_id = phy_id;
3644 + mac->mii.phy_id_mask = MAX_PHYS - 1;
3645 + mac->mii.reg_num_mask = 0x1f;
3646 + mac->mii.mdio_read = mdio_read_register;
3647 + mac->mii.mdio_write = mdio_write_register;
3650 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ucode_dl.c
3651 ===================================================================
3652 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
3653 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ucode_dl.c 2007-02-21 02:24:35.000000000 -0800
3656 + * ucode_dl.c - provide an NPE device and a char-dev for microcode download
3658 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3660 + * This file is released under the GPLv2
3663 +#include <linux/kernel.h>
3664 +#include <linux/module.h>
3665 +#include <linux/miscdevice.h>
3666 +#include <linux/platform_device.h>
3667 +#include <linux/fs.h>
3668 +#include <linux/init.h>
3669 +#include <linux/slab.h>
3670 +#include <linux/firmware.h>
3671 +#include <linux/dma-mapping.h>
3672 +#include <linux/byteorder/swab.h>
3673 +#include <asm/uaccess.h>
3674 +#include <asm/io.h>
3676 +#include <linux/ixp_npe.h>
3678 +#define IXNPE_VERSION "IXP4XX NPE driver Version 0.3.0"
3680 +#define DL_MAGIC 0xfeedf00d
3681 +#define DL_MAGIC_SWAP 0x0df0edfe
3683 +#define EOF_BLOCK 0xf
3684 +#define IMG_SIZE(image) (((image)->size * sizeof(u32)) + \
3685 + sizeof(struct dl_image))
3706 + struct dl_block block[0];
3710 +struct dl_codeblock {
3716 +static struct platform_driver ixp4xx_npe_driver;
3718 +static int match_by_npeid(struct device *dev, void *id)
3720 + struct npe_info *npe = dev_get_drvdata(dev);
3723 + return (npe->plat->id == *(int*)id);
3726 +struct device *get_npe_by_id(int id)
3728 + struct device *dev = driver_find_device(&ixp4xx_npe_driver.driver,
3729 + NULL, &id, match_by_npeid);
3731 + struct npe_info *npe = dev_get_drvdata(dev);
3732 + if (!try_module_get(THIS_MODULE)) {
3741 +void return_npe_dev(struct device *dev)
3743 + struct npe_info *npe = dev_get_drvdata(dev);
3745 + module_put(THIS_MODULE);
3750 +download_block(struct npe_info *npe, struct dl_codeblock *cb, unsigned type)
3757 + cmd = IX_NPEDL_EXCTL_CMD_WR_DATA_MEM;
3758 + if (cb->npe_addr + cb->size > npe->plat->data_size) {
3759 + printk(KERN_INFO "Data size too large: %d+%d > %d\n",
3760 + cb->npe_addr, cb->size, npe->plat->data_size);
3765 + cmd = IX_NPEDL_EXCTL_CMD_WR_INS_MEM;
3766 + if (cb->npe_addr + cb->size > npe->plat->inst_size) {
3767 + printk(KERN_INFO "Instr size too large: %d+%d > %d\n",
3768 + cb->npe_addr, cb->size, npe->plat->inst_size);
3773 + printk(KERN_INFO "Unknown CMD: %d\n", type);
3777 + for (i=0; i < cb->size; i++) {
3778 + npe_write_cmd(npe, cb->npe_addr + i, cb->data[i], cmd);
3784 +static int store_npe_image(struct dl_image *image, struct device *dev)
3786 + struct dl_block *blk;
3787 + struct dl_codeblock *cb;
3788 + struct npe_info *npe;
3792 + dev = get_npe_by_id( (image->id >> 24) & 0xf);
3793 + return_npe_dev(dev);
3798 + npe = dev_get_drvdata(dev);
3799 + if (npe->loaded && (npe->usage > 0)) {
3800 + printk(KERN_INFO "Cowardly refusing to reload an Image "
3801 + "into the used and running %s\n", npe->plat->name);
3802 + return 0; /* indicate success anyway... */
3804 + if (!cpu_is_ixp46x() && ((image->id >> 28) & 0xf)) {
3805 + printk(KERN_INFO "IXP46x NPE image ignored on IXP42x\n");
3812 + for (blk = image->u.block; blk->type != EOF_BLOCK; blk++) {
3813 + if (blk->offset > image->size) {
3814 + printk(KERN_INFO "Block offset out of range\n");
3817 + cb = (struct dl_codeblock*)&image->u.data[blk->offset];
3818 + if (blk->offset + cb->size + 2 > image->size) {
3819 + printk(KERN_INFO "Codeblock size out of range\n");
3822 + if ((ret = download_block(npe, cb, blk->type)))
3825 + *(u32*)npe->img_info = cpu_to_be32(image->id);
3828 + printk(KERN_INFO "Image loaded to %s Func:%x, Rel: %x:%x, Status: %x\n",
3829 + npe->plat->name, npe->img_info[1], npe->img_info[2],
3830 + npe->img_info[3], npe_status(npe));
3831 + if (npe_mh_status(npe)) {
3832 + printk(KERN_ERR "%s not responding\n", npe->plat->name);
3838 +static int ucode_open(struct inode *inode, struct file *file)
3840 + file->private_data = kmalloc(sizeof(struct dl_image), GFP_KERNEL);
3841 + if (!file->private_data)
3846 +static int ucode_close(struct inode *inode, struct file *file)
3848 + kfree(file->private_data);
3852 +static ssize_t ucode_write(struct file *file, const char __user *buf,
3853 + size_t count, loff_t *ppos)
3857 + struct dl_image *image;
3859 + const char __user *cbuf = buf;
3861 + u.data = file->private_data;
3865 + if (*ppos < sizeof(struct dl_image)) {
3866 + len = sizeof(struct dl_image) - *ppos;
3867 + len = len > count ? count : len;
3868 + if (copy_from_user(u.data + *ppos, cbuf, len))
3874 + } else if (*ppos == sizeof(struct dl_image)) {
3876 + if (u.image->magic == DL_MAGIC_SWAP) {
3877 + printk(KERN_INFO "swapped image found\n");
3878 + u.image->id = swab32(u.image->id);
3879 + u.image->size = swab32(u.image->size);
3880 + } else if (u.image->magic != DL_MAGIC) {
3881 + printk(KERN_INFO "Bad magic:%x\n",
3885 + len = IMG_SIZE(u.image);
3886 + data = kmalloc(len, GFP_KERNEL);
3889 + memcpy(data, u.data, *ppos);
3891 + u.data = (char*)data;
3892 + file->private_data = data;
3894 + len = IMG_SIZE(u.image) - *ppos;
3895 + len = len > count ? count : len;
3896 + if (copy_from_user(u.data + *ppos, cbuf, len))
3901 + if (*ppos == IMG_SIZE(u.image)) {
3904 + if (u.image->magic == DL_MAGIC_SWAP) {
3905 + for (i=0; i<u.image->size; i++) {
3906 + u.image->u.data[i] =
3907 + swab32(u.image->u.data[i]);
3909 + u.image->magic = swab32(u.image->magic);
3911 + ret = store_npe_image(u.image, NULL);
3913 + printk(KERN_INFO "Error in NPE image: %x\n",
3919 + return (cbuf-buf);
3922 +static void npe_firmware_probe(struct device *dev)
3924 +#if (defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)) \
3925 + && defined(MODULE)
3926 + const struct firmware *fw_entry;
3927 + struct npe_info *npe = dev_get_drvdata(dev);
3928 + struct dl_image *image;
3931 + if (request_firmware(&fw_entry, npe->plat->name, dev) != 0) {
3934 + image = (struct dl_image*)fw_entry->data;
3935 + /* Sanity checks */
3936 + if (fw_entry->size < sizeof(struct dl_image)) {
3937 + printk(KERN_ERR "Firmware error: too small\n");
3940 + if (image->magic == DL_MAGIC_SWAP) {
3941 + printk(KERN_INFO "swapped image found\n");
3942 + image->id = swab32(image->id);
3943 + image->size = swab32(image->size);
3944 + } else if (image->magic != DL_MAGIC) {
3945 + printk(KERN_ERR "Bad magic:%x\n", image->magic);
3948 + if (IMG_SIZE(image) != fw_entry->size) {
3949 + printk(KERN_ERR "Firmware error: bad size\n");
3952 + if (((image->id >> 24) & 0xf) != npe->plat->id) {
3953 + printk(KERN_ERR "NPE id missmatch\n");
3956 + if (image->magic == DL_MAGIC_SWAP) {
3957 + for (i=0; i<image->size; i++) {
3958 + image->u.data[i] = swab32(image->u.data[i]);
3960 + image->magic = swab32(image->magic);
3963 + ret = store_npe_image(image, dev);
3966 + printk(KERN_ERR "Error downloading Firmware for %s\n",
3969 + release_firmware(fw_entry);
3973 +static void disable_npe_irq(struct npe_info *npe)
3976 + reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL);
3977 + reg &= ~(IX_NPEMH_NPE_CTL_OFE | IX_NPEMH_NPE_CTL_IFE);
3978 + reg |= IX_NPEMH_NPE_CTL_OFEWE | IX_NPEMH_NPE_CTL_IFEWE;
3979 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL, reg);
3982 +static ssize_t show_npe_state(struct device *dev, struct device_attribute *attr,
3985 + struct npe_info *npe = dev_get_drvdata(dev);
3987 + strcpy(buf, npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN ?
3988 + "start\n" : "stop\n");
3989 + return strlen(buf);
3992 +static ssize_t set_npe_state(struct device *dev, struct device_attribute *attr,
3993 + const char *buf, size_t count)
3995 + struct npe_info *npe = dev_get_drvdata(dev);
3998 + printk("%s in use: read-only\n", npe->plat->name);
4001 + if (!strncmp(buf, "start", 5)) {
4004 + if (!strncmp(buf, "stop", 4)) {
4007 + if (!strncmp(buf, "reset", 5)) {
4014 +static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_npe_state, set_npe_state);
4016 +static int npe_probe(struct platform_device *pdev)
4018 + struct resource *res;
4019 + struct npe_info *npe;
4020 + struct npe_plat_data *plat = pdev->dev.platform_data;
4021 + int err, size, ret=0;
4023 + if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
4026 + if (!(npe = kzalloc(sizeof(struct npe_info), GFP_KERNEL)))
4029 + size = res->end - res->start +1;
4030 + npe->res = request_mem_region(res->start, size, plat->name);
4033 + printk(KERN_ERR "Failed to get memregion(%x, %x)\n",
4034 + res->start, size);
4038 + npe->addr = ioremap(res->start, size);
4041 + printk(KERN_ERR "Failed to ioremap(%x, %x)\n",
4042 + res->start, size);
4046 + pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
4048 + platform_set_drvdata(pdev, npe);
4050 + err = device_create_file(&pdev->dev, &dev_attr_state);
4055 + disable_npe_irq(npe);
4058 + npe_firmware_probe(&pdev->dev);
4063 + release_resource(npe->res);
4069 +static struct file_operations ucode_dl_fops = {
4070 + .owner = THIS_MODULE,
4071 + .write = ucode_write,
4072 + .open = ucode_open,
4073 + .release = ucode_close,
4076 +static struct miscdevice ucode_dl_dev = {
4077 + .minor = MICROCODE_MINOR,
4078 + .name = "ixp4xx_ucode",
4079 + .fops = &ucode_dl_fops,
4082 +static int npe_remove(struct platform_device *pdev)
4084 + struct npe_info *npe = platform_get_drvdata(pdev);
4086 + device_remove_file(&pdev->dev, &dev_attr_state);
4088 + iounmap(npe->addr);
4089 + release_resource(npe->res);
4094 +static struct platform_driver ixp4xx_npe_driver = {
4096 + .name = "ixp4xx_npe",
4097 + .owner = THIS_MODULE,
4099 + .probe = npe_probe,
4100 + .remove = npe_remove,
4103 +static int __init init_npedriver(void)
4106 + if ((ret = misc_register(&ucode_dl_dev))){
4107 + printk(KERN_ERR "Failed to register misc device %d\n",
4111 + if ((ret = platform_driver_register(&ixp4xx_npe_driver)))
4112 + misc_deregister(&ucode_dl_dev);
4114 + printk(KERN_INFO IXNPE_VERSION " initialized\n");
4120 +static void __exit finish_npedriver(void)
4122 + misc_deregister(&ucode_dl_dev);
4123 + platform_driver_unregister(&ixp4xx_npe_driver);
4126 +module_init(init_npedriver);
4127 +module_exit(finish_npedriver);
4129 +MODULE_LICENSE("GPL");
4130 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
4132 +EXPORT_SYMBOL(get_npe_by_id);
4133 +EXPORT_SYMBOL(return_npe_dev);
4134 Index: linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h
4135 ===================================================================
4136 --- linux-2.6.21-rc1-arm.orig/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h 2007-02-21 02:24:18.000000000 -0800
4137 +++ linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h 2007-02-21 02:24:35.000000000 -0800
4139 #ifndef _ASM_ARM_IXP4XX_H_
4140 #define _ASM_ARM_IXP4XX_H_
4142 +#include "npe_regs.h"
4145 * IXP4xx Linux Memory Map:
4151 + * PCI Memory Space
4153 +#define IXP4XX_PCIMEM_BASE_PHYS (0x48000000)
4154 +#define IXP4XX_PCIMEM_REGION_SIZE (0x04000000)
4155 +#define IXP4XX_PCIMEM_BAR_SIZE (0x01000000)
4159 #define IXP4XX_QMGR_BASE_PHYS (0x60000000)
4160 @@ -322,7 +330,13 @@
4161 #define PCI_ATPDMA0_LENADDR_OFFSET 0x48
4162 #define PCI_ATPDMA1_AHBADDR_OFFSET 0x4C
4163 #define PCI_ATPDMA1_PCIADDR_OFFSET 0x50
4164 -#define PCI_ATPDMA1_LENADDR_OFFSET 0x54
4165 +#define PCI_ATPDMA1_LENADDR_OFFSET 0x54
4166 +#define PCI_PTADMA0_AHBADDR_OFFSET 0x58
4167 +#define PCI_PTADMA0_PCIADDR_OFFSET 0x5c
4168 +#define PCI_PTADMA0_LENADDR_OFFSET 0x60
4169 +#define PCI_PTADMA1_AHBADDR_OFFSET 0x64
4170 +#define PCI_PTADMA1_PCIADDR_OFFSET 0x68
4171 +#define PCI_PTADMA1_LENADDR_OFFSET 0x6c
4174 * PCI Control/Status Registers
4175 @@ -351,6 +365,12 @@
4176 #define PCI_ATPDMA1_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
4177 #define PCI_ATPDMA1_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
4178 #define PCI_ATPDMA1_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
4179 +#define PCI_PTADMA0_AHBADDR IXP4XX_PCI_CSR(PCI_PTADMA0_AHBADDR_OFFSET)
4180 +#define PCI_PTADMA0_PCIADDR IXP4XX_PCI_CSR(PCI_PTADMA0_PCIADDR_OFFSET)
4181 +#define PCI_PTADMA0_LENADDR IXP4XX_PCI_CSR(PCI_PTADMA0_LENADDR_OFFSET)
4182 +#define PCI_PTADMA1_AHBADDR IXP4XX_PCI_CSR(PCI_PTADMA1_AHBADDR_OFFSET)
4183 +#define PCI_PTADMA1_PCIADDR IXP4XX_PCI_CSR(PCI_PTADMA1_PCIADDR_OFFSET)
4184 +#define PCI_PTADMA1_LENADDR IXP4XX_PCI_CSR(PCI_PTADMA1_LENADDR_OFFSET)
4187 * PCI register values and bit definitions
4188 @@ -607,6 +627,34 @@
4190 #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
4193 +/* Fuse Bits of IXP_EXP_CFG2 */
4194 +#define IX_FUSE_RCOMP (1 << 0)
4195 +#define IX_FUSE_USB (1 << 1)
4196 +#define IX_FUSE_HASH (1 << 2)
4197 +#define IX_FUSE_AES (1 << 3)
4198 +#define IX_FUSE_DES (1 << 4)
4199 +#define IX_FUSE_HDLC (1 << 5)
4200 +#define IX_FUSE_AAL (1 << 6)
4201 +#define IX_FUSE_HSS (1 << 7)
4202 +#define IX_FUSE_UTOPIA (1 << 8)
4203 +#define IX_FUSE_ETH0 (1 << 9)
4204 +#define IX_FUSE_ETH1 (1 << 10)
4205 +#define IX_FUSE_NPEA (1 << 11)
4206 +#define IX_FUSE_NPEB (1 << 12)
4207 +#define IX_FUSE_NPEC (1 << 13)
4208 +#define IX_FUSE_PCI (1 << 14)
4209 +#define IX_FUSE_ECC (1 << 15)
4210 +#define IX_FUSE_UTOPIA_PHY_LIMIT (3 << 16)
4211 +#define IX_FUSE_USB_HOST (1 << 18)
4212 +#define IX_FUSE_NPEA_ETH (1 << 19)
4213 +#define IX_FUSE_NPEB_ETH (1 << 20)
4214 +#define IX_FUSE_RSA (1 << 21)
4215 +#define IX_FUSE_XSCALE_MAX_FREQ (3 << 22)
4217 +#define IX_FUSE_IXP46X_ONLY IX_FUSE_XSCALE_MAX_FREQ | IX_FUSE_RSA | \
4218 + IX_FUSE_NPEB_ETH | IX_FUSE_NPEA_ETH | IX_FUSE_USB_HOST | IX_FUSE_ECC
4220 #ifndef __ASSEMBLY__
4221 static inline int cpu_is_ixp46x(void)
4223 @@ -620,6 +668,15 @@
4228 +static inline u32 ix_fuse(void)
4230 + unsigned int fuses = ~(*IXP4XX_EXP_CFG2);
4231 + if (!cpu_is_ixp46x())
4232 + fuses &= ~IX_FUSE_IXP46X_ONLY;
4239 Index: linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/npe_regs.h
4240 ===================================================================
4241 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
4242 +++ linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/npe_regs.h 2007-02-21 02:24:35.000000000 -0800
4247 +/* Execution Address */
4248 +#define IX_NPEDL_REG_OFFSET_EXAD 0x00
4249 +/* Execution Data */
4250 +#define IX_NPEDL_REG_OFFSET_EXDATA 0x04
4251 +/* Execution Control */
4252 +#define IX_NPEDL_REG_OFFSET_EXCTL 0x08
4253 +/* Execution Count */
4254 +#define IX_NPEDL_REG_OFFSET_EXCT 0x0C
4255 +/* Action Point 0 */
4256 +#define IX_NPEDL_REG_OFFSET_AP0 0x10
4257 +/* Action Point 1 */
4258 +#define IX_NPEDL_REG_OFFSET_AP1 0x14
4259 +/* Action Point 2 */
4260 +#define IX_NPEDL_REG_OFFSET_AP2 0x18
4261 +/* Action Point 3 */
4262 +#define IX_NPEDL_REG_OFFSET_AP3 0x1C
4263 +/* Watchpoint FIFO */
4264 +#define IX_NPEDL_REG_OFFSET_WFIFO 0x20
4266 +#define IX_NPEDL_REG_OFFSET_WC 0x24
4267 +/* Profile Count */
4268 +#define IX_NPEDL_REG_OFFSET_PROFCT 0x28
4270 +/* Messaging Status */
4271 +#define IX_NPEDL_REG_OFFSET_STAT 0x2C
4272 +/* Messaging Control */
4273 +#define IX_NPEDL_REG_OFFSET_CTL 0x30
4274 +/* Mailbox Status */
4275 +#define IX_NPEDL_REG_OFFSET_MBST 0x34
4276 +/* messaging in/out FIFO */
4277 +#define IX_NPEDL_REG_OFFSET_FIFO 0x38
4280 +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IF 0x00100000
4281 +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IE 0x00080000
4282 +#define IX_NPEDL_MASK_ECS_REG_0_ACTIVE 0x80000000
4284 +#define IX_NPEDL_EXCTL_CMD_NPE_STEP 0x01
4285 +#define IX_NPEDL_EXCTL_CMD_NPE_START 0x02
4286 +#define IX_NPEDL_EXCTL_CMD_NPE_STOP 0x03
4287 +#define IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE 0x04
4288 +#define IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT 0x0C
4289 +#define IX_NPEDL_EXCTL_CMD_RD_INS_MEM 0x10
4290 +#define IX_NPEDL_EXCTL_CMD_WR_INS_MEM 0x11
4291 +#define IX_NPEDL_EXCTL_CMD_RD_DATA_MEM 0x12
4292 +#define IX_NPEDL_EXCTL_CMD_WR_DATA_MEM 0x13
4293 +#define IX_NPEDL_EXCTL_CMD_RD_ECS_REG 0x14
4294 +#define IX_NPEDL_EXCTL_CMD_WR_ECS_REG 0x15
4296 +#define IX_NPEDL_EXCTL_STATUS_RUN 0x80000000
4297 +#define IX_NPEDL_EXCTL_STATUS_STOP 0x40000000
4298 +#define IX_NPEDL_EXCTL_STATUS_CLEAR 0x20000000
4300 +#define IX_NPEDL_MASK_WFIFO_VALID 0x80000000
4301 +#define IX_NPEDL_MASK_STAT_OFNE 0x00010000
4302 +#define IX_NPEDL_MASK_STAT_IFNE 0x00080000
4304 +#define IX_NPEDL_ECS_DBG_CTXT_REG_0 0x0C
4305 +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_0 0x04
4306 +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_0 0x08
4308 +/* NPE control register bit definitions */
4309 +#define IX_NPEMH_NPE_CTL_OFE (1 << 16) /**< OutFifoEnable */
4310 +#define IX_NPEMH_NPE_CTL_IFE (1 << 17) /**< InFifoEnable */
4311 +#define IX_NPEMH_NPE_CTL_OFEWE (1 << 24) /**< OutFifoEnableWriteEnable */
4312 +#define IX_NPEMH_NPE_CTL_IFEWE (1 << 25) /**< InFifoEnableWriteEnable */
4314 +/* NPE status register bit definitions */
4315 +#define IX_NPEMH_NPE_STAT_OFNE (1 << 16) /**< OutFifoNotEmpty */
4316 +#define IX_NPEMH_NPE_STAT_IFNF (1 << 17) /**< InFifoNotFull */
4317 +#define IX_NPEMH_NPE_STAT_OFNF (1 << 18) /**< OutFifoNotFull */
4318 +#define IX_NPEMH_NPE_STAT_IFNE (1 << 19) /**< InFifoNotEmpty */
4319 +#define IX_NPEMH_NPE_STAT_MBINT (1 << 20) /**< Mailbox interrupt */
4320 +#define IX_NPEMH_NPE_STAT_IFINT (1 << 21) /**< InFifo interrupt */
4321 +#define IX_NPEMH_NPE_STAT_OFINT (1 << 22) /**< OutFifo interrupt */
4322 +#define IX_NPEMH_NPE_STAT_WFINT (1 << 23) /**< WatchFifo interrupt */
4326 Index: linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/platform.h
4327 ===================================================================
4328 --- linux-2.6.21-rc1-arm.orig/include/asm-arm/arch-ixp4xx/platform.h 2007-02-21 02:24:18.000000000 -0800
4329 +++ linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/platform.h 2007-02-21 02:24:35.000000000 -0800
4331 unsigned long scl_pin;
4334 +struct npe_plat_data {
4338 + int id; /* Node ID */
4341 +struct mac_plat_info {
4342 + int npe_id; /* Node ID of the NPE for this port */
4343 + int port_id; /* Port ID for NPE-B @ ixp465 */
4344 + int eth_id; /* Physical ID */
4345 + int phy_id; /* ID of the connected PHY (PCB/platform dependent) */
4346 + int rxq_id; /* Queue ID of the RX-free q */
4347 + int rxdoneq_id; /* where incoming packets are returned */
4348 + int txq_id; /* Where to push the outgoing packets */
4349 + unsigned char hwaddr[6]; /* Desired hardware address */
4354 * This structure provide a means for the board setup code
4355 + * to give information to the pata_ixp4xx driver. It is
4356 Index: linux-2.6.21-rc1-arm/include/linux/ixp_crypto.h
4357 ===================================================================
4358 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
4359 +++ linux-2.6.21-rc1-arm/include/linux/ixp_crypto.h 2007-02-21 02:24:35.000000000 -0800
4362 +#ifndef IX_CRYPTO_H
4363 +#define IX_CRYPTO_H
4365 +#define MAX_KEYLEN 64
4366 +#define NPE_CTX_LEN 80
4367 +#define AES_BLOCK128 16
4369 +#define NPE_OP_HASH_GEN_ICV 0x50
4370 +#define NPE_OP_ENC_GEN_KEY 0xc9
4373 +#define NPE_OP_HASH_VERIFY 0x01
4374 +#define NPE_OP_CCM_ENABLE 0x04
4375 +#define NPE_OP_CRYPT_ENABLE 0x08
4376 +#define NPE_OP_HASH_ENABLE 0x10
4377 +#define NPE_OP_NOT_IN_PLACE 0x20
4378 +#define NPE_OP_HMAC_DISABLE 0x40
4379 +#define NPE_OP_CRYPT_ENCRYPT 0x80
4381 +#define MOD_ECB 0x0000
4382 +#define MOD_CTR 0x1000
4383 +#define MOD_CBC_ENC 0x2000
4384 +#define MOD_CBC_DEC 0x3000
4385 +#define MOD_CCM_ENC 0x4000
4386 +#define MOD_CCM_DEC 0x5000
4388 +#define ALGO_AES 0x0800
4389 +#define CIPH_DECR 0x0000
4390 +#define CIPH_ENCR 0x0400
4392 +#define MOD_DES 0x0000
4393 +#define MOD_TDEA2 0x0100
4394 +#define MOD_TDEA3 0x0200
4395 +#define MOD_AES128 0x0000
4396 +#define MOD_AES192 0x0100
4397 +#define MOD_AES256 0x0200
4399 +#define KEYLEN_128 4
4400 +#define KEYLEN_192 6
4401 +#define KEYLEN_256 8
4403 +#define CIPHER_TYPE_NULL 0
4404 +#define CIPHER_TYPE_DES 1
4405 +#define CIPHER_TYPE_3DES 2
4406 +#define CIPHER_TYPE_AES 3
4408 +#define CIPHER_MODE_ECB 1
4409 +#define CIPHER_MODE_CTR 2
4410 +#define CIPHER_MODE_CBC 3
4411 +#define CIPHER_MODE_CCM 4
4413 +#define HASH_TYPE_NULL 0
4414 +#define HASH_TYPE_MD5 1
4415 +#define HASH_TYPE_SHA1 2
4416 +#define HASH_TYPE_CBCMAC 3
4418 +#define OP_REG_DONE 1
4419 +#define OP_REGISTER 2
4420 +#define OP_PERFORM 3
4422 +#define STATE_UNREGISTERED 0
4423 +#define STATE_REGISTERED 1
4424 +#define STATE_UNLOADING 2
4427 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4428 + u8 mode; /* NPE operation */
4434 + u8 mode; /* NPE operation */
4436 + u8 iv[16]; /* IV for CBC mode or CTR IV for CTR mode */
4443 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4444 + u16 auth_offs; /* Authentication start offset */
4445 + u16 auth_len; /* Authentication data length */
4446 + u16 crypt_offs; /* Cryption start offset */
4447 + u16 crypt_len; /* Cryption data length */
4449 + u16 auth_len; /* Authentication data length */
4450 + u16 auth_offs; /* Authentication start offset */
4451 + u16 crypt_len; /* Cryption data length */
4452 + u16 crypt_offs; /* Cryption start offset */
4454 + u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
4455 + u32 crypto_ctx; /* NPE Crypto Param structure address */
4457 + /* Used by Host */
4458 + struct ix_sa_ctx *sa_ctx;
4462 +struct npe_crypt_cont {
4464 + struct crypt_ctl crypt;
4465 + u8 rev_aes_key[NPE_CTX_LEN];
4467 + struct npe_crypt_cont *next;
4468 + struct npe_crypt_cont *virt;
4472 +struct ix_hash_algo {
4477 + unsigned char *icv;
4481 +struct ix_cipher_algo {
4492 + u8 key[MAX_KEYLEN];
4496 +struct ix_sa_master {
4497 + struct device *npe_dev;
4498 + struct qm_queue *sendq;
4499 + struct qm_queue *recvq;
4500 + struct dma_pool *dmapool;
4501 + struct npe_crypt_cont *pool;
4507 + unsigned char *npe_ctx;
4508 + dma_addr_t npe_ctx_phys;
4514 + struct list_head list;
4515 + struct ix_sa_master *master;
4517 + const struct ix_hash_algo *h_algo;
4518 + const struct ix_cipher_algo *c_algo;
4519 + struct ix_key c_key;
4520 + struct ix_key h_key;
4524 + struct ix_sa_dir encrypt;
4525 + struct ix_sa_dir decrypt;
4527 + struct npe_crypt_cont *rev_aes;
4533 + void(*reg_cb)(struct ix_sa_ctx*, int);
4534 + void(*perf_cb)(struct ix_sa_ctx*, void*, int);
4538 +const struct ix_hash_algo *ix_hash_by_id(int type);
4539 +const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode);
4541 +struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags);
4542 +void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx);
4544 +int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr,
4545 + int datalen, int c_offs, int c_len, int a_offs, int a_len,
4546 + int hmac, char *iv, int encrypt);
4548 +int ix_sa_ctx_setup_cipher_auth(struct ix_sa_ctx *sa_ctx,
4549 + const struct ix_cipher_algo *cipher,
4550 + const struct ix_hash_algo *auth, int len);
4553 Index: linux-2.6.21-rc1-arm/include/linux/ixp_npe.h
4554 ===================================================================
4555 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
4556 +++ linux-2.6.21-rc1-arm/include/linux/ixp_npe.h 2007-02-21 02:24:35.000000000 -0800
4559 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
4561 + * This file is released under the GPLv2
4564 +#ifndef NPE_DEVICE_H
4565 +#define NPE_DEVICE_H
4567 +#include <linux/miscdevice.h>
4568 +#include <asm/hardware.h>
4571 +#undef CONFIG_NPE_ADDRESS_COHERENT
4573 +#define CONFIG_NPE_ADDRESS_COHERENT
4576 +#if defined(__ARMEB__) || defined (CONFIG_NPE_ADDRESS_COHERENT)
4577 +#define npe_to_cpu32(x) (x)
4578 +#define npe_to_cpu16(x) (x)
4579 +#define cpu_to_npe32(x) (x)
4580 +#define cpu_to_npe16(x) (x)
4582 +#error NPE_DATA_COHERENT
4583 +#define NPE_DATA_COHERENT
4584 +#define npe_to_cpu32(x) be32_to_cpu(x)
4585 +#define npe_to_cpu16(x) be16_to_cpu(x)
4586 +#define cpu_to_npe32(x) cpu_to_be32(x)
4587 +#define cpu_to_npe16(x) cpu_to_be16(x)
4592 + struct resource *res;
4593 + void __iomem *addr;
4594 + struct npe_plat_data *plat;
4603 +static inline void npe_reg_write(struct npe_info *npe, u32 reg, u32 val)
4605 + *(volatile u32*)((u8*)(npe->addr) + reg) = val;
4608 +static inline u32 npe_reg_read(struct npe_info *npe, u32 reg)
4610 + return *(volatile u32*)((u8*)(npe->addr) + reg);
4613 +static inline u32 npe_status(struct npe_info *npe)
4615 + return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCTL);
4618 +/* ixNpeDlNpeMgrCommandIssue */
4619 +static inline void npe_write_exctl(struct npe_info *npe, u32 cmd)
4621 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4623 +/* ixNpeDlNpeMgrWriteCommandIssue */
4625 +npe_write_cmd(struct npe_info *npe, u32 addr, u32 data, int cmd)
4627 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXDATA, data);
4628 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
4629 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4631 +/* ixNpeDlNpeMgrReadCommandIssue */
4633 +npe_read_cmd(struct npe_info *npe, u32 addr, int cmd)
4635 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
4636 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4637 + /* Intel reads the data twice - so do we... */
4638 + npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
4639 + return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
4642 +/* ixNpeDlNpeMgrExecAccRegWrite */
4643 +static inline void npe_write_ecs_reg(struct npe_info *npe, u32 addr, u32 data)
4645 + npe_write_cmd(npe, addr, data, IX_NPEDL_EXCTL_CMD_WR_ECS_REG);
4647 +/* ixNpeDlNpeMgrExecAccRegRead */
4648 +static inline u32 npe_read_ecs_reg(struct npe_info *npe, u32 addr)
4650 + return npe_read_cmd(npe, addr, IX_NPEDL_EXCTL_CMD_RD_ECS_REG);
4653 +extern void npe_stop(struct npe_info *npe);
4654 +extern void npe_start(struct npe_info *npe);
4655 +extern void npe_reset(struct npe_info *npe);
4657 +extern struct device *get_npe_by_id(int id);
4658 +extern void return_npe_dev(struct device *dev);
4662 +npe_mh_status(struct npe_info *npe);
4664 +npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp, u8 *macaddr);
4666 +npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp);
4668 +npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid);
4670 +npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp, int enable);
4672 +npe_mh_get_stats(struct npe_info *npe, struct mac_plat_info *mp, u32 phys, int reset);
4675 Index: linux-2.6.21-rc1-arm/include/linux/ixp_qmgr.h
4676 ===================================================================
4677 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
4678 +++ linux-2.6.21-rc1-arm/include/linux/ixp_qmgr.h 2007-02-21 02:24:35.000000000 -0800
4681 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
4683 + * This file is released under the GPLv2
4689 +#include <linux/skbuff.h>
4690 +#include <linux/list.h>
4691 +#include <linux/if_ether.h>
4692 +#include <linux/spinlock.h>
4693 +#include <linux/platform_device.h>
4694 +#include <linux/ixp_npe.h>
4695 +#include <asm/atomic.h>
4697 +/* All offsets are in 32bit words */
4698 +#define QUE_LOW_STAT0 0x100 /* 4x Status of the 32 lower queues 0-31 */
4699 +#define QUE_UO_STAT0 0x104 /* 2x Underflow/Overflow status bits*/
4700 +#define QUE_UPP_STAT0 0x106 /* 2x Status of the 32 upper queues 32-63 */
4701 +#define INT0_SRC_SELREG0 0x108 /* 4x */
4702 +#define QUE_IE_REG0 0x10c /* 2x */
4703 +#define QUE_INT_REG0 0x10e /* 2x IRQ reg, write 1 to reset IRQ */
4705 +#define IX_QMGR_QCFG_BASE 0x800
4706 +#define IX_QMGR_QCFG_SIZE 0x40
4707 +#define IX_QMGR_SRAM_SPACE (IX_QMGR_QCFG_BASE + IX_QMGR_QCFG_SIZE)
4709 +#define MAX_QUEUES 32 /* first, we only support the lower 32 queues */
4713 + Q_IRQ_ID_E = 0, /* Queue Empty due to last read */
4714 + Q_IRQ_ID_NE, /* Queue Nearly Empty due to last read */
4715 + Q_IRQ_ID_NF, /* Queue Nearly Full due to last write */
4716 + Q_IRQ_ID_F, /* Queue Full due to last write */
4717 + Q_IRQ_ID_NOT_E, /* Queue Not Empty due to last write */
4718 + Q_IRQ_ID_NOT_NE, /* Queue Not Nearly Empty due to last write */
4719 + Q_IRQ_ID_NOT_NF, /* Queue Not Nearly Full due to last read */
4720 + Q_IRQ_ID_NOT_F /* Queue Not Full due to last read */
4723 +extern struct qm_queue *request_queue(int qid, int len);
4724 +extern void release_queue(struct qm_queue *queue);
4725 +extern int queue_set_irq_src(struct qm_queue *queue, int flag);
4726 +extern void queue_set_watermarks(struct qm_queue *, unsigned ne, unsigned nf);
4727 +extern int queue_len(struct qm_queue *queue);
4732 +typedef void(*queue_cb)(struct qm_queue *);
4735 + int addr; /* word offset from IX_QMGR_SRAM_SPACE */
4736 + int len; /* size in words */
4737 + int id; /* Q Id */
4738 + u32 __iomem *acc_reg;
4739 + struct device *dev;
4745 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4757 + u8 dest_mac[ETH_ALEN];
4758 + u8 src_mac[ETH_ALEN];
4773 + u8 dest_mac[ETH_ALEN];
4774 + u8 src_mac[ETH_ALEN];
4779 + struct eth_ctl eth;
4781 + struct npe_cont *next;
4782 + struct npe_cont *virt;
4787 + u32 __iomem *addr;
4788 + struct resource *res;
4789 + struct qm_queue *queues[MAX_QUEUES];
4791 + struct npe_cont *pool;
4792 + struct dma_pool *dmapool;
4796 +static inline void queue_write_cfg_reg(struct qm_queue *queue, u32 val)
4798 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4799 + *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id) = val;
4801 +static inline u32 queue_read_cfg_reg(struct qm_queue *queue)
4803 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4804 + return *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
4807 +static inline void queue_ack_irq(struct qm_queue *queue)
4809 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4810 + *(qmgr->addr + QUE_INT_REG0) = 1 << queue->id;
4813 +static inline void queue_enable_irq(struct qm_queue *queue)
4815 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4816 + *(qmgr->addr + QUE_IE_REG0) |= 1 << queue->id;
4819 +static inline void queue_disable_irq(struct qm_queue *queue)
4821 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4822 + *(qmgr->addr + QUE_IE_REG0) &= ~(1 << queue->id);
4825 +static inline void queue_put_entry(struct qm_queue *queue, u32 entry)
4827 + *(queue->acc_reg) = npe_to_cpu32(entry);
4830 +static inline u32 queue_get_entry(struct qm_queue *queue)
4832 + return cpu_to_npe32(*queue->acc_reg);
4835 +static inline struct npe_cont *qmgr_get_cont(struct qm_qmgr *qmgr)
4837 + unsigned long flags;
4838 + struct npe_cont *cont;
4842 + write_lock_irqsave(&qmgr->lock, flags);
4843 + cont = qmgr->pool;
4844 + qmgr->pool = cont->next;
4845 + write_unlock_irqrestore(&qmgr->lock, flags);
4849 +static inline void qmgr_return_cont(struct qm_qmgr *qmgr,struct npe_cont *cont)
4851 + unsigned long flags;
4853 + write_lock_irqsave(&qmgr->lock, flags);
4854 + cont->next = qmgr->pool;
4855 + qmgr->pool = cont;
4856 + write_unlock_irqrestore(&qmgr->lock, flags);
4859 +static inline int queue_stat(struct qm_queue *queue)
4861 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4862 + u32 reg = *(qmgr->addr + QUE_UO_STAT0 + (queue->id >> 4));
4863 + return (reg >> (queue->id & 0xf) << 1) & 3;
4866 +/* Prints the queue state, which is very, very helpful for debugging */
4867 +static inline void queue_state(struct qm_queue *queue)
4869 + u32 val=0, lstat=0;
4871 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4873 + offs = queue->id/8 + QUE_LOW_STAT0;
4874 + val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
4875 + lstat = (*(qmgr->addr + offs) >> ((queue->id % 8)*4)) & 0x0f;
4877 + printk("Qid[%02d]: Wptr=%4x, Rptr=%4x, diff=%4x, Stat:%x\n", queue->id,
4878 + val&0x7f, (val>>7) &0x7f, (val - (val >> 7)) & 0x7f, lstat);