++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
++
++EXPORT_SYMBOL(ix_hash_by_id);
++EXPORT_SYMBOL(ix_cipher_by_id);
++
++EXPORT_SYMBOL(ix_sa_ctx_new);
++EXPORT_SYMBOL(ix_sa_ctx_free);
++EXPORT_SYMBOL(ix_sa_ctx_setup_cipher_auth);
++EXPORT_SYMBOL(ix_sa_crypto_perform);
++
++module_init(init_crypto);
++module_exit(finish_crypto);
++
+diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/ixp4xx_qmgr.c linux-2.6.19/drivers/net/ixp4xx/ixp4xx_qmgr.c
+--- linux-2.6.19.orig/drivers/net/ixp4xx/ixp4xx_qmgr.c 1969-12-31 17:00:00.000000000 -0700
++++ linux-2.6.19/drivers/net/ixp4xx/ixp4xx_qmgr.c 2007-01-12 21:54:40.000000000 -0700
+@@ -0,0 +1,474 @@
++/*
++ * qmgr.c - reimplementation of the queue configuration interface.
++ *
++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
++ *
++ * This file is released under the GPLv2
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/dmapool.h>
++#include <linux/interrupt.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++
++#include <linux/ixp_qmgr.h>
++#include <linux/ixp_npe.h>
++
++#define IXQMGR_VERSION "IXP4XX Q Manager 0.2.1"
++
++static struct device *qmgr_dev = NULL;
++
++static int poll_freq = 4000;
++static int poll_enable = 0;
++static u32 timer_countup_ticks;
++
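++/*
++ * poll_enable=1 replaces the queue manager IRQ with a periodic poll
++ * driven by the XScale PMU timer, firing poll_freq times per second
++ * (see pmu_timer_init() below).
++ */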
++module_param(poll_freq, int, 0644);
++module_param(poll_enable, int, 0644);
++
++int queue_len(struct qm_queue *queue)
++{
++ struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
++ int diff, offs;
++ u32 val;
++
++ offs = queue->id/8 + QUE_LOW_STAT0;
++ val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
++
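++ /*
++ * WRPTR lives in bits 0-6 of the config register and RDPTR in
++ * bits 7-13, so (WRPTR - RDPTR) & 0x7f is the fill level in words
++ */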
++ diff = (val - (val >> 7)) & 0x7f;
++ if (!diff) {
++ /* diff == 0 means either empty or full, must look at STAT0 */
++ if ((*(qmgr->addr + offs) >> ((queue->id % 8)*4)) & 0x04)
++ diff = queue->len;
++ }
++ return diff;
++}
++
++static int request_pool(struct device *dev, int count)
++{
++ int i;
++ struct npe_cont *cont;
++ struct qm_qmgr *qmgr = dev_get_drvdata(dev);
++ dma_addr_t handle;
++
++ for (i=0; i<count; i++) {
++ cont = dma_pool_alloc(qmgr->dmapool, GFP_KERNEL, &handle);
++ if (!cont) {
++ return -ENOMEM;
++ }
++ cont->phys = handle;
++ cont->virt = cont;
++ write_lock(&qmgr->lock);
++ cont->next = qmgr->pool;
++ qmgr->pool = cont;
++ write_unlock(&qmgr->lock);
++ }
++ return 0;
++}
++
++static int free_pool(struct device *dev, int count)
++{
++ int i;
++ struct npe_cont *cont;
++ struct qm_qmgr *qmgr = dev_get_drvdata(dev);
++
++ for (i=0; i<count; i++) {
++ write_lock(&qmgr->lock);
++ cont = qmgr->pool;
++ if (!cont) {
++ write_unlock(&qmgr->lock);
++ return -1;
++ }
++ qmgr->pool = cont->next;
++ write_unlock(&qmgr->lock);
++ dma_pool_free(qmgr->dmapool, cont, cont->phys);
++ }
++ return 0;
++}
++
++static int get_free_qspace(struct qm_qmgr *qmgr, int len)
++{
++ int words = (qmgr->res->end - qmgr->res->start + 1) / 4 -
++ IX_QMGR_SRAM_SPACE;
++ int i,q;
++
++ for (i=0; i<words; i+=len) {
++ for (q=0; q<MAX_QUEUES; q++) {
++ struct qm_queue *qu = qmgr->queues[q];
++ if (!qu)
++ continue;
++ if ((qu->addr + qu->len > i) && (qu->addr < i + len))
++ break;
++ }
++ if (q == MAX_QUEUES) {
++ /* we have a free address */
++ return i;
++ }
++ }
++ return -1;
++}
++
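++/* integer log2: _log2(x) == floor(log2(x)), used to encode BSIZE and watermarks */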
++static inline int _log2(int x)
++{
++ int r=0;
++ while(x>>=1)
++ r++;
++ return r;
++}
++
++/*
++ * 32bit Config registers at IX_QMGR_QUECONFIG_BASE_OFFSET[Qid]
++ * 0 - 6 WRPTR Word offset to baseaddr (index 0 .. BSIZE-1)
++ * 7 -13 RDPTR ''
++ * 14 -21 BADDR baseaddr = (offset to IX_QMGR_QUEBUFFER_SPACE_OFFSET) >> 6
++ * 22 -23 ESIZE entrySizeInWords (always 00 because entrySizeInWords==1)
++ * 24 -25 BSIZE qSizeInWords 00=16, 01=32, 10=64, 11=128
++ * 26 -28 NE nearly empty
++ * 29 -31 NF nearly full
++ */
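++
++/*
++ * Worked example: a 64-word queue (BSIZE = _log2(64/16) = 2) whose
++ * buffer starts baddr words into the SRAM is configured as
++ * (2 << 24) | (baddr << (14-6+2)), since the BADDR field stores the
++ * byte offset divided by 64.
++ */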
++static int conf_q_regs(struct qm_queue *queue)
++{
++ int bsize = _log2(queue->len/16);
++ int baddr = queue->addr + IX_QMGR_QCFG_SIZE;
++
++ /* baddr is a word offset: bytes = baddr << 2, and the BADDR
++ * field stores bytes >> 6 at bit 14, hence << (14-6+2) */
++ queue_write_cfg_reg(queue, (bsize << 24) | (baddr<<(14-6+2)) );
++
++ return 0;
++}
++
++static void pmu_timer_restart(void)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++
++ __asm__(" mcr p14,0,%0,c1,c1,0\n" /* write current counter */
++ : : "r" (timer_countup_ticks));
++
++ __asm__(" mrc p14,0,r1,c4,c1,0; " /* get int enable register */
++ " orr r1,r1,#1; "
++ " mcr p14,0,r1,c5,c1,0; " /* clear overflow */
++ " mcr p14,0,r1,c4,c1,0\n" /* enable interrupts */
++ : : : "r1");
++
++ local_irq_restore(flags);
++}
++
++static void pmu_timer_init(void)
++{
++ u32 controlRegisterMask =
++ BIT(0) | /* enable counters */
++ BIT(2); /* reset clock counter */
++
++ /*
++ * Compute the number of xscale cycles needed between each
++ * PMU IRQ. This is done from the result of an OS calibration loop.
++ *
++ * For 533MHz CPU, 533000000 tick/s / 4000 times/sec = 138250
++ * 4000 times/sec = 37 mbufs/interrupt at line rate
++ * The pmu timer is reset to -138250 = 0xfffde3f6, to trigger an IRQ
++ * when this up counter overflows.
++ *
++ * The multiplication gives a number of instructions per second,
++ * which is close to the processor frequency and hence to the
++ * PMU clock rate.
++ *
++ * 2 is the number of instructions per calibration-loop iteration.
++ *
++ */
++
++ timer_countup_ticks = - ((loops_per_jiffy * HZ * 2) / poll_freq);
++
++ /* enable the CCNT (clock count) timer from the PMU */
++ __asm__(" mcr p14,0,%0,c0,c1,0\n"
++ : : "r" (controlRegisterMask));
++}
++
++static void pmu_timer_disable(void)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++
++ __asm__(" mrc p14,0,r1,c4,c1,0; " /* get int enable register */
++ " and r1,r1,#0x1e; "
++ " mcr p14,0,r1,c4,c1,0\n" /* disable interrupts */
++ : : : "r1");
++ local_irq_restore(flags);
++}
++
++void queue_set_watermarks(struct qm_queue *queue, unsigned ne, unsigned nf)
++{
++ u32 val;
++ /* calculate the register values
++ * 0->0, 1->1, 2->2, 4->3, 8->4, 16->5, ... */
++ ne = _log2(ne<<1) & 0x7;
++ nf = _log2(nf<<1) & 0x7;
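++ /* e.g. ne = 4: _log2(4<<1) = 3, matching the 4->3 mapping above */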
++
++ /* Mask out old watermarks */
++ val = queue_read_cfg_reg(queue) & ~0xfc000000;
++ queue_write_cfg_reg(queue, val | (ne << 26) | (nf << 29));
++}
++
++int queue_set_irq_src(struct qm_queue *queue, int flag)
++{
++ struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
++ u32 reg;
++ int offs, bitoffs;
++
++ /* Q 0-7 are in REG0, 8-15 are in REG1, etc. They occupy 4 bits/Q */
++ offs = queue->id/8 + INT0_SRC_SELREG0;
++ bitoffs = (queue->id % 8)*4;
++
++ reg = *(qmgr->addr + offs) & ~(0xf << bitoffs);
++ *(qmgr->addr + offs) = reg | (flag << bitoffs);
++
++ return 0;
++}
++
++static irqreturn_t irq_qm1(int irq, void *dev_id)
++{
++ struct qm_qmgr *qmgr = dev_id;
++ int offs, reg;
++ struct qm_queue *queue;
++
++ if (poll_enable)
++ pmu_timer_restart();
++
++ reg = *(qmgr->addr + QUE_INT_REG0);
++ while(reg) {
++ /*
++ * find the most significant pending queue bit:
++ * clz counts the leading zeros in "reg",
++ * so 31 - clz is its bit index
++ */
++ asm ("clz %0, %1;" : "=r"(offs) : "r"(reg));
++ offs = 31 - offs;
++ reg &= ~(1 << offs);
++ queue = qmgr->queues[offs];
++ if (likely(queue)) {
++ if (likely(queue->irq_cb)) {
++ queue->irq_cb(queue);
++ } else {
++ printk(KERN_ERR "Missing callback for Q %d\n",
++ offs);
++ }
++ } else {
++ printk(KERN_ERR "IRQ for unregistered Q %d\n", offs);
++ }
++ }
++ return IRQ_HANDLED;
++}
++
++struct qm_queue *request_queue(int qid, int len)
++{
++ int ram;
++ struct qm_qmgr *qmgr;
++ struct qm_queue *queue;
++
++ if (!qmgr_dev)
++ return ERR_PTR(-ENODEV);
++
++ if ((qid < 0) || (qid >= MAX_QUEUES))
++ return ERR_PTR(-ERANGE);
++
++ switch (len) {
++ case 16:
++ case 32:
++ case 64:
++ case 128: break;
++ default : return ERR_PTR(-EINVAL);
++ }
++
++ qmgr = dev_get_drvdata(qmgr_dev);
++
++ if (qmgr->queues[qid]) {
++ /* not an error, just in use already */
++ return NULL;
++ }
++ if ((ram = get_free_qspace(qmgr, len)) < 0) {
++ printk(KERN_ERR "No free SRAM space for this queue\n");
++ return ERR_PTR(-ENOMEM);
++ }
++ if (!(queue = kzalloc(sizeof(struct qm_queue), GFP_KERNEL)))
++ return ERR_PTR(-ENOMEM);
++
++ if (!try_module_get(THIS_MODULE)) {
++ kfree(queue);
++ return ERR_PTR(-ENODEV);
++ }
++
++ queue->addr = ram;
++ queue->len = len;
++ queue->id = qid;
++ queue->dev = get_device(qmgr_dev);
++ queue->acc_reg = qmgr->addr + (4 * qid);
++ qmgr->queues[qid] = queue;
++ if (request_pool(qmgr_dev, len)) {
++ printk(KERN_ERR "Failed to request DMA pool of Q %d\n", qid);
++ }
++
++ conf_q_regs(queue);
++ return queue;
++}
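++
++/*
++ * Typical use of the queue API (illustrative only; the callback name
++ * and IRQ source flag are up to the caller):
++ *
++ *	q = request_queue(qid, 64);
++ *	if (!IS_ERR(q) && q) {
++ *		q->irq_cb = my_irq_cb;
++ *		queue_set_watermarks(q, 0, 0);
++ *		queue_set_irq_src(q, my_irq_src_flag);
++ *		queue_enable_irq(q);
++ *	}
++ */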
++
++void release_queue(struct qm_queue *queue)
++{
++ struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
++
++ BUG_ON(qmgr->queues[queue->id] != queue);
++ qmgr->queues[queue->id] = NULL;
++
++ if (free_pool(queue->dev, queue->len)) {
++ printk(KERN_ERR "Failed to release DMA pool of Q %d\n",
++ queue->id);
++ }
++ queue_disable_irq(queue);
++ queue_write_cfg_reg(queue, 0);
++
++ module_put(THIS_MODULE);
++ put_device(queue->dev);
++ kfree(queue);
++}
++
++static int qmgr_probe(struct platform_device *pdev)
++{
++ struct resource *res;
++ struct qm_qmgr *qmgr;
++ int size, ret=0, i;
++
++ if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
++ return -EIO;
++
++ if ((i = platform_get_irq(pdev, 0)) < 0)
++ return -EIO;
++
++ if (!(qmgr = kzalloc(sizeof(struct qm_qmgr), GFP_KERNEL)))
++ return -ENOMEM;
++
++ qmgr->irq = i;
++ size = res->end - res->start +1;
++ qmgr->res = request_mem_region(res->start, size, "ixp_qmgr");
++ if (!qmgr->res) {
++ ret = -EBUSY;
++ goto out_free;
++ }
++
++ qmgr->addr = ioremap(res->start, size);
++ if (!qmgr->addr) {
++ ret = -ENOMEM;
++ goto out_rel;
++ }
++
++ /* Reset Q registers */
++ for (i=0; i<4; i++)
++ *(qmgr->addr + QUE_LOW_STAT0 +i) = 0x33333333;
++ for (i=0; i<10; i++)
++ *(qmgr->addr + QUE_UO_STAT0 +i) = 0x0;
++ for (i=0; i<4; i++)
++ *(qmgr->addr + INT0_SRC_SELREG0 +i) = 0x0;
++ for (i=0; i<2; i++) {
++ *(qmgr->addr + QUE_IE_REG0 +i) = 0x00;
++ *(qmgr->addr + QUE_INT_REG0 +i) = 0xffffffff;
++ }
++ for (i=0; i<64; i++) {
++ *(qmgr->addr + IX_QMGR_QCFG_BASE + i) = 0x0;
++ }
++
++ if (poll_enable) {
++ pmu_timer_init();
++ qmgr->irq = IRQ_IXP4XX_XSCALE_PMU;
++ }
++ ret = request_irq(qmgr->irq, irq_qm1, SA_SHIRQ | SA_INTERRUPT,
++ "qmgr", qmgr);
++ if (ret) {
++ printk(KERN_ERR "Failed to request IRQ(%d)\n", qmgr->irq);
++ ret = -EIO;
++ goto out_unmap;
++ }
++ if (poll_enable)
++ pmu_timer_restart();
++
++ rwlock_init(&qmgr->lock);
++ qmgr->dmapool = dma_pool_create("qmgr", &pdev->dev,
++ sizeof(struct npe_cont), 32, 0);
++ if (!qmgr->dmapool) {
++ free_irq(qmgr->irq, qmgr);
++ ret = -ENOMEM;
++ goto out_unmap;
++ }
++ platform_set_drvdata(pdev, qmgr);
++
++ qmgr_dev = &pdev->dev;
++
++ printk(KERN_INFO IXQMGR_VERSION " initialized.\n");
++
++ return 0;
++
++out_unmap:
++ iounmap(qmgr->addr);
++out_rel:
++ release_resource(qmgr->res);
++out_free:
++ kfree(qmgr);
++ return ret;
++}
++
++static int qmgr_remove(struct platform_device *pdev)
++{
++ struct qm_qmgr *qmgr = platform_get_drvdata(pdev);
++ int i;
++
++ for (i=0; i<MAX_QUEUES; i++) {
++ if (qmgr->queues[i]) {
++ printk(KERN_ERR "WARNING Unreleased Q: %d\n", i);
++ release_queue(qmgr->queues[i]);
++ }
++ }
++
++ if (poll_enable)
++ pmu_timer_disable();
++
++ synchronize_irq(qmgr->irq);
++ free_irq(qmgr->irq, qmgr);
++
++ dma_pool_destroy(qmgr->dmapool);
++ iounmap(qmgr->addr);
++ release_resource(qmgr->res);
++ platform_set_drvdata(pdev, NULL);
++ qmgr_dev = NULL;
++ kfree(qmgr);
++ return 0;
++}
++
++static struct platform_driver ixp4xx_qmgr = {
++ .driver.name = "ixp4xx_qmgr",
++ .probe = qmgr_probe,
++ .remove = qmgr_remove,
++};
++
++static int __init init_qmgr(void)
++{
++ return platform_driver_register(&ixp4xx_qmgr);
++}
++
++static void __exit finish_qmgr(void)
++{
++ platform_driver_unregister(&ixp4xx_qmgr);
++}
++
++module_init(init_qmgr);
++module_exit(finish_qmgr);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
++
++EXPORT_SYMBOL(request_queue);
++EXPORT_SYMBOL(release_queue);
++EXPORT_SYMBOL(queue_set_irq_src);
++EXPORT_SYMBOL(queue_set_watermarks);
++EXPORT_SYMBOL(queue_len);
+diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/Kconfig linux-2.6.19/drivers/net/ixp4xx/Kconfig
+--- linux-2.6.19.orig/drivers/net/ixp4xx/Kconfig 1969-12-31 17:00:00.000000000 -0700
++++ linux-2.6.19/drivers/net/ixp4xx/Kconfig 2007-01-12 21:54:40.000000000 -0700
+@@ -0,0 +1,48 @@
++config IXP4XX_QMGR
++ tristate "IXP4xx Queue Manager support"
++ depends on ARCH_IXP4XX
++ depends on NET_ETHERNET
++ help
++ The IXP4xx queue manager is a configurable hardware ring buffer.
++ It is used by the NPEs to exchange data with the CPU.
++ You can use either this driver or the Intel Access Library (IAL).
++
++config IXP4XX_NPE
++ tristate "IXP4xx NPE support"
++ depends on ARCH_IXP4XX
++ depends on NET_ETHERNET
++ help
++ The IXP4xx NPE driver supports the 3 CPU co-processors called
++ "Network Processing Engines" (NPE). It adds support for downloading
++ the microcode (firmware) via hotplug or a character special device.
++ See Documentation/networking/ixp4xx/README for details.
++ You can use either this driver or the Intel Access Library (IAL).
++
++config IXP4XX_FW_LOAD
++ bool "Use Firmware hotplug for Microcode download"
++ depends on IXP4XX_NPE
++ select HOTPLUG
++ select FW_LOADER
++ help
++ The default hotplug script will load the firmware from
++ /usr/lib/hotplug/firmware/NPE-[ABC];
++ see Documentation/firmware_class/hotplug-script.
++
++config IXP4XX_MAC
++ tristate "IXP4xx MAC support"
++ depends on IXP4XX_NPE
++ depends on IXP4XX_QMGR
++ depends on NET_ETHERNET
++ select MII
++ help
++ The IXP4xx MAC driver supports the MACs on the IXP4xx CPUs:
++ there are 2 on the IXP425 and up to 5 on the IXDP465.
++ You can use either this driver or the Intel Access Library (IAL).
++
++config IXP4XX_CRYPTO
++ tristate "IXP4xx crypto support"
++ depends on IXP4XX_NPE
++ depends on IXP4XX_QMGR
++ help
++ This driver is a generic access layer to the NPE crypto functions.
++ It requires additional glue code, for example in OCF.
+diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/mac_driver.c linux-2.6.19/drivers/net/ixp4xx/mac_driver.c
+--- linux-2.6.19.orig/drivers/net/ixp4xx/mac_driver.c 1969-12-31 17:00:00.000000000 -0700
++++ linux-2.6.19/drivers/net/ixp4xx/mac_driver.c 2007-01-12 21:54:40.000000000 -0700
+@@ -0,0 +1,849 @@
++/*
++ * mac_driver.c - provide a network interface for each MAC
++ *
++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
++ *
++ * This file is released under the GPLv2
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/ethtool.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/dma-mapping.h>
++#include <linux/workqueue.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++
++
++#include <linux/ixp_qmgr.h>
++#include <linux/ixp_npe.h>
++#include "mac.h"
++
++#define MDIO_INTERVAL (3*HZ)
++#define RX_QUEUE_PREFILL 64
++#define TX_QUEUE_PREFILL 16
++
++#define IXMAC_NAME "ixp4xx_mac"
++#define IXMAC_VERSION "0.3.1"
++
++#define MAC_DEFAULT_REG(mac, name) \
++ mac_write_reg(mac, MAC_ ## name, MAC_ ## name ## _DEFAULT)
++
++#define TX_DONE_QID 31
++
++#define DMA_ALLOC_SIZE 2048
++#define DMA_HDR_SIZE (sizeof(struct npe_cont))
++#define DMA_BUF_SIZE (DMA_ALLOC_SIZE - DMA_HDR_SIZE)
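++
++/*
++ * Each DMA buffer holds a struct npe_cont header followed by the
++ * payload; the payload is offset by 2 bytes (see init_buffer()) so
++ * that the IP header behind the 14-byte Ethernet header ends up
++ * 32-bit aligned.
++ */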
++
++/* Since the NPEs use a single return Q for sent frames, we need a
++ * device-independent return Q. We call it tx_doneq.
++ * It is initialized during module load and freed during module
++ * unload. An evil hack, but there is no choice :-(
++ */
++
++static struct qm_queue *tx_doneq = NULL;
++static int debug = -1;
++module_param(debug, int, 0);
++
++static int init_buffer(struct qm_queue *queue, int count)
++{
++ int i;
++ struct npe_cont *cont;
++
++ for (i=0; i<count; i++) {
++ cont = kmalloc(DMA_ALLOC_SIZE, GFP_KERNEL | GFP_DMA);
++ if (!cont)
++ goto err;
++
++ cont->phys = dma_map_single(queue->dev, cont, DMA_ALLOC_SIZE,
++ DMA_BIDIRECTIONAL);
++ if (dma_mapping_error(cont->phys))
++ goto err;
++
++ cont->data = cont+1;
++ /* the buffer now starts on a 32-bit boundary;
++ * add 2 bytes for good alignment of the SKB payload */
++ cont->data += 2;
++ cont->eth.next = 0;
++ cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
++ cont->eth.pkt_len = 0;
++ /* account for the same 2 alignment bytes of cont->data */
++ cont->eth.phys_addr = cpu_to_npe32(cont->phys+ DMA_HDR_SIZE+ 2);
++
++ dma_sync_single(queue->dev, cont->phys, DMA_HDR_SIZE,
++ DMA_TO_DEVICE);
++
++ queue_put_entry(queue, cont->phys);
++ if (queue_stat(queue) == 2) { /* overflow */
++ dma_unmap_single(queue->dev, cont->phys, DMA_ALLOC_SIZE,
++ DMA_BIDIRECTIONAL);
++ goto err;
++ }
++ }
++ return i;
++err:
++ if (cont)
++ kfree(cont);
++ return i;
++}
++
++static int destroy_buffer(struct qm_queue *queue, int count)
++{
++ u32 phys;
++ int i;
++ struct npe_cont *cont;
++
++ for (i=0; i<count; i++) {
++ phys = queue_get_entry(queue) & ~0xf;
++ if (!phys)
++ break;
++ dma_unmap_single(queue->dev, phys, DMA_ALLOC_SIZE,
++ DMA_BIDIRECTIONAL);
++ cont = dma_to_virt(queue->dev, phys);
++ kfree(cont);
++ }
++ return i;
++}
++
++static void mac_init(struct mac_info *mac)
++{
++ MAC_DEFAULT_REG(mac, TX_CNTRL2);
++ MAC_DEFAULT_REG(mac, RANDOM_SEED);
++ MAC_DEFAULT_REG(mac, THRESH_P_EMPTY);
++ MAC_DEFAULT_REG(mac, THRESH_P_FULL);
++ MAC_DEFAULT_REG(mac, TX_DEFER);
++ MAC_DEFAULT_REG(mac, TX_TWO_DEFER_1);
++ MAC_DEFAULT_REG(mac, TX_TWO_DEFER_2);
++ MAC_DEFAULT_REG(mac, SLOT_TIME);
++ MAC_DEFAULT_REG(mac, INT_CLK_THRESH);
++ MAC_DEFAULT_REG(mac, BUF_SIZE_TX);
++ MAC_DEFAULT_REG(mac, TX_CNTRL1);
++ MAC_DEFAULT_REG(mac, RX_CNTRL1);
++}
++
++static void mac_set_uniaddr(struct net_device *dev)
++{
++ int i;
++ struct mac_info *mac = netdev_priv(dev);
++ struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
++
++ /* check for multicast */
++ if (dev->dev_addr[0] & 1)
++ return;
++
++ npe_mh_setportaddr(npe, mac->plat, dev->dev_addr);
++ npe_mh_disable_firewall(npe, mac->plat);
++ for (i=0; i<dev->addr_len; i++)
++ mac_write_reg(mac, MAC_UNI_ADDR + i, dev->dev_addr[i]);
++}
++
++static void update_duplex_mode(struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ if (netif_msg_link(mac)) {
++ printk(KERN_DEBUG "Link of %s is %s-duplex\n", dev->name,
++ mac->mii.full_duplex ? "full" : "half");
++ }
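++ /* the TX_CNTRL1_DUPLEX bit selects half-duplex, so it is cleared for full duplex */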
++ if (mac->mii.full_duplex) {
++ mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
++ } else {
++ mac_set_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
++ }
++}
++
++static int media_check(struct net_device *dev, int init)
++{
++ struct mac_info *mac = netdev_priv(dev);
++
++ if (mii_check_media(&mac->mii, netif_msg_link(mac), init)) {
++ update_duplex_mode(dev);
++ return 1;
++ }
++ return 0;
++}
++
++static void get_npe_stats(struct mac_info *mac, u32 *buf, int len, int reset)
++{
++ struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
++ u32 phys;
++
++ memset(buf, 0, len);
++ phys = dma_map_single(mac->npe_dev, buf, len, DMA_BIDIRECTIONAL);
++ npe_mh_get_stats(npe, mac->plat, phys, reset);
++ dma_unmap_single(mac->npe_dev, phys, len, DMA_BIDIRECTIONAL);
++}
++
++static void irqcb_recv(struct qm_queue *queue)
++{
++ struct net_device *dev = queue->cb_data;
++
++ queue_ack_irq(queue);
++ queue_disable_irq(queue);
++ if (netif_running(dev))
++ netif_rx_schedule(dev);
++}
++
++int ix_recv(struct net_device *dev, int *budget, struct qm_queue *queue)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ struct sk_buff *skb;
++ u32 phys;
++ struct npe_cont *cont;
++
++ while (*budget > 0 && netif_running(dev)) {
++ int len;
++ phys = queue_get_entry(queue) & ~0xf;
++ if (!phys)
++ break;
++ dma_sync_single(queue->dev, phys, DMA_HDR_SIZE,
++ DMA_FROM_DEVICE);
++ cont = dma_to_virt(queue->dev, phys);
++ len = npe_to_cpu16(cont->eth.pkt_len) -4; /* strip FCS */
++
++ if (unlikely(netif_msg_rx_status(mac))) {
++ printk(KERN_DEBUG "%s: RX packet size: %u\n",
++ dev->name, len);
++ queue_state(mac->rxq);
++ queue_state(mac->rxdoneq);
++ }
++ skb = dev_alloc_skb(len + 2);
++ if (likely(skb)) {
++ skb->dev = dev;
++ skb_reserve(skb, 2);
++ dma_sync_single(queue->dev, cont->eth.phys_addr, len,
++ DMA_FROM_DEVICE);
++#ifdef CONFIG_NPE_ADDRESS_COHERENT
++ /* swap the payload of the SKB */
++ {
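++ /*
++ * both buffers start 2 bytes into a 32-bit word; swap from
++ * the word boundary and round len+2 up to whole words,
++ * hence the (len+5)/4 iterations
++ */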
++ u32 *t = (u32*)(skb->data-2);
++ u32 *s = (u32*)(cont->data-2);
++ int i, j = (len+5)/4;
++ for (i=0; i<j; i++)
++ t[i] = cpu_to_be32(s[i]);
++ }
++#else
++ eth_copy_and_sum(skb, cont->data, len, 0);
++#endif
++ skb_put(skb, len);
++ skb->protocol = eth_type_trans(skb, dev);
++ dev->last_rx = jiffies;
++ mac->stat.rx_packets++;
++ mac->stat.rx_bytes += skb->len;
++ /* netif_receive_skb() may free the SKB, so update stats first */
++ netif_receive_skb(skb);
++ } else {
++ mac->stat.rx_dropped++;
++ }
++ cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
++ cont->eth.pkt_len = 0;
++ dma_sync_single(queue->dev, phys, DMA_HDR_SIZE, DMA_TO_DEVICE);
++ queue_put_entry(mac->rxq, phys);
++ dev->quota--;
++ (*budget)--;
++ }
++
++ return !(*budget); /* nonzero when the budget is exhausted */
++}
++
++static int ix_poll(struct net_device *dev, int *budget)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ struct qm_queue *queue = mac->rxdoneq;
++
++ for (;;) {
++ if (ix_recv(dev, budget, queue))
++ return 1;
++ netif_rx_complete(dev);
++ queue_enable_irq(queue);
++ if (!queue_len(queue))
++ break;
++ queue_disable_irq(queue);
++ if (netif_rx_reschedule(dev, 0))
++ break;
++ }
++ return 0;
++}
++
++static void ixmac_set_rx_mode (struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ struct dev_mc_list *mclist;
++ u8 aset[dev->addr_len], aclear[dev->addr_len];
++ int i,j;
++
++ if (dev->flags & IFF_PROMISC) {
++ mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
++ } else {
++ mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
++
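++ /*
++ * Build the tightest (address, mask) filter covering all
++ * multicast addresses: aset accumulates the bits set in every
++ * address, and the final aclear has a 1 wherever all
++ * addresses agree.
++ */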
++ mclist = dev->mc_list;
++ memset(aset, 0xff, dev->addr_len);
++ memset(aclear, 0x00, dev->addr_len);
++ for (i = 0; mclist && i < dev->mc_count; i++) {
++ for (j=0; j< dev->addr_len; j++) {
++ aset[j] &= mclist->dmi_addr[j];
++ aclear[j] |= mclist->dmi_addr[j];
++ }
++ mclist = mclist->next;
++ }
++ for (j=0; j< dev->addr_len; j++) {
++ aclear[j] = aset[j] | ~aclear[j];
++ }
++ for (i=0; i<dev->addr_len; i++) {
++ mac_write_reg(mac, MAC_ADDR + i, aset[i]);
++ mac_write_reg(mac, MAC_ADDR_MASK + i, aclear[i]);
++ }
++ }
++}
++
++static int ixmac_open (struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
++ u32 buf[NPE_STAT_NUM];
++ int i;
++ u32 phys;
++
++ /* first check if the NPE is up and running */
++ if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
++ printk(KERN_ERR "%s: %s not running\n", dev->name,
++ npe->plat->name);
++ return -EIO;
++ }
++ if (npe_mh_status(npe)) {
++ printk(KERN_ERR "%s: %s not responding\n", dev->name,
++ npe->plat->name);
++ return -EIO;
++ }
++ mac->txq_pkt += init_buffer(mac->txq, TX_QUEUE_PREFILL - mac->txq_pkt);
++ mac->rxq_pkt += init_buffer(mac->rxq, RX_QUEUE_PREFILL - mac->rxq_pkt);
++
++ queue_enable_irq(mac->rxdoneq);
++
++ /* drain all buffers from the RX-done Q to make the IRQ happen */
++ while ((phys = queue_get_entry(mac->rxdoneq) & ~0xf)) {
++ struct npe_cont *cont;
++ cont = dma_to_virt(mac->rxdoneq->dev, phys);
++ cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
++ cont->eth.pkt_len = 0;
++ dma_sync_single(mac->rxdoneq->dev, phys, DMA_HDR_SIZE,
++ DMA_TO_DEVICE);
++ queue_put_entry(mac->rxq, phys);
++ }
++ mac_init(mac);
++ npe_mh_set_rxqid(npe, mac->plat, mac->plat->rxdoneq_id);
++ get_npe_stats(mac, buf, sizeof(buf), 1); /* reset stats */
++ get_npe_stats(mac, buf, sizeof(buf), 0);
++ /*
++ * if the extended stats contain random values,
++ * the NPE image lacks the extended statistics counters
++ */
++ for (i=NPE_STAT_NUM_BASE; i<NPE_STAT_NUM; i++) {
++ if (buf[i] >10000)
++ break;
++ }
++ mac->npe_stat_num = i<NPE_STAT_NUM ? NPE_STAT_NUM_BASE : NPE_STAT_NUM;
++ mac->npe_stat_num += NPE_Q_STAT_NUM;
++
++ mac_set_uniaddr(dev);
++ media_check(dev, 1);
++ ixmac_set_rx_mode(dev);
++ netif_start_queue(dev);
++ schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
++ if (netif_msg_ifup(mac)) {
++ printk(KERN_DEBUG "%s: open " IXMAC_NAME
++ " RX queue %d bufs, TX queue %d bufs\n",
++ dev->name, mac->rxq_pkt, mac->txq_pkt);
++ }
++ return 0;
++}
++
++static int ixmac_start_xmit (struct sk_buff *skb, struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ struct npe_cont *cont;
++ u32 phys;
++ struct qm_queue *queue = mac->txq;
++
++ if (unlikely(skb->len > DMA_BUF_SIZE)) {
++ dev_kfree_skb(skb);
++ mac->stat.tx_errors++;
++ return NETDEV_TX_OK;
++ }
++ phys = queue_get_entry(tx_doneq) & ~0xf;
++ if (!phys)
++ goto busy;
++ cont = dma_to_virt(queue->dev, phys);
++#ifdef CONFIG_NPE_ADDRESS_COHERENT
++ /* swap the payload of the SKB */
++ {
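++ /* same word-aligned byte swap as in ix_recv(); see the comment there */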
++ u32 *s = (u32*)(skb->data-2);
++ u32 *t = (u32*)(cont->data-2);
++ int i,j = (skb->len+5) / 4;
++ for (i=0; i<j; i++)
++ t[i] = cpu_to_be32(s[i]);
++ }
++#else
++ /* skb_copy_and_csum_dev(skb, cont->data); */
++ memcpy(cont->data, skb->data, skb->len);
++#endif
++ cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
++ cont->eth.pkt_len = cpu_to_npe16(skb->len);
++ /* disable VLAN functions in NPE image for now */
++ cont->eth.flags = 0;
++ dma_sync_single(queue->dev, phys, skb->len + DMA_HDR_SIZE,
++ DMA_TO_DEVICE);
++ queue_put_entry(queue, phys);
++ if (queue_stat(queue) == 2) { /* overflow */
++ queue_put_entry(tx_doneq, phys);
++ goto busy;
++ }
++ mac->stat.tx_packets++;
++ mac->stat.tx_bytes += skb->len;
++ dev->trans_start = jiffies;
++
++ if (netif_msg_tx_queued(mac)) {
++ printk(KERN_DEBUG "%s: TX packet size %u\n",
++ dev->name, skb->len);
++ queue_state(mac->txq);
++ queue_state(tx_doneq);
++ }
++ /* free the SKB only now; skb->len was still needed above */
++ dev_kfree_skb(skb);
++ return NETDEV_TX_OK;
++busy:
++ return NETDEV_TX_BUSY;
++}
++
++static int ixmac_close (struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++
++ netif_stop_queue (dev);
++ queue_disable_irq(mac->rxdoneq);
++
++ mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt);
++ mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt);
++
++ cancel_rearming_delayed_work(&(mac->mdio_thread));
++
++ if (netif_msg_ifdown(mac)) {
++ printk(KERN_DEBUG "%s: close " IXMAC_NAME
++ " RX queue %d bufs, TX queue %d bufs\n",
++ dev->name, mac->rxq_pkt, mac->txq_pkt);
++ }
++ return 0;
++}
++
++static int ixmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ int rc, duplex_changed;
++
++ if (!netif_running(dev))
++ return -EINVAL;
++ if (!try_module_get(THIS_MODULE))
++ return -ENODEV;
++ rc = generic_mii_ioctl(&mac->mii, if_mii(rq), cmd, &duplex_changed);
++ module_put(THIS_MODULE);
++ if (duplex_changed)
++ update_duplex_mode(dev);
++ return rc;
++}
++
++static struct net_device_stats *ixmac_stats (struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ return &mac->stat;
++}
++
++static void ixmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
++
++ strcpy(info->driver, IXMAC_NAME);
++ strcpy(info->version, IXMAC_VERSION);
++ if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
++ snprintf(info->fw_version, 32, "%d.%d func [%d]",
++ npe->img_info[2], npe->img_info[3], npe->img_info[1]);
++ }
++ strncpy(info->bus_info, npe->plat->name, ETHTOOL_BUSINFO_LEN);
++}
++
++static int ixmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ mii_ethtool_gset(&mac->mii, cmd);
++ return 0;
++}
++
++static int ixmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ int rc;
++ rc = mii_ethtool_sset(&mac->mii, cmd);
++ return rc;
++}
++
++static int ixmac_nway_reset(struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ return mii_nway_restart(&mac->mii);
++}
++
++static u32 ixmac_get_link(struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ return mii_link_ok(&mac->mii);
++}
++
++static const int mac_reg_list[] = MAC_REG_LIST;
++
++static int ixmac_get_regs_len(struct net_device *dev)
++{
++ return ARRAY_SIZE(mac_reg_list);
++}
++
++static void
++ixmac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
++{
++ int i;
++ struct mac_info *mac = netdev_priv(dev);
++ u8 *buf = regbuf;
++
++ for (i=0; i<regs->len; i++) {
++ buf[i] = mac_read_reg(mac, mac_reg_list[i]);
++ }
++}
++
++static struct {
++ const char str[ETH_GSTRING_LEN];
++} ethtool_stats_keys[NPE_STAT_NUM + NPE_Q_STAT_NUM] = {
++ NPE_Q_STAT_STRINGS
++ NPE_STAT_STRINGS
++};
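++
++/* the NPE_Q_STAT_STRINGS entries name the queue fill levels that
++ * ixmac_get_ethtool_stats() reports first */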
++
++static void ixmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ memcpy(data, ethtool_stats_keys, mac->npe_stat_num * ETH_GSTRING_LEN);
++}
++
++static int ixmac_get_stats_count(struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ return mac->npe_stat_num;
++}
++
++static u32 ixmac_get_msglevel(struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ return mac->msg_enable;
++}
++
++static void ixmac_set_msglevel(struct net_device *dev, u32 datum)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ mac->msg_enable = datum;
++}
++
++static void ixmac_get_ethtool_stats(struct net_device *dev,
++ struct ethtool_stats *stats, u64 *data)
++{
++ int i;
++ struct mac_info *mac = netdev_priv(dev);
++ u32 buf[NPE_STAT_NUM];
++
++ data[0] = queue_len(mac->rxq);
++ data[1] = queue_len(mac->rxdoneq);
++ data[2] = queue_len(mac->txq);
++ data[3] = queue_len(tx_doneq);
++
++ get_npe_stats(mac, buf, sizeof(buf), 0);
++
++ for (i=0; i<stats->n_stats-4; i++) {
++ data[i+4] = npe_to_cpu32(buf[i]);
++ }
++}
++
++static struct ethtool_ops ixmac_ethtool_ops = {
++ .get_drvinfo = ixmac_get_drvinfo,
++ .get_settings = ixmac_get_settings,
++ .set_settings = ixmac_set_settings,
++ .nway_reset = ixmac_nway_reset,
++ .get_link = ixmac_get_link,
++ .get_msglevel = ixmac_get_msglevel,
++ .set_msglevel = ixmac_set_msglevel,
++ .get_regs_len = ixmac_get_regs_len,
++ .get_regs = ixmac_get_regs,
++ .get_perm_addr = ethtool_op_get_perm_addr,
++ .get_strings = ixmac_get_strings,
++ .get_stats_count = ixmac_get_stats_count,
++ .get_ethtool_stats = ixmac_get_ethtool_stats,
++};
++
++static void mac_mdio_thread(void *_data)
++{
++ struct net_device *dev = _data;
++ struct mac_info *mac = netdev_priv(dev);
++
++ media_check(dev, 0);
++ schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
++}
++
++static int mac_probe(struct platform_device *pdev)
++{
++ struct resource *res;
++ struct mac_info *mac;
++ struct net_device *dev;
++ struct npe_info *npe;
++ struct mac_plat_info *plat = pdev->dev.platform_data;
++ int size, ret;
++
++ if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0))) {
++ return -EIO;
++ }
++ if (!(dev = alloc_etherdev (sizeof(struct mac_info)))) {
++ return -ENOMEM;
++ }
++ SET_MODULE_OWNER(dev);
++ SET_NETDEV_DEV(dev, &pdev->dev);
++ mac = netdev_priv(dev);
++ mac->netdev = dev;
++
++ size = res->end - res->start +1;
++ mac->res = request_mem_region(res->start, size, IXMAC_NAME);
++ if (!mac->res) {
++ ret = -EBUSY;
++ goto out_free;
++ }
++
++ mac->addr = ioremap(res->start, size);
++ if (!mac->addr) {
++ ret = -ENOMEM;
++ goto out_rel;
++ }
++
++ dev->open = ixmac_open;
++ dev->hard_start_xmit = ixmac_start_xmit;
++ dev->poll = ix_poll;
++ dev->stop = ixmac_close;
++ dev->get_stats = ixmac_stats;
++ dev->do_ioctl = ixmac_ioctl;
++ dev->set_multicast_list = ixmac_set_rx_mode;
++ dev->ethtool_ops = &ixmac_ethtool_ops;
++
++ dev->weight = 16;
++ dev->tx_queue_len = 100;
++
++ mac->npe_dev = get_npe_by_id(plat->npe_id);
++ if (!mac->npe_dev) {
++ ret = -EIO;
++ goto out_unmap;