++obj-$(CONFIG_IXP4XX_CRYPTO) += ixp4xx_crypto.o
++
++ixp4xx_npe-objs := ucode_dl.o npe_mh.o npe.o
++ixp4xx_mac-objs := mac_driver.o phy.o
+Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_crypto.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_crypto.c 2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,851 @@
++/*
++ * ixp4xx_crypto.c - interface to the HW crypto
++ *
++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
++ *
++ * This file is released under the GPLv2
++ */
++
++#include <linux/ixp_qmgr.h>
++#include <linux/ixp_npe.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/ixp_crypto.h>
++
++#define SEND_QID 29
++#define RECV_QID 30
++
++#define NPE_ID 2 /* NPE C */
++
++#define QUEUE_SIZE 64
++#define MY_VERSION "0.0.1"
++
++/* local head for all sa_ctx */
++static struct ix_sa_master sa_master;
++
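++/* Hash algorithms understood by the NPE.  The icv field holds the
++ * algorithm's standard initial chaining value. */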
++static const struct ix_hash_algo _hash_algos[] = {
++{
++ .name = "MD5",
++ .cfgword = 0xAA010004,
++ .digest_len = 16,
++ .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
++ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
++ .type = HASH_TYPE_MD5,
++},{
++ .name = "SHA1",
++ .cfgword = 0x00000005,
++ .digest_len = 20,
++ .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
++ "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
++ .type = HASH_TYPE_SHA1,
++#if 0
++},{
++ .name = "CBC MAC",
++ .digest_len = 64,
++ .aad_len = 48,
++ .type = HASH_TYPE_CBCMAC,
++#endif
++} };
++
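++/* Cipher algorithms understood by the NPE.  Separate NPE configuration
++ * words are kept for the encrypt and decrypt directions. */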
++static const struct ix_cipher_algo _cipher_algos[] = {
++{
++ .name = "DES ECB",
++ .cfgword_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
++ .cfgword_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
++ .block_len = 8,
++ .type = CIPHER_TYPE_DES,
++ .mode = CIPHER_MODE_ECB,
++},{
++ .name = "DES CBC",
++ .cfgword_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
++ .cfgword_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
++ .iv_len = 8,
++ .block_len = 8,
++ .type = CIPHER_TYPE_DES,
++ .mode = CIPHER_MODE_CBC,
++},{
++ .name = "3DES ECB",
++ .cfgword_enc = CIPH_ENCR | MOD_TDEA3 | MOD_ECB | KEYLEN_192,
++ .cfgword_dec = CIPH_DECR | MOD_TDEA3 | MOD_ECB | KEYLEN_192,
++ .block_len = 8,
++ .type = CIPHER_TYPE_3DES,
++ .mode = CIPHER_MODE_ECB,
++},{
++ .name = "3DES CBC",
++ .cfgword_enc = CIPH_ENCR | MOD_TDEA3 | MOD_CBC_ENC | KEYLEN_192,
++ .cfgword_dec = CIPH_DECR | MOD_TDEA3 | MOD_CBC_DEC | KEYLEN_192,
++ .iv_len = 8,
++ .block_len = 8,
++ .type = CIPHER_TYPE_3DES,
++ .mode = CIPHER_MODE_CBC,
++},{
++ .name = "AES ECB",
++ .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_ECB,
++ .cfgword_dec = CIPH_DECR | ALGO_AES | MOD_ECB,
++ .block_len = 16,
++ .type = CIPHER_TYPE_AES,
++ .mode = CIPHER_MODE_ECB,
++},{
++ .name = "AES CBC",
++ .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CBC_ENC,
++ .cfgword_dec = CIPH_DECR | ALGO_AES | MOD_CBC_DEC,
++ .block_len = 16,
++ .iv_len = 16,
++ .type = CIPHER_TYPE_AES,
++ .mode = CIPHER_MODE_CBC,
++},{
++ .name = "AES CTR",
++ .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CTR,
++ .cfgword_dec = CIPH_ENCR | ALGO_AES | MOD_CTR,
++ .block_len = 16,
++ .iv_len = 16,
++ .type = CIPHER_TYPE_AES,
++ .mode = CIPHER_MODE_CTR,
++#if 0
++},{
++ .name = "AES CCM",
++ .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CCM_ENC,
++ .cfgword_dec = CIPH_ENCR | ALGO_AES | MOD_CCM_DEC,
++ .block_len = 16,
++ .iv_len = 16,
++ .type = CIPHER_TYPE_AES,
++ .mode = CIPHER_MODE_CCM,
++#endif
++} };
++
++const struct ix_hash_algo *ix_hash_by_id(int type)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(_hash_algos); i++) {
++ if (_hash_algos[i].type == type)
++ return _hash_algos + i;
++ }
++ return NULL;
++}
++
++const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(_cipher_algos); i++) {
++ if (_cipher_algos[i].type==type && _cipher_algos[i].mode==mode)
++ return _cipher_algos + i;
++ }
++ return NULL;
++}
++
++static void irqcb_recv(struct qm_queue *queue);
++
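++/* Set up the global SA master: check the fuse bits for HW crypto,
++ * claim the crypto NPE, create the DMA pool for crypt descriptors and
++ * request the send and receive queues. */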
++static int init_sa_master(struct ix_sa_master *master)
++{
++ struct npe_info *npe;
++ int ret = -ENODEV;
++
++ if (! (ix_fuse() & (IX_FUSE_HASH | IX_FUSE_AES | IX_FUSE_DES))) {
++ printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
++ return ret;
++ }
++ memset(master, 0, sizeof(struct ix_sa_master));
++ master->npe_dev = get_npe_by_id(NPE_ID);
++ if (! master->npe_dev)
++ goto err;
++
++ npe = dev_get_drvdata(master->npe_dev);
++
++ if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
++ switch (npe->img_info[1]) {
++ case 4:
++ printk(KERN_INFO "Crypto AES available\n");
++ break;
++ case 5:
++ printk(KERN_INFO "Crypto AES and CCM available\n");
++ break;
++ default:
++ printk(KERN_WARNING "Current microcode for %s has no"
++ " crypto capabilities\n", npe->plat->name);
++ break;
++ }
++ }
++ rwlock_init(&master->lock);
++ master->dmapool = dma_pool_create("ixp4xx_crypto", master->npe_dev,
++ sizeof(struct npe_crypt_cont), 32, 0);
++ if (!master->dmapool) {
++ ret = -ENOMEM;
++ goto err;
++ }
++ master->sendq = request_queue(SEND_QID, QUEUE_SIZE);
++ if (IS_ERR(master->sendq)) {
++ printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
++ SEND_QID);
++ ret = PTR_ERR(master->sendq);
++ goto err;
++ }
++ master->recvq = request_queue(RECV_QID, QUEUE_SIZE);
++ if (IS_ERR(master->recvq)) {
++ printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
++ RECV_QID);
++ ret = PTR_ERR(master->recvq);
++ release_queue(master->sendq);
++ goto err;
++ }
++
++ master->recvq->irq_cb = irqcb_recv;
++ queue_set_watermarks(master->recvq, 0, 0);
++ queue_set_irq_src(master->recvq, Q_IRQ_ID_NOT_E);
++ queue_enable_irq(master->recvq);
++ printk(KERN_INFO "ixp4xx_crypto " MY_VERSION " registered successfully\n");
++
++ return 0;
++err:
++ if (master->dmapool)
++ dma_pool_destroy(master->dmapool);
++ if (master->npe_dev)
++ put_device(master->npe_dev);
++ return ret;
++
++}
++
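++/* Tear down the SA master: drain the descriptor free list, destroy the
++ * DMA pool and release the queues and the NPE device. */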
++static void release_sa_master(struct ix_sa_master *master)
++{
++ struct npe_crypt_cont *cont;
++ unsigned long flags;
++
++ write_lock_irqsave(&master->lock, flags);
++ while (master->pool) {
++ cont = master->pool;
++ master->pool = cont->next;
++ dma_pool_free(master->dmapool, cont, cont->phys);
++ master->pool_size--;
++ }
++ write_unlock_irqrestore(&master->lock, flags);
++ if (master->pool_size) {
++ printk(KERN_ERR "ixp4xx_crypto: %d items lost from DMA pool\n",
++ master->pool_size);
++ }
++
++ dma_pool_destroy(master->dmapool);
++ release_queue(master->sendq);
++ release_queue(master->recvq);
++ return_npe_dev(master->npe_dev);
++}
++
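++/* Take a crypt descriptor from the free list, or allocate a new one
++ * from the DMA pool if the list is empty. */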
++static struct npe_crypt_cont *ix_sa_get_cont(struct ix_sa_master *master)
++{
++ unsigned long flags;
++ struct npe_crypt_cont *cont;
++ dma_addr_t handle;
++
++ write_lock_irqsave(&master->lock, flags);
++ if (!master->pool) {
++ cont = dma_pool_alloc(master->dmapool, GFP_ATOMIC, &handle);
++ if (cont) {
++ master->pool_size++;
++ cont->phys = handle;
++ cont->virt = cont;
++ }
++ } else {
++ cont = master->pool;
++ master->pool = cont->next;
++ }
++ write_unlock_irqrestore(&master->lock, flags);
++ return cont;
++}
++
++static void
++ix_sa_return_cont(struct ix_sa_master *master,struct npe_crypt_cont *cont)
++{
++ unsigned long flags;
++
++ write_lock_irqsave(&master->lock, flags);
++ cont->next = master->pool;
++ master->pool = cont;
++ write_unlock_irqrestore(&master->lock, flags);
++}
++
++static void free_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
++{
++ memset(dir->npe_ctx, 0, NPE_CTX_LEN);
++ dma_pool_free(sa_ctx->master->dmapool, dir->npe_ctx,
++ dir->npe_ctx_phys);
++}
++
++static void ix_sa_ctx_destroy(struct ix_sa_ctx *sa_ctx)
++{
++ BUG_ON(sa_ctx->state != STATE_UNLOADING);
++ free_sa_dir(sa_ctx, &sa_ctx->encrypt);
++ free_sa_dir(sa_ctx, &sa_ctx->decrypt);
++ kfree(sa_ctx);
++ module_put(THIS_MODULE);
++}
++
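++/* Handle one completion returned by the NPE on the receive queue.
++ * Bit 0 of the queue entry marks a failed operation, the remaining
++ * bits are the physical address of the crypt descriptor. */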
++static void recv_pack(struct qm_queue *queue, u32 phys)
++{
++ struct ix_sa_ctx *sa_ctx;
++ struct npe_crypt_cont *cr_cont;
++ struct npe_cont *cont;
++ int failed;
++
++ failed = phys & 0x1;
++ phys &= ~0x3;
++
++ cr_cont = dma_to_virt(queue->dev, phys);
++ cr_cont = cr_cont->virt;
++ sa_ctx = cr_cont->ctl.crypt.sa_ctx;
++
++ phys = npe_to_cpu32(cr_cont->ctl.crypt.src_buf);
++ if (phys) {
++ cont = dma_to_virt(queue->dev, phys);
++ cont = cont->virt;
++ } else {
++ cont = NULL;
++ }
++ if (cr_cont->ctl.crypt.oper_type == OP_PERFORM) {
++ dma_unmap_single(sa_ctx->master->npe_dev,
++ cont->eth.phys_addr,
++ cont->eth.buf_len,
++ DMA_BIDIRECTIONAL);
++ if (sa_ctx->perf_cb)
++ sa_ctx->perf_cb(sa_ctx, cont->data, failed);
++ qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
++ ix_sa_return_cont(sa_ctx->master, cr_cont);
++ if (atomic_dec_and_test(&sa_ctx->use_cnt))
++ ix_sa_ctx_destroy(sa_ctx);
++ return;
++ }
++
++ /* We are registering */
++ switch (cr_cont->ctl.crypt.mode) {
++ case NPE_OP_HASH_GEN_ICV:
++ /* 1 out of 2 HMAC preparation operations completed */
++ dma_unmap_single(sa_ctx->master->npe_dev,
++ cont->eth.phys_addr,
++ cont->eth.buf_len,
++ DMA_TO_DEVICE);
++ kfree(cont->data);
++ qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
++ break;
++ case NPE_OP_ENC_GEN_KEY:
++ memcpy(sa_ctx->decrypt.npe_ctx + sizeof(u32),
++ sa_ctx->rev_aes->ctl.rev_aes_key + sizeof(u32),
++ sa_ctx->c_key.len);
++ /* REV AES data not needed anymore, free it */
++ ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
++ sa_ctx->rev_aes = NULL;
++ break;
++ default:
++ printk(KERN_ERR "Unknown crypt-register mode: %x\n",
++ cr_cont->ctl.crypt.mode);
++
++ }
++ if (cr_cont->ctl.crypt.oper_type == OP_REG_DONE) {
++ if (sa_ctx->state == STATE_UNREGISTERED)
++ sa_ctx->state = STATE_REGISTERED;
++ if (sa_ctx->reg_cb)
++ sa_ctx->reg_cb(sa_ctx, failed);
++ }
++ ix_sa_return_cont(sa_ctx->master, cr_cont);
++ if (atomic_dec_and_test(&sa_ctx->use_cnt))
++ ix_sa_ctx_destroy(sa_ctx);
++}
++
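++/* Receive-queue IRQ callback: acknowledge the interrupt and drain all
++ * pending completions. */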
++static void irqcb_recv(struct qm_queue *queue)
++{
++ u32 phys;
++
++ queue_ack_irq(queue);
++ while ((phys = queue_get_entry(queue)))
++ recv_pack(queue, phys);
++}
++
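++/* Allocate and zero the NPE context buffer for one SA direction. */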
++static int init_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
++{
++ dir->npe_ctx = dma_pool_alloc(sa_ctx->master->dmapool,
++ sa_ctx->gfp_flags, &dir->npe_ctx_phys);
++ if (!dir->npe_ctx) {
++ return 1;
++ }
++ memset(dir->npe_ctx, 0, NPE_CTX_LEN);
++ return 0;
++}
++
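++/* Allocate a new SA context with priv_len bytes of caller-private data.
++ * Fails if no crypto-capable microcode is running on the NPE. */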
++struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags)
++{
++ struct ix_sa_ctx *sa_ctx;
++ struct ix_sa_master *master = &sa_master;
++ struct npe_info *npe = dev_get_drvdata(master->npe_dev);
++
++ /* first check if Microcode was downloaded into this NPE */
++ if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
++ printk(KERN_ERR "%s not running\n", npe->plat->name);
++ return NULL;
++ }
++ switch (npe->img_info[1]) {
++ case 4:
++ case 5:
++ break;
++ default:
++ /* No crypto Microcode */
++ return NULL;
++ }
++ if (!try_module_get(THIS_MODULE)) {
++ return NULL;
++ }
++
++ sa_ctx = kzalloc(sizeof(struct ix_sa_ctx) + priv_len, flags);
++ if (!sa_ctx) {
++ goto err_put;
++ }
++
++ sa_ctx->master = master;
++ sa_ctx->gfp_flags = flags;
++
++ if (init_sa_dir(sa_ctx, &sa_ctx->encrypt))
++ goto err_free;
++ if (init_sa_dir(sa_ctx, &sa_ctx->decrypt)) {
++ free_sa_dir(sa_ctx, &sa_ctx->encrypt);
++ goto err_free;
++ }
++ if (priv_len)
++ sa_ctx->priv = sa_ctx + 1;
++
++ atomic_set(&sa_ctx->use_cnt, 1);
++ return sa_ctx;
++
++err_free:
++ kfree(sa_ctx);
++err_put:
++ module_put(THIS_MODULE);
++ return NULL;
++}
++
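++/* Drop the initial reference; the context is destroyed once all
++ * outstanding NPE operations have completed. */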
++void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx)
++{
++ sa_ctx->state = STATE_UNLOADING;
++ if (atomic_dec_and_test(&sa_ctx->use_cnt))
++ ix_sa_ctx_destroy(sa_ctx);
++ else
++ printk("ix_sa_ctx_free -> delayed: %p %d\n",
++ sa_ctx, atomic_read(&sa_ctx->use_cnt));
++}
++
++/* http://www.ietf.org/rfc/rfc2104.txt */
++#define HMAC_IPAD_VALUE 0x36
++#define HMAC_OPAD_VALUE 0x5C
++#define PAD_BLOCKLEN 64
++
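++/* Let the NPE hash one HMAC pad block (RFC 2104 ipad/opad) and store
++ * the resulting chaining variable in the SA context. */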
++static int register_chain_var(struct ix_sa_ctx *sa_ctx,
++ unsigned char *pad, u32 target, int init_len, u32 ctx_addr, int oper)
++{
++ struct npe_crypt_cont *cr_cont;
++ struct npe_cont *cont;
++
++ cr_cont = ix_sa_get_cont(sa_ctx->master);
++ if (!cr_cont)
++ return -ENOMEM;