2 * An OCF module that uses Intel's IXP CryptACC API to do the crypto.
3 * This driver requires the IXP400 Access Library that is available
4 * from Intel in order to operate (or compile).
6 * Written by David McCullough <david_mccullough@mcafee.com>
7 * Copyright (C) 2006-2011 David McCullough
8 * Copyright (C) 2004-2005 Intel Corporation.
12 * The free distribution and use of this software in both source and binary
13 * form is allowed (with or without changes) provided that:
15 * 1. distributions of this source code include the above copyright
16 * notice, this list of conditions and the following disclaimer;
18 * 2. distributions in binary form include the above copyright
19 * notice, this list of conditions and the following disclaimer
20 * in the documentation and/or other associated materials;
22 * 3. the copyright holder's name is not used to endorse products
23 * built using this software without specific written permission.
25 * ALTERNATIVELY, provided that this notice is retained in full, this product
26 * may be distributed under the terms of the GNU General Public License (GPL),
27 * in which case the provisions of the GPL apply INSTEAD OF those given above.
31 * This software is provided 'as is' with no explicit or implied warranties
32 * in respect of its properties, including, but not limited to, correctness
33 * and/or fitness for purpose.
36 #include <linux/version.h>
37 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
38 #include <linux/config.h>
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/list.h>
43 #include <linux/slab.h>
44 #include <linux/sched.h>
45 #include <linux/wait.h>
46 #include <linux/crypto.h>
47 #include <linux/interrupt.h>
48 #include <asm/scatterlist.h>
51 #include <IxOsBuffMgt.h>
53 #include <IxCryptoAcc.h>
55 #include <IxOsServices.h>
56 #include <IxOsCacheMMU.h>
58 #include <cryptodev.h>
62 #define IX_MBUF_PRIV(x) ((x)->priv)
68 struct list_head ixp_q_list
;
69 struct ixp_data
*ixp_q_data
;
70 struct cryptop
*ixp_q_crp
;
71 struct cryptodesc
*ixp_q_ccrd
;
72 struct cryptodesc
*ixp_q_acrd
;
74 UINT8
*ixp_hash_dest
; /* Location for hash in client buffer */
75 UINT8
*ixp_hash_src
; /* Location of hash in internal buffer */
76 unsigned char ixp_q_iv_data
[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH
];
77 unsigned char *ixp_q_iv
;
81 int ixp_registered
; /* is the context registered */
82 int ixp_crd_flags
; /* detect direction changes */
88 UINT32 ixp_hash_key_id
; /* used when hashing */
89 IxCryptoAccCtx ixp_ctx
;
93 struct work_struct ixp_pending_work
;
94 struct work_struct ixp_registration_work
;
95 struct list_head ixp_q
; /* unprocessed requests */
100 #define MAX_IOP_SIZE 64 /* words */
101 #define MAX_OOP_SIZE 128
106 struct list_head pkq_list
;
107 struct cryptkop
*pkq_krp
;
109 IxCryptoAccPkeEauInOperands pkq_op
;
110 IxCryptoAccPkeEauOpResult pkq_result
;
112 UINT32 pkq_ibuf0
[MAX_IOP_SIZE
];
113 UINT32 pkq_ibuf1
[MAX_IOP_SIZE
];
114 UINT32 pkq_ibuf2
[MAX_IOP_SIZE
];
115 UINT32 pkq_obuf
[MAX_OOP_SIZE
];
118 static LIST_HEAD(ixp_pkq
); /* current PK wait list */
119 static struct ixp_pkq
*ixp_pk_cur
;
120 static spinlock_t ixp_pkq_lock
;
122 #endif /* __ixp46X */
124 static int ixp_blocked
= 0;
126 static int32_t ixp_id
= -1;
127 static struct ixp_data
**ixp_sessions
= NULL
;
128 static u_int32_t ixp_sesnum
= 0;
130 static int ixp_process(device_t
, struct cryptop
*, int);
131 static int ixp_newsession(device_t
, u_int32_t
*, struct cryptoini
*);
132 static int ixp_freesession(device_t
, u_int64_t
);
134 static int ixp_kprocess(device_t
, struct cryptkop
*krp
, int hint
);
137 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
138 static kmem_cache_t
*qcache
;
140 static struct kmem_cache
*qcache
;
143 #define debug ixp_debug
144 static int ixp_debug
= 0;
145 module_param(ixp_debug
, int, 0644);
146 MODULE_PARM_DESC(ixp_debug
, "Enable debug");
148 static int ixp_init_crypto
= 1;
149 module_param(ixp_init_crypto
, int, 0444); /* RO after load/boot */
150 MODULE_PARM_DESC(ixp_init_crypto
, "Call ixCryptoAccInit (default is 1)");
152 static void ixp_process_pending(void *arg
);
153 static void ixp_registration(void *arg
);
154 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
155 static void ixp_process_pending_wq(struct work_struct
*work
);
156 static void ixp_registration_wq(struct work_struct
*work
);
160 * dummy device structure
164 softc_device_decl sc_dev
;
167 static device_method_t ixp_methods
= {
168 /* crypto device methods */
169 DEVMETHOD(cryptodev_newsession
, ixp_newsession
),
170 DEVMETHOD(cryptodev_freesession
,ixp_freesession
),
171 DEVMETHOD(cryptodev_process
, ixp_process
),
173 DEVMETHOD(cryptodev_kprocess
, ixp_kprocess
),
178 * Generate a new software session.
181 ixp_newsession(device_t dev
, u_int32_t
*sid
, struct cryptoini
*cri
)
183 struct ixp_data
*ixp
;
185 #define AUTH_LEN(cri, def) \
186 (cri->cri_mlen ? cri->cri_mlen : (def))
188 dprintk("%s():alg %d\n", __FUNCTION__
,cri
->cri_alg
);
189 if (sid
== NULL
|| cri
== NULL
) {
190 dprintk("%s,%d - EINVAL\n", __FILE__
, __LINE__
);
195 for (i
= 1; i
< ixp_sesnum
; i
++)
196 if (ixp_sessions
[i
] == NULL
)
199 i
= 1; /* NB: to silence compiler warning */
201 if (ixp_sessions
== NULL
|| i
== ixp_sesnum
) {
202 struct ixp_data
**ixpd
;
204 if (ixp_sessions
== NULL
) {
205 i
= 1; /* We leave ixp_sessions[0] empty */
206 ixp_sesnum
= CRYPTO_SW_SESSIONS
;
210 ixpd
= kmalloc(ixp_sesnum
* sizeof(struct ixp_data
*), SLAB_ATOMIC
);
212 /* Reset session number */
213 if (ixp_sesnum
== CRYPTO_SW_SESSIONS
)
217 dprintk("%s,%d: ENOBUFS\n", __FILE__
, __LINE__
);
220 memset(ixpd
, 0, ixp_sesnum
* sizeof(struct ixp_data
*));
222 /* Copy existing sessions */
224 memcpy(ixpd
, ixp_sessions
,
225 (ixp_sesnum
/ 2) * sizeof(struct ixp_data
*));
232 ixp_sessions
[i
] = (struct ixp_data
*) kmalloc(sizeof(struct ixp_data
),
234 if (ixp_sessions
[i
] == NULL
) {
235 ixp_freesession(NULL
, i
);
236 dprintk("%s,%d: EINVAL\n", __FILE__
, __LINE__
);
242 ixp
= ixp_sessions
[i
];
243 memset(ixp
, 0, sizeof(*ixp
));
245 ixp
->ixp_cipher_alg
= -1;
246 ixp
->ixp_auth_alg
= -1;
247 ixp
->ixp_ctx_id
= -1;
248 INIT_LIST_HEAD(&ixp
->ixp_q
);
250 ixp
->ixp_ctx
.useDifferentSrcAndDestMbufs
= 0;
253 switch (cri
->cri_alg
) {
255 ixp
->ixp_cipher_alg
= cri
->cri_alg
;
256 ixp
->ixp_ctx
.cipherCtx
.cipherAlgo
= IX_CRYPTO_ACC_CIPHER_DES
;
257 ixp
->ixp_ctx
.cipherCtx
.cipherMode
= IX_CRYPTO_ACC_MODE_CBC
;
258 ixp
->ixp_ctx
.cipherCtx
.cipherKeyLen
= (cri
->cri_klen
+ 7) / 8;
259 ixp
->ixp_ctx
.cipherCtx
.cipherBlockLen
= IX_CRYPTO_ACC_DES_BLOCK_64
;
260 ixp
->ixp_ctx
.cipherCtx
.cipherInitialVectorLen
=
261 IX_CRYPTO_ACC_DES_IV_64
;
262 memcpy(ixp
->ixp_ctx
.cipherCtx
.key
.cipherKey
,
263 cri
->cri_key
, (cri
->cri_klen
+ 7) / 8);
266 case CRYPTO_3DES_CBC
:
267 ixp
->ixp_cipher_alg
= cri
->cri_alg
;
268 ixp
->ixp_ctx
.cipherCtx
.cipherAlgo
= IX_CRYPTO_ACC_CIPHER_3DES
;
269 ixp
->ixp_ctx
.cipherCtx
.cipherMode
= IX_CRYPTO_ACC_MODE_CBC
;
270 ixp
->ixp_ctx
.cipherCtx
.cipherKeyLen
= (cri
->cri_klen
+ 7) / 8;
271 ixp
->ixp_ctx
.cipherCtx
.cipherBlockLen
= IX_CRYPTO_ACC_DES_BLOCK_64
;
272 ixp
->ixp_ctx
.cipherCtx
.cipherInitialVectorLen
=
273 IX_CRYPTO_ACC_DES_IV_64
;
274 memcpy(ixp
->ixp_ctx
.cipherCtx
.key
.cipherKey
,
275 cri
->cri_key
, (cri
->cri_klen
+ 7) / 8);
278 case CRYPTO_RIJNDAEL128_CBC
:
279 ixp
->ixp_cipher_alg
= cri
->cri_alg
;
280 ixp
->ixp_ctx
.cipherCtx
.cipherAlgo
= IX_CRYPTO_ACC_CIPHER_AES
;
281 ixp
->ixp_ctx
.cipherCtx
.cipherMode
= IX_CRYPTO_ACC_MODE_CBC
;
282 ixp
->ixp_ctx
.cipherCtx
.cipherKeyLen
= (cri
->cri_klen
+ 7) / 8;
283 ixp
->ixp_ctx
.cipherCtx
.cipherBlockLen
= 16;
284 ixp
->ixp_ctx
.cipherCtx
.cipherInitialVectorLen
= 16;
285 memcpy(ixp
->ixp_ctx
.cipherCtx
.key
.cipherKey
,
286 cri
->cri_key
, (cri
->cri_klen
+ 7) / 8);
290 case CRYPTO_MD5_HMAC
:
291 ixp
->ixp_auth_alg
= cri
->cri_alg
;
292 ixp
->ixp_ctx
.authCtx
.authAlgo
= IX_CRYPTO_ACC_AUTH_MD5
;
293 ixp
->ixp_ctx
.authCtx
.authDigestLen
= AUTH_LEN(cri
, MD5_HASH_LEN
);
294 ixp
->ixp_ctx
.authCtx
.aadLen
= 0;
295 /* Only MD5_HMAC needs a key */
296 if (cri
->cri_alg
== CRYPTO_MD5_HMAC
) {
297 ixp
->ixp_ctx
.authCtx
.authKeyLen
= (cri
->cri_klen
+ 7) / 8;
298 if (ixp
->ixp_ctx
.authCtx
.authKeyLen
>
299 sizeof(ixp
->ixp_ctx
.authCtx
.key
.authKey
)) {
301 "ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
303 ixp_freesession(NULL
, i
);
306 memcpy(ixp
->ixp_ctx
.authCtx
.key
.authKey
,
307 cri
->cri_key
, (cri
->cri_klen
+ 7) / 8);
312 case CRYPTO_SHA1_HMAC
:
313 ixp
->ixp_auth_alg
= cri
->cri_alg
;
314 ixp
->ixp_ctx
.authCtx
.authAlgo
= IX_CRYPTO_ACC_AUTH_SHA1
;
315 ixp
->ixp_ctx
.authCtx
.authDigestLen
= AUTH_LEN(cri
, SHA1_HASH_LEN
);
316 ixp
->ixp_ctx
.authCtx
.aadLen
= 0;
317 /* Only SHA1_HMAC needs a key */
318 if (cri
->cri_alg
== CRYPTO_SHA1_HMAC
) {
319 ixp
->ixp_ctx
.authCtx
.authKeyLen
= (cri
->cri_klen
+ 7) / 8;
320 if (ixp
->ixp_ctx
.authCtx
.authKeyLen
>
321 sizeof(ixp
->ixp_ctx
.authCtx
.key
.authKey
)) {
323 "ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
325 ixp_freesession(NULL
, i
);
328 memcpy(ixp
->ixp_ctx
.authCtx
.key
.authKey
,
329 cri
->cri_key
, (cri
->cri_klen
+ 7) / 8);
334 printk("ixp: unknown algo 0x%x\n", cri
->cri_alg
);
335 ixp_freesession(NULL
, i
);
341 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
342 INIT_WORK(&ixp
->ixp_pending_work
, ixp_process_pending_wq
);
343 INIT_WORK(&ixp
->ixp_registration_work
, ixp_registration_wq
);
345 INIT_WORK(&ixp
->ixp_pending_work
, ixp_process_pending
, ixp
);
346 INIT_WORK(&ixp
->ixp_registration_work
, ixp_registration
, ixp
);
357 ixp_freesession(device_t dev
, u_int64_t tid
)
359 u_int32_t sid
= CRYPTO_SESID2LID(tid
);
361 dprintk("%s()\n", __FUNCTION__
);
362 if (sid
> ixp_sesnum
|| ixp_sessions
== NULL
||
363 ixp_sessions
[sid
] == NULL
) {
364 dprintk("%s,%d: EINVAL\n", __FILE__
, __LINE__
);
368 /* Silently accept and return */
372 if (ixp_sessions
[sid
]) {
373 if (ixp_sessions
[sid
]->ixp_ctx_id
!= -1) {
374 ixCryptoAccCtxUnregister(ixp_sessions
[sid
]->ixp_ctx_id
);
375 ixp_sessions
[sid
]->ixp_ctx_id
= -1;
377 kfree(ixp_sessions
[sid
]);
379 ixp_sessions
[sid
] = NULL
;
382 crypto_unblock(ixp_id
, CRYPTO_SYMQ
);
389 * callback for when hash processing is complete
396 IxCryptoAccStatus status
)
400 dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__
, hash_key_id
, bufp
, status
);
403 printk("ixp: NULL buf in %s\n", __FUNCTION__
);
407 q
= IX_MBUF_PRIV(bufp
);
409 printk("ixp: NULL priv in %s\n", __FUNCTION__
);
413 if (status
== IX_CRYPTO_ACC_STATUS_SUCCESS
) {
414 /* On success, need to copy hash back into original client buffer */
415 memcpy(q
->ixp_hash_dest
, q
->ixp_hash_src
,
416 (q
->ixp_q_data
->ixp_auth_alg
== CRYPTO_SHA1
) ?
417 SHA1_HASH_LEN
: MD5_HASH_LEN
);
420 printk("ixp: hash perform failed status=%d\n", status
);
421 q
->ixp_q_crp
->crp_etype
= EINVAL
;
424 /* Free internal buffer used for hashing */
425 kfree(IX_MBUF_MDATA(&q
->ixp_q_mbuf
));
427 crypto_done(q
->ixp_q_crp
);
428 kmem_cache_free(qcache
, q
);
432 * setup a request and perform it
435 ixp_q_process(struct ixp_q
*q
)
437 IxCryptoAccStatus status
;
438 struct ixp_data
*ixp
= q
->ixp_q_data
;
446 dprintk("%s(%p)\n", __FUNCTION__
, q
);
449 if (q
->ixp_q_ccrd
->crd_flags
& CRD_F_ENCRYPT
) {
450 if (q
->ixp_q_ccrd
->crd_flags
& CRD_F_IV_EXPLICIT
) {
451 q
->ixp_q_iv
= q
->ixp_q_ccrd
->crd_iv
;
453 q
->ixp_q_iv
= q
->ixp_q_iv_data
;
454 read_random(q
->ixp_q_iv
, ixp
->ixp_ctx
.cipherCtx
.cipherInitialVectorLen
);
456 if ((q
->ixp_q_ccrd
->crd_flags
& CRD_F_IV_PRESENT
) == 0)
457 crypto_copyback(q
->ixp_q_crp
->crp_flags
, q
->ixp_q_crp
->crp_buf
,
458 q
->ixp_q_ccrd
->crd_inject
,
459 ixp
->ixp_ctx
.cipherCtx
.cipherInitialVectorLen
,
460 (caddr_t
) q
->ixp_q_iv
);
462 if (q
->ixp_q_ccrd
->crd_flags
& CRD_F_IV_EXPLICIT
)
463 q
->ixp_q_iv
= q
->ixp_q_ccrd
->crd_iv
;
465 q
->ixp_q_iv
= q
->ixp_q_iv_data
;
466 crypto_copydata(q
->ixp_q_crp
->crp_flags
, q
->ixp_q_crp
->crp_buf
,
467 q
->ixp_q_ccrd
->crd_inject
,
468 ixp
->ixp_ctx
.cipherCtx
.cipherInitialVectorLen
,
469 (caddr_t
) q
->ixp_q_iv
);
474 auth_off
= q
->ixp_q_acrd
->crd_skip
;
475 auth_len
= q
->ixp_q_acrd
->crd_len
;
476 icv_off
= q
->ixp_q_acrd
->crd_inject
;
479 crypt_off
= q
->ixp_q_ccrd
->crd_skip
;
480 crypt_len
= q
->ixp_q_ccrd
->crd_len
;
481 } else { /* if (q->ixp_q_acrd) */
482 auth_off
= q
->ixp_q_acrd
->crd_skip
;
483 auth_len
= q
->ixp_q_acrd
->crd_len
;
484 icv_off
= q
->ixp_q_acrd
->crd_inject
;
487 if (q
->ixp_q_crp
->crp_flags
& CRYPTO_F_SKBUF
) {
488 struct sk_buff
*skb
= (struct sk_buff
*) q
->ixp_q_crp
->crp_buf
;
489 if (skb_shinfo(skb
)->nr_frags
) {
491 * DAVIDM fix this limitation one day by using
492 * a buffer pool and chaining, it is not currently
493 * needed for current user/kernel space acceleration
495 printk("ixp: Cannot handle fragmented skb's yet !\n");
496 q
->ixp_q_crp
->crp_etype
= ENOENT
;
499 IX_MBUF_MLEN(&q
->ixp_q_mbuf
) =
500 IX_MBUF_PKT_LEN(&q
->ixp_q_mbuf
) = skb
->len
;
501 IX_MBUF_MDATA(&q
->ixp_q_mbuf
) = skb
->data
;
502 } else if (q
->ixp_q_crp
->crp_flags
& CRYPTO_F_IOV
) {
503 struct uio
*uiop
= (struct uio
*) q
->ixp_q_crp
->crp_buf
;
504 if (uiop
->uio_iovcnt
!= 1) {
506 * DAVIDM fix this limitation one day by using
507 * a buffer pool and chaining, it is not currently
508 * needed for current user/kernel space acceleration
510 printk("ixp: Cannot handle more than 1 iovec yet !\n");
511 q
->ixp_q_crp
->crp_etype
= ENOENT
;
514 IX_MBUF_MLEN(&q
->ixp_q_mbuf
) =
515 IX_MBUF_PKT_LEN(&q
->ixp_q_mbuf
) = uiop
->uio_iov
[0].iov_len
;
516 IX_MBUF_MDATA(&q
->ixp_q_mbuf
) = uiop
->uio_iov
[0].iov_base
;
517 } else /* contig buffer */ {
518 IX_MBUF_MLEN(&q
->ixp_q_mbuf
) =
519 IX_MBUF_PKT_LEN(&q
->ixp_q_mbuf
) = q
->ixp_q_crp
->crp_ilen
;
520 IX_MBUF_MDATA(&q
->ixp_q_mbuf
) = q
->ixp_q_crp
->crp_buf
;
523 IX_MBUF_PRIV(&q
->ixp_q_mbuf
) = q
;
525 if (ixp
->ixp_auth_alg
== CRYPTO_SHA1
|| ixp
->ixp_auth_alg
== CRYPTO_MD5
) {
527 * For SHA1 and MD5 hash, need to create an internal buffer that is big
528 * enough to hold the original data + the appropriate padding for the
533 IX_MBUF_MLEN(&q
->ixp_q_mbuf
) = IX_MBUF_PKT_LEN(&q
->ixp_q_mbuf
) =
534 ((IX_MBUF_MLEN(&q
->ixp_q_mbuf
) * 8) + 72 + 511) / 8;
535 tbuf
= kmalloc(IX_MBUF_MLEN(&q
->ixp_q_mbuf
), SLAB_ATOMIC
);
537 if (IX_MBUF_MDATA(&q
->ixp_q_mbuf
) == NULL
) {
538 printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
539 IX_MBUF_MLEN(&q
->ixp_q_mbuf
));
540 q
->ixp_q_crp
->crp_etype
= ENOMEM
;
543 memcpy(tbuf
, &(IX_MBUF_MDATA(&q
->ixp_q_mbuf
))[auth_off
], auth_len
);
545 /* Set location in client buffer to copy hash into */
547 &(IX_MBUF_MDATA(&q
->ixp_q_mbuf
))[auth_off
+ auth_len
];
549 IX_MBUF_MDATA(&q
->ixp_q_mbuf
) = tbuf
;
551 /* Set location in internal buffer for where hash starts */
552 q
->ixp_hash_src
= &(IX_MBUF_MDATA(&q
->ixp_q_mbuf
))[auth_len
];
554 crypt_func
= "ixCryptoAccHashPerform";
555 status
= ixCryptoAccHashPerform(ixp
->ixp_ctx
.authCtx
.authAlgo
,
556 &q
->ixp_q_mbuf
, ixp_hash_perform_cb
, 0, auth_len
, auth_len
,
557 &ixp
->ixp_hash_key_id
);
560 crypt_func
= "ixCryptoAccAuthCryptPerform";
561 status
= ixCryptoAccAuthCryptPerform(ixp
->ixp_ctx_id
, &q
->ixp_q_mbuf
,
562 NULL
, auth_off
, auth_len
, crypt_off
, crypt_len
, icv_off
,
566 if (IX_CRYPTO_ACC_STATUS_SUCCESS
== status
)
569 if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL
== status
) {
570 q
->ixp_q_crp
->crp_etype
= ENOMEM
;
574 printk("ixp: %s failed %u\n", crypt_func
, status
);
575 q
->ixp_q_crp
->crp_etype
= EINVAL
;
578 crypto_done(q
->ixp_q_crp
);
579 kmem_cache_free(qcache
, q
);
584 * because we cannot process the Q from the Register callback
585 * we do it here on a task Q.
589 ixp_process_pending(void *arg
)
591 struct ixp_data
*ixp
= arg
;
592 struct ixp_q
*q
= NULL
;
594 dprintk("%s(%p)\n", __FUNCTION__
, arg
);
599 while (!list_empty(&ixp
->ixp_q
)) {
600 q
= list_entry(ixp
->ixp_q
.next
, struct ixp_q
, ixp_q_list
);
601 list_del(&q
->ixp_q_list
);
606 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
608 ixp_process_pending_wq(struct work_struct
*work
)
610 struct ixp_data
*ixp
= container_of(work
, struct ixp_data
, ixp_pending_work
);
611 ixp_process_pending(ixp
);
616 * callback for when context registration is complete
620 ixp_register_cb(UINT32 ctx_id
, IX_MBUF
*bufp
, IxCryptoAccStatus status
)
623 struct ixp_data
*ixp
;
626 dprintk("%s(%d, %p, %d)\n", __FUNCTION__
, ctx_id
, bufp
, status
);
629 * free any buffer passed in to this routine
632 IX_MBUF_MLEN(bufp
) = IX_MBUF_PKT_LEN(bufp
) = 0;
633 kfree(IX_MBUF_MDATA(bufp
));
634 IX_MBUF_MDATA(bufp
) = NULL
;
637 for (i
= 0; i
< ixp_sesnum
; i
++) {
638 ixp
= ixp_sessions
[i
];
639 if (ixp
&& ixp
->ixp_ctx_id
== ctx_id
)
642 if (i
>= ixp_sesnum
) {
643 printk("ixp: invalid context id %d\n", ctx_id
);
647 if (IX_CRYPTO_ACC_STATUS_WAIT
== status
) {
648 /* this is normal to free the first of two buffers */
649 dprintk("ixp: register not finished yet.\n");
653 if (IX_CRYPTO_ACC_STATUS_SUCCESS
!= status
) {
654 printk("ixp: register failed 0x%x\n", status
);
655 while (!list_empty(&ixp
->ixp_q
)) {
656 q
= list_entry(ixp
->ixp_q
.next
, struct ixp_q
, ixp_q_list
);
657 list_del(&q
->ixp_q_list
);
658 q
->ixp_q_crp
->crp_etype
= EINVAL
;
659 crypto_done(q
->ixp_q_crp
);
660 kmem_cache_free(qcache
, q
);
666 * we are now registered, we cannot start processing the Q here
667 * or we get strange errors with AES (DES/3DES seem to be ok).
669 ixp
->ixp_registered
= 1;
670 schedule_work(&ixp
->ixp_pending_work
);
675 * callback for when data processing is complete
683 IxCryptoAccStatus status
)
687 dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__
, ctx_id
, sbufp
,
691 printk("ixp: NULL sbuf in ixp_perform_cb\n");
695 q
= IX_MBUF_PRIV(sbufp
);
697 printk("ixp: NULL priv in ixp_perform_cb\n");
701 if (status
!= IX_CRYPTO_ACC_STATUS_SUCCESS
) {
702 printk("ixp: perform failed status=%d\n", status
);
703 q
->ixp_q_crp
->crp_etype
= EINVAL
;
706 crypto_done(q
->ixp_q_crp
);
707 kmem_cache_free(qcache
, q
);
712 * registration is not callable at IRQ time, so we defer
713 * to a task queue, this routines completes the registration for us
714 * when the task queue runs
716 * Unfortunately this means we cannot tell OCF that the driver is blocked,
717 * we do that on the next request.
721 ixp_registration(void *arg
)
723 struct ixp_data
*ixp
= arg
;
724 struct ixp_q
*q
= NULL
;
725 IX_MBUF
*pri
= NULL
, *sec
= NULL
;
726 int status
= IX_CRYPTO_ACC_STATUS_SUCCESS
;
729 printk("ixp: ixp_registration with no arg\n");
733 if (ixp
->ixp_ctx_id
!= -1) {
734 ixCryptoAccCtxUnregister(ixp
->ixp_ctx_id
);
735 ixp
->ixp_ctx_id
= -1;
738 if (list_empty(&ixp
->ixp_q
)) {
739 printk("ixp: ixp_registration with no Q\n");
744 * setup the primary and secondary buffers
746 q
= list_entry(ixp
->ixp_q
.next
, struct ixp_q
, ixp_q_list
);
748 pri
= &ixp
->ixp_pri_mbuf
;
749 sec
= &ixp
->ixp_sec_mbuf
;
750 IX_MBUF_MLEN(pri
) = IX_MBUF_PKT_LEN(pri
) = 128;
751 IX_MBUF_MDATA(pri
) = (unsigned char *) kmalloc(128, SLAB_ATOMIC
);
752 IX_MBUF_MLEN(sec
) = IX_MBUF_PKT_LEN(sec
) = 128;
753 IX_MBUF_MDATA(sec
) = (unsigned char *) kmalloc(128, SLAB_ATOMIC
);
756 /* Only need to register if a crypt op or HMAC op */
757 if (!(ixp
->ixp_auth_alg
== CRYPTO_SHA1
||
758 ixp
->ixp_auth_alg
== CRYPTO_MD5
)) {
759 status
= ixCryptoAccCtxRegister(
767 /* Otherwise we start processing pending q */
768 schedule_work(&ixp
->ixp_pending_work
);
771 if (IX_CRYPTO_ACC_STATUS_SUCCESS
== status
)
774 if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS
== status
) {
775 printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
777 /* perhaps we should return EGAIN on queued ops ? */
781 printk("ixp: ixCryptoAccCtxRegister failed %d\n", status
);
782 ixp
->ixp_ctx_id
= -1;
785 * everything waiting is toasted
787 while (!list_empty(&ixp
->ixp_q
)) {
788 q
= list_entry(ixp
->ixp_q
.next
, struct ixp_q
, ixp_q_list
);
789 list_del(&q
->ixp_q_list
);
790 q
->ixp_q_crp
->crp_etype
= ENOENT
;
791 crypto_done(q
->ixp_q_crp
);
792 kmem_cache_free(qcache
, q
);
796 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
798 ixp_registration_wq(struct work_struct
*work
)
800 struct ixp_data
*ixp
= container_of(work
, struct ixp_data
,
801 ixp_registration_work
);
802 ixp_registration(ixp
);
810 ixp_process(device_t dev
, struct cryptop
*crp
, int hint
)
812 struct ixp_data
*ixp
;
814 struct ixp_q
*q
= NULL
;
817 dprintk("%s()\n", __FUNCTION__
);
821 dprintk("%s,%d: EINVAL\n", __FILE__
, __LINE__
);
830 if (crp
->crp_desc
== NULL
|| crp
->crp_buf
== NULL
) {
831 dprintk("%s,%d: EINVAL\n", __FILE__
, __LINE__
);
832 crp
->crp_etype
= EINVAL
;
837 * find the session we are using
840 lid
= crp
->crp_sid
& 0xffffffff;
841 if (lid
>= ixp_sesnum
|| lid
== 0 || ixp_sessions
== NULL
||
842 ixp_sessions
[lid
] == NULL
) {
843 crp
->crp_etype
= ENOENT
;
844 dprintk("%s,%d: ENOENT\n", __FILE__
, __LINE__
);
847 ixp
= ixp_sessions
[lid
];
850 * setup a new request ready for queuing
852 q
= kmem_cache_alloc(qcache
, SLAB_ATOMIC
);
854 dprintk("%s,%d: ENOMEM\n", __FILE__
, __LINE__
);
855 crp
->crp_etype
= ENOMEM
;
859 * save some cycles by only zeroing the important bits
861 memset(&q
->ixp_q_mbuf
, 0, sizeof(q
->ixp_q_mbuf
));
862 q
->ixp_q_ccrd
= NULL
;
863 q
->ixp_q_acrd
= NULL
;
868 * point the cipher and auth descriptors appropriately
869 * check that we have something to do
871 if (crp
->crp_desc
->crd_alg
== ixp
->ixp_cipher_alg
)
872 q
->ixp_q_ccrd
= crp
->crp_desc
;
873 else if (crp
->crp_desc
->crd_alg
== ixp
->ixp_auth_alg
)
874 q
->ixp_q_acrd
= crp
->crp_desc
;
876 crp
->crp_etype
= ENOENT
;
877 dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__
, __LINE__
);
880 if (crp
->crp_desc
->crd_next
) {
881 if (crp
->crp_desc
->crd_next
->crd_alg
== ixp
->ixp_cipher_alg
)
882 q
->ixp_q_ccrd
= crp
->crp_desc
->crd_next
;
883 else if (crp
->crp_desc
->crd_next
->crd_alg
== ixp
->ixp_auth_alg
)
884 q
->ixp_q_acrd
= crp
->crp_desc
->crd_next
;
886 crp
->crp_etype
= ENOENT
;
887 dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__
, __LINE__
);
893 * If there is a direction change for this context then we mark it as
894 * unregistered and re-register it for the new direction. This is not
895 * a very expensive operation and currently only tends to happen when
896 * user-space applications are doing benchmarks
898 * DM - we should be checking for pending requests before unregistering.
900 if (q
->ixp_q_ccrd
&& ixp
->ixp_registered
&&
901 ixp
->ixp_crd_flags
!= (q
->ixp_q_ccrd
->crd_flags
& CRD_F_ENCRYPT
)) {
902 dprintk("%s - detected direction change on session\n", __FUNCTION__
);
903 ixp
->ixp_registered
= 0;
907 * if we are registered, call straight into the perform code
909 if (ixp
->ixp_registered
) {
915 * the only part of the context not set in newsession is the direction
919 ixp
->ixp_crd_flags
= (q
->ixp_q_ccrd
->crd_flags
& CRD_F_ENCRYPT
);
920 if (q
->ixp_q_ccrd
->crd_flags
& CRD_F_ENCRYPT
) {
921 ixp
->ixp_ctx
.operation
= q
->ixp_q_acrd
?
922 IX_CRYPTO_ACC_OP_ENCRYPT_AUTH
: IX_CRYPTO_ACC_OP_ENCRYPT
;
924 ixp
->ixp_ctx
.operation
= q
->ixp_q_acrd
?
925 IX_CRYPTO_ACC_OP_AUTH_DECRYPT
: IX_CRYPTO_ACC_OP_DECRYPT
;
928 /* q->ixp_q_acrd must be set if we are here */
929 ixp
->ixp_ctx
.operation
= IX_CRYPTO_ACC_OP_AUTH_CALC
;
932 status
= list_empty(&ixp
->ixp_q
);
933 list_add_tail(&q
->ixp_q_list
, &ixp
->ixp_q
);
935 schedule_work(&ixp
->ixp_registration_work
);
940 kmem_cache_free(qcache
, q
);
948 * key processing support for the ixp465
953 * copy a BN (LE) into a buffer (BE) and fill out the op appropriately
954 * assume zeroed and only copy bits that are significant
958 ixp_copy_ibuf(struct crparam
*p
, IxCryptoAccPkeEauOperand
*op
, UINT32
*buf
)
960 unsigned char *src
= (unsigned char *) p
->crp_p
;
962 int len
, bits
= p
->crp_nbits
;
964 dprintk("%s()\n", __FUNCTION__
);
966 if (bits
> MAX_IOP_SIZE
* sizeof(UINT32
) * 8) {
967 dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__
,
968 bits
, MAX_IOP_SIZE
* sizeof(UINT32
) * 8);
972 len
= (bits
+ 31) / 32; /* the number UINT32's needed */
974 dst
= (unsigned char *) &buf
[len
];
982 #if 0 /* no need to zero remaining bits as it is done during request alloc */
983 while (dst
> (unsigned char *) buf
)
993 * copy out the result, be as forgiving as we can about small output buffers
997 ixp_copy_obuf(struct crparam
*p
, IxCryptoAccPkeEauOpResult
*op
, UINT32
*buf
)
999 unsigned char *dst
= (unsigned char *) p
->crp_p
;
1000 unsigned char *src
= (unsigned char *) buf
;
1001 int len
, z
, bits
= p
->crp_nbits
;
1003 dprintk("%s()\n", __FUNCTION__
);
1005 len
= op
->dataLen
* sizeof(UINT32
);
1007 /* skip leading zeroes to be small buffer friendly */
1009 while (z
< len
&& src
[z
] == '\0')
1016 while (len
> 0 && bits
> 0) {
1028 dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
1029 __FUNCTION__
, len
, z
, p
->crp_nbits
/ 8);
1038 * the parameter offsets for exp_mod
1041 #define IXP_PARAM_BASE 0
1042 #define IXP_PARAM_EXP 1
1043 #define IXP_PARAM_MOD 2
1044 #define IXP_PARAM_RES 3
1047 * key processing complete callback, is also used to start processing
1048 * by passing a NULL for pResult
1053 IxCryptoAccPkeEauOperation operation
,
1054 IxCryptoAccPkeEauOpResult
*pResult
,
1056 IxCryptoAccStatus status
)
1058 struct ixp_pkq
*q
, *tmp
;
1059 unsigned long flags
;
1061 dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__
, operation
, pResult
,
1062 carryOrBorrow
, status
);
1064 /* handle a completed request */
1066 if (ixp_pk_cur
&& &ixp_pk_cur
->pkq_result
== pResult
) {
1068 if (status
!= IX_CRYPTO_ACC_STATUS_SUCCESS
) {
1069 dprintk("%s() - op failed 0x%x\n", __FUNCTION__
, status
);
1070 q
->pkq_krp
->krp_status
= ERANGE
; /* could do better */
1072 /* copy out the result */
1073 if (ixp_copy_obuf(&q
->pkq_krp
->krp_param
[IXP_PARAM_RES
],
1074 &q
->pkq_result
, q
->pkq_obuf
))
1075 q
->pkq_krp
->krp_status
= ERANGE
;
1077 crypto_kdone(q
->pkq_krp
);
1081 printk("%s - callback with invalid result pointer\n", __FUNCTION__
);
1084 spin_lock_irqsave(&ixp_pkq_lock
, flags
);
1085 if (ixp_pk_cur
|| list_empty(&ixp_pkq
)) {
1086 spin_unlock_irqrestore(&ixp_pkq_lock
, flags
);
1090 list_for_each_entry_safe(q
, tmp
, &ixp_pkq
, pkq_list
) {
1092 list_del(&q
->pkq_list
);
1095 spin_unlock_irqrestore(&ixp_pkq_lock
, flags
);
1097 status
= ixCryptoAccPkeEauPerform(
1098 IX_CRYPTO_ACC_OP_EAU_MOD_EXP
,
1103 if (status
== IX_CRYPTO_ACC_STATUS_SUCCESS
) {
1104 dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__
);
1105 return; /* callback will return here for callback */
1106 } else if (status
== IX_CRYPTO_ACC_STATUS_RETRY
) {
1107 printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__
);
1109 printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
1110 __FUNCTION__
, status
);
1112 q
->pkq_krp
->krp_status
= ERANGE
; /* could do better */
1113 crypto_kdone(q
->pkq_krp
);
1115 spin_lock_irqsave(&ixp_pkq_lock
, flags
);
1117 spin_unlock_irqrestore(&ixp_pkq_lock
, flags
);
1122 ixp_kprocess(device_t dev
, struct cryptkop
*krp
, int hint
)
1126 unsigned long flags
;
1128 dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__
,
1129 krp
->krp_param
[IXP_PARAM_BASE
].crp_nbits
,
1130 krp
->krp_param
[IXP_PARAM_EXP
].crp_nbits
,
1131 krp
->krp_param
[IXP_PARAM_MOD
].crp_nbits
,
1132 krp
->krp_param
[IXP_PARAM_RES
].crp_nbits
);
1135 if (krp
->krp_op
!= CRK_MOD_EXP
) {
1136 krp
->krp_status
= EOPNOTSUPP
;
1140 q
= (struct ixp_pkq
*) kmalloc(sizeof(*q
), GFP_KERNEL
);
1142 krp
->krp_status
= ENOMEM
;
1147 * The PKE engine does not appear to zero the output buffer
1148 * appropriately, so we need to do it all here.
1150 memset(q
, 0, sizeof(*q
));
1153 INIT_LIST_HEAD(&q
->pkq_list
);
1155 if (ixp_copy_ibuf(&krp
->krp_param
[IXP_PARAM_BASE
], &q
->pkq_op
.modExpOpr
.M
,
1158 if (!rc
&& ixp_copy_ibuf(&krp
->krp_param
[IXP_PARAM_EXP
],
1159 &q
->pkq_op
.modExpOpr
.e
, q
->pkq_ibuf1
))
1161 if (!rc
&& ixp_copy_ibuf(&krp
->krp_param
[IXP_PARAM_MOD
],
1162 &q
->pkq_op
.modExpOpr
.N
, q
->pkq_ibuf2
))
1167 krp
->krp_status
= ERANGE
;
1171 q
->pkq_result
.pData
= q
->pkq_obuf
;
1172 q
->pkq_result
.dataLen
=
1173 (krp
->krp_param
[IXP_PARAM_RES
].crp_nbits
+ 31) / 32;
1175 spin_lock_irqsave(&ixp_pkq_lock
, flags
);
1176 list_add_tail(&q
->pkq_list
, &ixp_pkq
);
1177 spin_unlock_irqrestore(&ixp_pkq_lock
, flags
);
1180 ixp_kperform_cb(0, NULL
, 0, 0);
1190 #ifdef CONFIG_OCF_RANDOMHARVEST
1192 * We run the random number generator output through SHA so that it
1193 * is FIPS compliant.
1196 static volatile int sha_done
= 0;
1197 static unsigned char sha_digest
[20];
1200 ixp_hash_cb(UINT8
*digest
, IxCryptoAccStatus status
)
1202 dprintk("%s(%p, %d)\n", __FUNCTION__
, digest
, status
);
1203 if (sha_digest
!= digest
)
1204 printk("digest error\n");
1205 if (IX_CRYPTO_ACC_STATUS_SUCCESS
== status
)
1212 ixp_read_random(void *arg
, u_int32_t
*buf
, int maxwords
)
1214 IxCryptoAccStatus status
;
1217 dprintk("%s(%p, %d)\n", __FUNCTION__
, buf
, maxwords
);
1218 memset(buf
, 0, maxwords
* sizeof(*buf
));
1219 status
= ixCryptoAccPkePseudoRandomNumberGet(maxwords
, buf
);
1220 if (status
!= IX_CRYPTO_ACC_STATUS_SUCCESS
) {
1221 dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
1222 __FUNCTION__
, status
);
1227 * run the random data through SHA to make it look more random
1230 n
= sizeof(sha_digest
); /* process digest bytes at a time */
1233 for (i
= 0; i
< maxwords
; i
+= n
/ sizeof(*buf
)) {
1234 if ((maxwords
- i
) * sizeof(*buf
) < n
)
1235 n
= (maxwords
- i
) * sizeof(*buf
);
1237 status
= ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1
,
1238 (UINT8
*) &buf
[i
], n
, ixp_hash_cb
, sha_digest
);
1239 if (status
!= IX_CRYPTO_ACC_STATUS_SUCCESS
) {
1240 dprintk("ixCryptoAccPkeHashPerform failed %d\n", status
);
1246 dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done
);
1249 memcpy(&buf
[i
], sha_digest
, n
);
1250 rc
+= n
/ sizeof(*buf
);;
1255 #endif /* CONFIG_OCF_RANDOMHARVEST */
1257 #endif /* __ixp46X */
1262 * our driver startup and shutdown routines
1268 dprintk("%s(%p)\n", __FUNCTION__
, ixp_init
);
1270 if (ixp_init_crypto
&& ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS
)
1271 printk("ixCryptoAccInit failed, assuming already initialised!\n");
1273 qcache
= kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q
), 0,
1274 SLAB_HWCACHE_ALIGN
, NULL
1275 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
1280 printk("failed to create Qcache\n");
1284 memset(&ixpdev
, 0, sizeof(ixpdev
));
1285 softc_device_init(&ixpdev
, "ixp4xx", 0, ixp_methods
);
1287 ixp_id
= crypto_get_driverid(softc_get_device(&ixpdev
),
1288 CRYPTOCAP_F_HARDWARE
);
1290 panic("IXP/OCF crypto device cannot initialize!");
1292 #define REGISTER(alg) \
1293 crypto_register(ixp_id,alg,0,0)
1295 REGISTER(CRYPTO_DES_CBC
);
1296 REGISTER(CRYPTO_3DES_CBC
);
1297 REGISTER(CRYPTO_RIJNDAEL128_CBC
);
1298 #ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
1299 REGISTER(CRYPTO_MD5
);
1300 REGISTER(CRYPTO_SHA1
);
1302 REGISTER(CRYPTO_MD5_HMAC
);
1303 REGISTER(CRYPTO_SHA1_HMAC
);
1307 spin_lock_init(&ixp_pkq_lock
);
1309 * we do not enable the go fast options here as they can potentially
1310 * allow timing based attacks
1312 * http://www.openssl.org/news/secadv_20030219.txt
1314 ixCryptoAccPkeEauExpConfig(0, 0);
1315 crypto_kregister(ixp_id
, CRK_MOD_EXP
, 0);
1316 #ifdef CONFIG_OCF_RANDOMHARVEST
1317 crypto_rregister(ixp_id
, ixp_read_random
, NULL
);
1327 dprintk("%s()\n", __FUNCTION__
);
1328 crypto_unregister_all(ixp_id
);
1330 kmem_cache_destroy(qcache
);
1334 module_init(ixp_init
);
1335 module_exit(ixp_exit
);
1337 MODULE_LICENSE("Dual BSD/GPL");
1338 MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
1339 MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");