/*
 * An OCF module that uses Intel's IXP CryptoAcc API to do the crypto.
 * This driver requires the IXP400 Access Library that is available
 * from Intel in order to operate (or compile).
 *
 * Written by David McCullough <david_mccullough@mcafee.com>
 * Copyright (C) 2006-2010 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 * 1. distributions of this source code include the above copyright
 *    notice, this list of conditions and the following disclaimer;
 *
 * 2. distributions in binary form include the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in the documentation and/or other associated materials;
 *
 * 3. the copyright holder's name is not used to endorse products
 *    built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <asm/scatterlist.h>

#include <IxOsBuffMgt.h>
#include <IxCryptoAcc.h>
#include <IxOsServices.h>
#include <IxOsCacheMMU.h>

#include <cryptodev.h>
#define IX_MBUF_PRIV(x) ((x)->priv)
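/*
 * The CryptoAcc perform/hash calls are asynchronous, so a pointer to the
 * driver's per-request state is stashed in the mbuf's priv field via
 * IX_MBUF_PRIV(); the completion callbacks use the same macro to recover it.
 */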
struct ixp_q {
	struct list_head	 ixp_q_list;
	struct ixp_data		*ixp_q_data;
	struct cryptop		*ixp_q_crp;
	struct cryptodesc	*ixp_q_ccrd;
	struct cryptodesc	*ixp_q_acrd;
	IX_MBUF			 ixp_q_mbuf;
	UINT8			*ixp_hash_dest;	/* Location for hash in client buffer */
	UINT8			*ixp_hash_src;	/* Location of hash in internal buffer */
	unsigned char		 ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
	unsigned char		*ixp_q_iv;
};
struct ixp_data {
	int			 ixp_registered;	/* is the context registered */
	int			 ixp_crd_flags;		/* detect direction changes */

	UINT32			 ixp_hash_key_id;	/* used when hashing */
	IxCryptoAccCtx		 ixp_ctx;

	struct work_struct	 ixp_pending_work;
	struct work_struct	 ixp_registration_work;
	struct list_head	 ixp_q;			/* unprocessed requests */
};
#define MAX_IOP_SIZE	64	/* words */
#define MAX_OOP_SIZE	128
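/*
 * 64 32-bit words is 2048 bits, the largest operand ixp_copy_ibuf() will
 * accept for a CRK_MOD_EXP request; the output buffer is twice that size.
 */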
#ifdef __ixp46X

struct ixp_pkq {
	struct list_head		 pkq_list;
	struct cryptkop			*pkq_krp;
	IxCryptoAccPkeEauInOperands	 pkq_op;
	IxCryptoAccPkeEauOpResult	 pkq_result;
	UINT32				 pkq_ibuf0[MAX_IOP_SIZE];
	UINT32				 pkq_ibuf1[MAX_IOP_SIZE];
	UINT32				 pkq_ibuf2[MAX_IOP_SIZE];
	UINT32				 pkq_obuf[MAX_OOP_SIZE];
};

static LIST_HEAD(ixp_pkq);		/* current PK wait list */
static struct ixp_pkq *ixp_pk_cur;
static spinlock_t ixp_pkq_lock;

#endif /* __ixp46X */
static int ixp_blocked = 0;

static int32_t ixp_id = -1;
static struct ixp_data **ixp_sessions = NULL;
static u_int32_t ixp_sesnum = 0;

static int ixp_process(device_t, struct cryptop *, int);
static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
static int ixp_freesession(device_t, u_int64_t);
static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *qcache;
#else
static struct kmem_cache *qcache;
#endif
#define debug ixp_debug
static int ixp_debug = 0;
module_param(ixp_debug, int, 0644);
MODULE_PARM_DESC(ixp_debug, "Enable debug");

static int ixp_init_crypto = 1;
module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");

static void ixp_process_pending(void *arg);
static void ixp_registration(void *arg);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void ixp_process_pending_wq(struct work_struct *work);
static void ixp_registration_wq(struct work_struct *work);
#endif
/*
 * dummy device structure
 */

static struct {
	softc_device_decl	sc_dev;
} ixpdev;

static device_method_t ixp_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	ixp_newsession),
	DEVMETHOD(cryptodev_freesession, ixp_freesession),
	DEVMETHOD(cryptodev_process,	ixp_process),
	DEVMETHOD(cryptodev_kprocess,	ixp_kprocess),
};
/*
 * Generate a new software session.
 */
static int
ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct ixp_data *ixp;
	int i;
#define AUTH_LEN(cri, def) \
	(cri->cri_mlen ? cri->cri_mlen : (def))
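	/*
	 * AUTH_LEN honours a caller-requested (truncated) MAC length in
	 * cri_mlen and falls back to the algorithm's full digest size.
	 */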
187 dprintk("%s():alg %d\n", __FUNCTION__
,cri
->cri_alg
);
188 if (sid
== NULL
|| cri
== NULL
) {
189 dprintk("%s,%d - EINVAL\n", __FILE__
, __LINE__
);
194 for (i
= 1; i
< ixp_sesnum
; i
++)
195 if (ixp_sessions
[i
] == NULL
)
198 i
= 1; /* NB: to silence compiler warning */
200 if (ixp_sessions
== NULL
|| i
== ixp_sesnum
) {
201 struct ixp_data
**ixpd
;
203 if (ixp_sessions
== NULL
) {
204 i
= 1; /* We leave ixp_sessions[0] empty */
205 ixp_sesnum
= CRYPTO_SW_SESSIONS
;
209 ixpd
= kmalloc(ixp_sesnum
* sizeof(struct ixp_data
*), SLAB_ATOMIC
);
211 /* Reset session number */
212 if (ixp_sesnum
== CRYPTO_SW_SESSIONS
)
216 dprintk("%s,%d: ENOBUFS\n", __FILE__
, __LINE__
);
219 memset(ixpd
, 0, ixp_sesnum
* sizeof(struct ixp_data
*));
221 /* Copy existing sessions */
223 memcpy(ixpd
, ixp_sessions
,
224 (ixp_sesnum
/ 2) * sizeof(struct ixp_data
*));
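	/*
	 * The table above grows by doubling, so the existing entries occupy
	 * exactly the first half of the new table (ixp_sesnum / 2); slot 0
	 * is never handed out, which lets a session id of zero be rejected
	 * as invalid in ixp_process().
	 */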
	ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
			SLAB_ATOMIC);
	if (ixp_sessions[i] == NULL) {
		ixp_freesession(NULL, i);
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return ENOBUFS;
	}

	ixp = ixp_sessions[i];
	memset(ixp, 0, sizeof(*ixp));

	ixp->ixp_cipher_alg = -1;
	ixp->ixp_auth_alg = -1;
	ixp->ixp_ctx_id = -1;
	INIT_LIST_HEAD(&ixp->ixp_q);

	ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
	switch (cri->cri_alg) {
	case CRYPTO_DES_CBC:
		ixp->ixp_cipher_alg = cri->cri_alg;
		ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
		ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
		ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
		ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
		ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
				IX_CRYPTO_ACC_DES_IV_64;
		memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
				cri->cri_key, (cri->cri_klen + 7) / 8);
		break;
	case CRYPTO_3DES_CBC:
		ixp->ixp_cipher_alg = cri->cri_alg;
		ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
		ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
		ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
		ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
		ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
				IX_CRYPTO_ACC_DES_IV_64;
		memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
				cri->cri_key, (cri->cri_klen + 7) / 8);
		break;
	case CRYPTO_RIJNDAEL128_CBC:
		ixp->ixp_cipher_alg = cri->cri_alg;
		ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
		ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
		ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
		ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
		ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
		memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
				cri->cri_key, (cri->cri_klen + 7) / 8);
		break;
	case CRYPTO_MD5_HMAC:
		ixp->ixp_auth_alg = cri->cri_alg;
		ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
		ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
		ixp->ixp_ctx.authCtx.aadLen = 0;
		/* Only MD5_HMAC needs a key */
		if (cri->cri_alg == CRYPTO_MD5_HMAC) {
			ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
			if (ixp->ixp_ctx.authCtx.authKeyLen >
					sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
				printk(
					"ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
					cri->cri_klen);
				ixp_freesession(NULL, i);
				return EINVAL;
			}
			memcpy(ixp->ixp_ctx.authCtx.key.authKey,
					cri->cri_key, (cri->cri_klen + 7) / 8);
		}
		break;
	case CRYPTO_SHA1_HMAC:
		ixp->ixp_auth_alg = cri->cri_alg;
		ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
		ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
		ixp->ixp_ctx.authCtx.aadLen = 0;
		/* Only SHA1_HMAC needs a key */
		if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
			ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
			if (ixp->ixp_ctx.authCtx.authKeyLen >
					sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
				printk(
					"ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
					cri->cri_klen);
				ixp_freesession(NULL, i);
				return EINVAL;
			}
			memcpy(ixp->ixp_ctx.authCtx.key.authKey,
					cri->cri_key, (cri->cri_klen + 7) / 8);
		}
		break;
333 printk("ixp: unknown algo 0x%x\n", cri
->cri_alg
);
334 ixp_freesession(NULL
, i
);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
	INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
#else
	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
	INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
#endif

	*sid = i;
	return 0;
}
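/*
 * The INIT_WORK split above tracks the 2.6.20 workqueue API change: newer
 * kernels hand the callback its work_struct (the *_wq wrappers recover the
 * ixp_data with container_of), while older kernels passed the data pointer
 * given as INIT_WORK's third argument.
 */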
static int
ixp_freesession(device_t dev, u_int64_t tid)
{
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	dprintk("%s()\n", __FUNCTION__);
	if (sid > ixp_sesnum || ixp_sessions == NULL ||
			ixp_sessions[sid] == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	if (ixp_sessions[sid]) {
		if (ixp_sessions[sid]->ixp_ctx_id != -1) {
			ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
			ixp_sessions[sid]->ixp_ctx_id = -1;
		}
		kfree(ixp_sessions[sid]);
	}
	ixp_sessions[sid] = NULL;
	if (ixp_blocked) {
		ixp_blocked = 0;
		crypto_unblock(ixp_id, CRYPTO_SYMQ);
	}
	return 0;
}
/*
 * callback for when hash processing is complete
 */
static void
ixp_hash_perform_cb(
	UINT32 hash_key_id,
	IX_MBUF *bufp,
	IxCryptoAccStatus status)
{
	struct ixp_q *q;

	dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);

	if (bufp == NULL) {
		printk("ixp: NULL buf in %s\n", __FUNCTION__);
		return;
	}

	q = IX_MBUF_PRIV(bufp);
	if (q == NULL) {
		printk("ixp: NULL priv in %s\n", __FUNCTION__);
		return;
	}

	if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
		/* On success, need to copy hash back into original client buffer */
		memcpy(q->ixp_hash_dest, q->ixp_hash_src,
				(q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
					SHA1_HASH_LEN : MD5_HASH_LEN);
	} else {
		printk("ixp: hash perform failed status=%d\n", status);
		q->ixp_q_crp->crp_etype = EINVAL;
	}

	/* Free internal buffer used for hashing */
	kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));

	crypto_done(q->ixp_q_crp);
	kmem_cache_free(qcache, q);
}
/*
 * setup a request and perform it
 */
static void
ixp_q_process(struct ixp_q *q)
{
	IxCryptoAccStatus status;
	struct ixp_data *ixp = q->ixp_q_data;
	int auth_off = 0;
	int auth_len = 0;
	int crypt_off = 0;
	int crypt_len = 0;
	int icv_off = 0;
	char *crypt_func;

	dprintk("%s(%p)\n", __FUNCTION__, q);

	if (q->ixp_q_ccrd) {
		if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
			q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
		} else {
			q->ixp_q_iv = q->ixp_q_iv_data;
			crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
					q->ixp_q_ccrd->crd_inject,
					ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
					(caddr_t) q->ixp_q_iv);
		}
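		/*
		 * CRD_F_IV_EXPLICIT means the caller passed the IV in crd_iv;
		 * otherwise the IV is read out of the request buffer at
		 * crd_inject into our private copy (ixp_q_iv_data).
		 */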
		if (q->ixp_q_acrd) {
			auth_off = q->ixp_q_acrd->crd_skip;
			auth_len = q->ixp_q_acrd->crd_len;
			icv_off = q->ixp_q_acrd->crd_inject;
		}
		crypt_off = q->ixp_q_ccrd->crd_skip;
		crypt_len = q->ixp_q_ccrd->crd_len;
	} else { /* if (q->ixp_q_acrd) */
		auth_off = q->ixp_q_acrd->crd_skip;
		auth_len = q->ixp_q_acrd->crd_len;
		icv_off = q->ixp_q_acrd->crd_inject;
	}

	if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
		struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			/*
			 * DAVIDM fix this limitation one day by using
			 * a buffer pool and chaining, it is not currently
			 * needed for current user/kernel space acceleration
			 */
			printk("ixp: Cannot handle fragmented skb's yet !\n");
			q->ixp_q_crp->crp_etype = ENOENT;
			goto done;
		}
		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = skb->len;
		IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
	} else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
		struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
		if (uiop->uio_iovcnt != 1) {
			/*
			 * DAVIDM fix this limitation one day by using
			 * a buffer pool and chaining, it is not currently
			 * needed for current user/kernel space acceleration
			 */
			printk("ixp: Cannot handle more than 1 iovec yet !\n");
			q->ixp_q_crp->crp_etype = ENOENT;
			goto done;
		}
		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
		IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
	} else /* contig buffer */ {
		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
		IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
	}

	IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
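	/*
	 * All three OCF buffer types (sk_buff, single-segment uio and a
	 * plain contiguous buffer) are mapped onto one flat IX_MBUF above,
	 * which is why chained skb fragments and multi-element iovecs are
	 * rejected for now.
	 */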
	if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
		/*
		 * For SHA1 and MD5 hash, need to create an internal buffer that is big
		 * enough to hold the original data + the appropriate padding for the
		 * hash algorithm.
		 */
		UINT8 *tbuf = NULL;

		IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
				((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
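		/*
		 * MD5/SHA1 padding appends at least a pad byte plus a 64-bit
		 * length field (72 bits) and rounds the message up to a
		 * 512-bit block, so data_bits + 72 + 511 bits, divided by 8,
		 * is always enough bytes for the padded message
		 * (e.g. 20 bytes of input -> (160 + 72 + 511) / 8 = 92 bytes).
		 */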
		tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);

		if (IX_MBUF_MDATA(&q->ixp_q_mbuf) == NULL) {
			printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
					IX_MBUF_MLEN(&q->ixp_q_mbuf));
			q->ixp_q_crp->crp_etype = ENOMEM;
			goto done;
		}
		memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);

		/* Set location in client buffer to copy hash into */
		q->ixp_hash_dest =
				&(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];

		IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;

		/* Set location in internal buffer for where hash starts */
		q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];

		crypt_func = "ixCryptoAccHashPerform";
		status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
				&q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
				&ixp->ixp_hash_key_id);
	} else {
		crypt_func = "ixCryptoAccAuthCryptPerform";
		status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
				NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
				q->ixp_q_iv);
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
		return;

	if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
		q->ixp_q_crp->crp_etype = ENOMEM;
		goto done;
	}

	printk("ixp: %s failed %u\n", crypt_func, status);
	q->ixp_q_crp->crp_etype = EINVAL;

done:
	crypto_done(q->ixp_q_crp);
	kmem_cache_free(qcache, q);
}
/*
 * because we cannot process the Q from the Register callback
 * we do it here on a task Q.
 */
static void
ixp_process_pending(void *arg)
{
	struct ixp_data *ixp = arg;
	struct ixp_q *q = NULL;

	dprintk("%s(%p)\n", __FUNCTION__, arg);

	while (!list_empty(&ixp->ixp_q)) {
		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
		list_del(&q->ixp_q_list);
		ixp_q_process(q);
	}
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
ixp_process_pending_wq(struct work_struct *work)
{
	struct ixp_data *ixp = container_of(work, struct ixp_data, ixp_pending_work);
	ixp_process_pending(ixp);
}
#endif
/*
 * callback for when context registration is complete
 */
static void
ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
{
	int i;
	struct ixp_data *ixp;
	struct ixp_q *q;

	dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);

	/*
	 * free any buffer passed in to this routine
	 */
	if (bufp) {
		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
		kfree(IX_MBUF_MDATA(bufp));
		IX_MBUF_MDATA(bufp) = NULL;
	}

	for (i = 0; i < ixp_sesnum; i++) {
		ixp = ixp_sessions[i];
		if (ixp && ixp->ixp_ctx_id == ctx_id)
			break;
	}
	if (i >= ixp_sesnum) {
		printk("ixp: invalid context id %d\n", ctx_id);
		return;
	}

	if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
		/* this is normal to free the first of two buffers */
		dprintk("ixp: register not finished yet.\n");
		return;
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
		printk("ixp: register failed 0x%x\n", status);
		while (!list_empty(&ixp->ixp_q)) {
			q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
			list_del(&q->ixp_q_list);
			q->ixp_q_crp->crp_etype = EINVAL;
			crypto_done(q->ixp_q_crp);
			kmem_cache_free(qcache, q);
		}
		return;
	}

	/*
	 * we are now registered, we cannot start processing the Q here
	 * or we get strange errors with AES (DES/3DES seem to be ok).
	 */
	ixp->ixp_registered = 1;
	schedule_work(&ixp->ixp_pending_work);
}
/*
 * callback for when data processing is complete
 */
static void
ixp_perform_cb(
	UINT32 ctx_id,
	IX_MBUF *sbufp,
	IX_MBUF *dbufp,
	IxCryptoAccStatus status)
{
	struct ixp_q *q;

	dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
			dbufp, status);

	if (sbufp == NULL) {
		printk("ixp: NULL sbuf in ixp_perform_cb\n");
		return;
	}

	q = IX_MBUF_PRIV(sbufp);
	if (q == NULL) {
		printk("ixp: NULL priv in ixp_perform_cb\n");
		return;
	}

	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
		printk("ixp: perform failed status=%d\n", status);
		q->ixp_q_crp->crp_etype = EINVAL;
	}

	crypto_done(q->ixp_q_crp);
	kmem_cache_free(qcache, q);
}
/*
 * registration is not callable at IRQ time, so we defer
 * to a task queue; this routine completes the registration for us
 * when the task queue runs.
 *
 * Unfortunately this means we cannot tell OCF that the driver is blocked,
 * we do that on the next request.
 */
static void
ixp_registration(void *arg)
{
	struct ixp_data *ixp = arg;
	struct ixp_q *q = NULL;
	IX_MBUF *pri = NULL, *sec = NULL;
	int status = IX_CRYPTO_ACC_STATUS_SUCCESS;

	if (!ixp) {
		printk("ixp: ixp_registration with no arg\n");
		return;
	}

	if (ixp->ixp_ctx_id != -1) {
		ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
		ixp->ixp_ctx_id = -1;
	}

	if (list_empty(&ixp->ixp_q)) {
		printk("ixp: ixp_registration with no Q\n");
		return;
	}

	/*
	 * setup the primary and secondary buffers
	 */
	q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);

	pri = &ixp->ixp_pri_mbuf;
	sec = &ixp->ixp_sec_mbuf;
	IX_MBUF_MLEN(pri) = IX_MBUF_PKT_LEN(pri) = 128;
	IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
	IX_MBUF_MLEN(sec) = IX_MBUF_PKT_LEN(sec) = 128;
	IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);

	/* Only need to register if a crypt op or HMAC op */
	if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
			ixp->ixp_auth_alg == CRYPTO_MD5)) {
		status = ixCryptoAccCtxRegister(
					&ixp->ixp_ctx,
					pri, sec,
					ixp_register_cb,
					ixp_perform_cb,
					&ixp->ixp_ctx_id);
	} else {
		/* Otherwise we start processing pending q */
		schedule_work(&ixp->ixp_pending_work);
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
		return;

	if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
		printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
		/* perhaps we should return EGAIN on queued ops ? */
	} else {
		printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
	}
	ixp->ixp_ctx_id = -1;

	/*
	 * everything waiting is toasted
	 */
	while (!list_empty(&ixp->ixp_q)) {
		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
		list_del(&q->ixp_q_list);
		q->ixp_q_crp->crp_etype = ENOENT;
		crypto_done(q->ixp_q_crp);
		kmem_cache_free(qcache, q);
	}
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
ixp_registration_wq(struct work_struct *work)
{
	struct ixp_data *ixp = container_of(work, struct ixp_data,
								ixp_registration_work);
	ixp_registration(ixp);
}
#endif
static int
ixp_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ixp_data *ixp;
	u_int32_t lid;
	struct ixp_q *q = NULL;
	int status;

	dprintk("%s()\n", __FUNCTION__);

	if (crp == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

	/*
	 * find the session we are using
	 */
	lid = crp->crp_sid & 0xffffffff;
	if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
			ixp_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	ixp = ixp_sessions[lid];

	/*
	 * setup a new request ready for queuing
	 */
	q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
	if (q == NULL) {
		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
		crp->crp_etype = ENOMEM;
		goto done;
	}
	/*
	 * save some cycles by only zeroing the important bits
	 */
	memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
	q->ixp_q_ccrd = NULL;
	q->ixp_q_acrd = NULL;
	q->ixp_q_crp = crp;
	q->ixp_q_data = ixp;

	/*
	 * point the cipher and auth descriptors appropriately,
	 * check that we have something to do
	 */
	if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
		q->ixp_q_ccrd = crp->crp_desc;
	else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
		q->ixp_q_acrd = crp->crp_desc;
	else {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	if (crp->crp_desc->crd_next) {
		if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
			q->ixp_q_ccrd = crp->crp_desc->crd_next;
		else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
			q->ixp_q_acrd = crp->crp_desc->crd_next;
		else {
			crp->crp_etype = ENOENT;
			dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
			goto done;
		}
	}

	/*
	 * If there is a direction change for this context then we mark it as
	 * unregistered and re-register it for the new direction.  This is not
	 * a very expensive operation and currently only tends to happen when
	 * user-space applications are doing benchmarks.
	 *
	 * DM - we should be checking for pending requests before unregistering.
	 */
	if (q->ixp_q_ccrd && ixp->ixp_registered &&
			ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
		dprintk("%s - detected direction change on session\n", __FUNCTION__);
		ixp->ixp_registered = 0;
	}

	/*
	 * if we are registered, call straight into the perform code
	 */
	if (ixp->ixp_registered) {
		ixp_q_process(q);
		return 0;
	}

	/*
	 * the only part of the context not set in newsession is the direction
	 */
	if (q->ixp_q_ccrd) {
		ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
		} else {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
		}
	} else {
		/* q->ixp_q_acrd must be set if we are here */
		ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
	}

	status = list_empty(&ixp->ixp_q);
	list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
	if (status)
		schedule_work(&ixp->ixp_registration_work);
	return 0;

done:
	if (q)
		kmem_cache_free(qcache, q);
	crypto_done(crp);
	return 0;
}
#ifdef __ixp46X

/*
 * key processing support for the ixp465
 */

/*
 * copy a BN (LE) into a buffer (BE) and fill out the op appropriately
 * assume zeroed and only copy bits that are significant
 */
static int
ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
{
	unsigned char *src = (unsigned char *) p->crp_p;
	unsigned char *dst;
	int len, bits = p->crp_nbits;

	dprintk("%s()\n", __FUNCTION__);

	if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
		dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
				bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
		return -1;
	}

	len = (bits + 31) / 32;	/* the number of UINT32's needed */

	dst = (unsigned char *) &buf[len];

	while (bits > 0) {
		*(--dst) = *src++;
		bits -= 8;
	}

#if 0 /* no need to zero remaining bits as it is done during request alloc */
	while (dst > (unsigned char *) buf)
		*(--dst) = '\0';
#endif

	op->pData = buf;
	op->dataLen = len;
	return 0;
}
/*
 * copy out the result, be as forgiving as we can about small output buffers
 */
static int
ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
{
	unsigned char *dst = (unsigned char *) p->crp_p;
	unsigned char *src = (unsigned char *) buf;
	int len, z, bits = p->crp_nbits;

	dprintk("%s()\n", __FUNCTION__);

	len = op->dataLen * sizeof(UINT32);

	/* skip leading zeroes to be small buffer friendly */
	z = 0;
	while (z < len && src[z] == '\0')
		z++;

	while (len > 0 && bits > 0) {
		/* byte-reversing copy into the caller's buffer (elided) */
	}

	if (len > 0) {
		/* significant bytes were left over that did not fit */
		dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
				__FUNCTION__, len, z, p->crp_nbits / 8);
		return -1;
	}

	return 0;
}
/*
 * the parameter offsets for exp_mod
 */
#define IXP_PARAM_BASE	0
#define IXP_PARAM_EXP	1
#define IXP_PARAM_MOD	2
#define IXP_PARAM_RES	3
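/*
 * For a CRK_MOD_EXP request krp_param[] therefore carries the base,
 * exponent and modulus as inputs, and the fourth parameter receives
 * base ^ exponent mod modulus.
 */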
/*
 * key processing complete callback, is also used to start processing
 * by passing a NULL for pResult
 */
static void
ixp_kperform_cb(
	IxCryptoAccPkeEauOperation operation,
	IxCryptoAccPkeEauOpResult *pResult,
	BOOL carryOrBorrow,
	IxCryptoAccStatus status)
{
	struct ixp_pkq *q, *tmp;
	unsigned long flags;

	dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
			carryOrBorrow, status);

	/* handle a completed request */
	if (pResult) {
		if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
			q = ixp_pk_cur;
			if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
				dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
				q->pkq_krp->krp_status = ERANGE; /* could do better */
			} else {
				/* copy out the result */
				if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
						&q->pkq_result, q->pkq_obuf))
					q->pkq_krp->krp_status = ERANGE;
			}
			crypto_kdone(q->pkq_krp);
			kfree(q);
			ixp_pk_cur = NULL;
		} else
			printk("%s - callback with invalid result pointer\n", __FUNCTION__);
	}

	spin_lock_irqsave(&ixp_pkq_lock, flags);
	if (ixp_pk_cur || list_empty(&ixp_pkq)) {
		spin_unlock_irqrestore(&ixp_pkq_lock, flags);
		return;
	}

	list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {

		list_del(&q->pkq_list);
		ixp_pk_cur = q;

		spin_unlock_irqrestore(&ixp_pkq_lock, flags);

		status = ixCryptoAccPkeEauPerform(
				IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
				&q->pkq_op,
				ixp_kperform_cb,
				&q->pkq_result);

		if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
			dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
			return; /* the completion callback will restart the queue */
		} else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
			printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
		} else {
			printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
					__FUNCTION__, status);
		}
		q->pkq_krp->krp_status = ERANGE; /* could do better */
		crypto_kdone(q->pkq_krp);
		kfree(q);
		spin_lock_irqsave(&ixp_pkq_lock, flags);
	}
	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
}
static int
ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
{
	struct ixp_pkq *q;
	int rc = 0;
	unsigned long flags;

	dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
			krp->krp_param[IXP_PARAM_BASE].crp_nbits,
			krp->krp_param[IXP_PARAM_EXP].crp_nbits,
			krp->krp_param[IXP_PARAM_MOD].crp_nbits,
			krp->krp_param[IXP_PARAM_RES].crp_nbits);

	if (krp->krp_op != CRK_MOD_EXP) {
		krp->krp_status = EOPNOTSUPP;
		crypto_kdone(krp);
		return 0;
	}

	q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
	if (q == NULL) {
		krp->krp_status = ENOMEM;
		crypto_kdone(krp);
		return 0;
	}

	/*
	 * The PKE engine does not appear to zero the output buffer
	 * appropriately, so we need to do it all here.
	 */
	memset(q, 0, sizeof(*q));

	INIT_LIST_HEAD(&q->pkq_list);

	if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
			q->pkq_ibuf0))
		rc = 1;
	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
			&q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
		rc = 2;
	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
			&q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
		rc = 3;

	if (rc) {
		kfree(q);
		krp->krp_status = ERANGE;
		crypto_kdone(krp);
		return 0;
	}

	q->pkq_krp = krp;
	q->pkq_result.pData = q->pkq_obuf;
	q->pkq_result.dataLen =
			(krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
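	/* dataLen is counted in 32-bit words, hence the round-up by 31 bits */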
	spin_lock_irqsave(&ixp_pkq_lock, flags);
	list_add_tail(&q->pkq_list, &ixp_pkq);
	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
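	/*
	 * ixp_kperform_cb() doubles as the queue kicker: with a NULL pResult
	 * it skips the completion handling and just dispatches the next
	 * queued EAU operation if the engine is idle.
	 */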
	ixp_kperform_cb(0, NULL, 0, 0);
	return 0;
}

#ifdef CONFIG_OCF_RANDOMHARVEST
/*
 * We run the random number generator output through SHA so that it
 * is FIPS compliant.
 */
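/*
 * ixp_read_random() below fills the caller's buffer from the PKE
 * pseudo-random number generator and then overwrites it, one SHA-1
 * digest (20 bytes) at a time, with the hash of each chunk.
 */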
static volatile int sha_done = 0;
static unsigned char sha_digest[20];

static void
ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
{
	dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
	if (sha_digest != digest)
		printk("digest error\n");
	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
		sha_done = 1;
	else
		sha_done = -status;
}

static int
ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
{
	IxCryptoAccStatus status;
	int i, n, rc = 0;

	dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
	memset(buf, 0, maxwords * sizeof(*buf));
	status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
		dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
				__FUNCTION__, status);
		return 0;
	}

	/*
	 * run the random data through SHA to make it look more random
	 */

	n = sizeof(sha_digest);		/* process digest bytes at a time */

	for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
		if ((maxwords - i) * sizeof(*buf) < n)
			n = (maxwords - i) * sizeof(*buf);
		sha_done = 0;
		status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
				(UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
		if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
			dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
			return rc;
		}
		while (!sha_done)
			schedule();
		if (sha_done < 0) {
			dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
			return rc;
		}
		memcpy(&buf[i], sha_digest, n);
		rc += n / sizeof(*buf);
	}

	return rc;
}
#endif /* CONFIG_OCF_RANDOMHARVEST */
#endif /* __ixp46X */

/*
 * our driver startup and shutdown routines
 */
static int
ixp_init(void)
{
	dprintk("%s(%p)\n", __FUNCTION__, ixp_init);

	if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
		printk("ixCryptoAccInit failed, assuming already initialised!\n");

	qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
				SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
				, NULL
#endif
				);
	if (!qcache) {
		printk("failed to create Qcache\n");
		return -ENOENT;
	}

	memset(&ixpdev, 0, sizeof(ixpdev));
	softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);

	ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
				CRYPTOCAP_F_HARDWARE);
	if (ixp_id < 0)
		panic("IXP/OCF crypto device cannot initialize!");

#define REGISTER(alg) \
	crypto_register(ixp_id,alg,0,0)
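	/*
	 * REGISTER() is shorthand for advertising one algorithm to the OCF
	 * core under this driver's id; the extra zero arguments are OCF's
	 * optional registration parameters, left at their defaults.
	 */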
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
#endif
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);

#ifdef __ixp46X
	spin_lock_init(&ixp_pkq_lock);
	/*
	 * we do not enable the go fast options here as they can potentially
	 * allow timing based attacks
	 *
	 * http://www.openssl.org/news/secadv_20030219.txt
	 */
	ixCryptoAccPkeEauExpConfig(0, 0);
	crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
#ifdef CONFIG_OCF_RANDOMHARVEST
	crypto_rregister(ixp_id, ixp_read_random, NULL);
#endif
#endif /* __ixp46X */

	return 0;
}

static void
ixp_exit(void)
{
	dprintk("%s()\n", __FUNCTION__);
	crypto_unregister_all(ixp_id);
	kmem_cache_destroy(qcache);
}

module_init(ixp_init);
module_exit(ixp_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");