ixp4xx: sync 2.6.32 kernel config
[openwrt.git] / target / linux / generic / files / crypto / ocf / ixp4xx / ixp4xx.c
1 /*
2 * An OCF module that uses Intels IXP CryptACC API to do the crypto.
3 * This driver requires the IXP400 Access Library that is available
4 * from Intel in order to operate (or compile).
5 *
6 * Written by David McCullough <david_mccullough@mcafee.com>
7 * Copyright (C) 2006-2010 David McCullough
8 * Copyright (C) 2004-2005 Intel Corporation.
9 *
10 * LICENSE TERMS
11 *
12 * The free distribution and use of this software in both source and binary
13 * form is allowed (with or without changes) provided that:
14 *
15 * 1. distributions of this source code include the above copyright
16 * notice, this list of conditions and the following disclaimer;
17 *
18 * 2. distributions in binary form include the above copyright
19 * notice, this list of conditions and the following disclaimer
20 * in the documentation and/or other associated materials;
21 *
22 * 3. the copyright holder's name is not used to endorse products
23 * built using this software without specific written permission.
24 *
25 * ALTERNATIVELY, provided that this notice is retained in full, this product
26 * may be distributed under the terms of the GNU General Public License (GPL),
27 * in which case the provisions of the GPL apply INSTEAD OF those given above.
28 *
29 * DISCLAIMER
30 *
31 * This software is provided 'as is' with no explicit or implied warranties
32 * in respect of its properties, including, but not limited to, correctness
33 * and/or fitness for purpose.
34 */
35
36 #ifndef AUTOCONF_INCLUDED
37 #include <linux/config.h>
38 #endif
39 #include <linux/module.h>
40 #include <linux/init.h>
41 #include <linux/list.h>
42 #include <linux/slab.h>
43 #include <linux/sched.h>
44 #include <linux/wait.h>
45 #include <linux/crypto.h>
46 #include <linux/interrupt.h>
47 #include <asm/scatterlist.h>
48
49 #include <IxTypes.h>
50 #include <IxOsBuffMgt.h>
51 #include <IxNpeDl.h>
52 #include <IxCryptoAcc.h>
53 #include <IxQMgr.h>
54 #include <IxOsServices.h>
55 #include <IxOsCacheMMU.h>
56
57 #include <cryptodev.h>
58 #include <uio.h>
59
60 #ifndef IX_MBUF_PRIV
61 #define IX_MBUF_PRIV(x) ((x)->priv)
62 #endif
63
struct ixp_data;

/*
 * One queued OCF request.  Allocated from qcache in ixp_process() and
 * released (via kmem_cache_free) by the completion callbacks or the
 * error paths, always after crypto_done() has been called.
 */
struct ixp_q {
	struct list_head	 ixp_q_list;	/* linkage on ixp_data.ixp_q */
	struct ixp_data		*ixp_q_data;	/* owning session */
	struct cryptop		*ixp_q_crp;	/* the OCF request being serviced */
	struct cryptodesc	*ixp_q_ccrd;	/* cipher descriptor (NULL if none) */
	struct cryptodesc	*ixp_q_acrd;	/* auth descriptor (NULL if none) */
	IX_MBUF			 ixp_q_mbuf;	/* mbuf handed to the CryptoAcc API */
	UINT8			*ixp_hash_dest;	/* Location for hash in client buffer */
	UINT8			*ixp_hash_src;	/* Location of hash in internal buffer */
	unsigned char		 ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
	unsigned char		*ixp_q_iv;	/* points at crd_iv or ixp_q_iv_data */
};
78
/*
 * Per-session state: holds the CryptoAcc context, its registration
 * status, and a queue of requests that arrived before registration
 * completed (drained from ixp_pending_work).
 */
struct ixp_data {
	int			ixp_registered;	/* is the context registered */
	int			ixp_crd_flags;	/* detect direction changes */

	int			ixp_cipher_alg;	/* OCF cipher algorithm, or -1 */
	int			ixp_auth_alg;	/* OCF auth algorithm, or -1 */

	UINT32			ixp_ctx_id;	/* CryptoAcc context handle, -1 if none */
	UINT32			ixp_hash_key_id;	/* used when hashing */
	IxCryptoAccCtx		ixp_ctx;	/* context description for the h/w */
	IX_MBUF			ixp_pri_mbuf;	/* primary/secondary buffers handed */
	IX_MBUF			ixp_sec_mbuf;	/*   to ixCryptoAccCtxRegister() */

	struct work_struct	ixp_pending_work;	/* drains ixp_q after registration */
	struct work_struct	ixp_registration_work;	/* performs deferred registration */
	struct list_head	ixp_q;		/* unprocessed requests */
};
96
#ifdef __ixp46X

/* public-key (EAU mod-exp) support exists only on the ixp46x parts */

#define MAX_IOP_SIZE	64	/* words */
#define MAX_OOP_SIZE	128

#define MAX_PARAMS	3

/*
 * One queued public-key (CRK_MOD_EXP) request.  The operand structures
 * point into the fixed-size word buffers below.
 */
struct ixp_pkq {
	struct list_head		pkq_list;	/* linkage on ixp_pkq list */
	struct cryptkop			*pkq_krp;	/* the OCF key operation */

	IxCryptoAccPkeEauInOperands	pkq_op;		/* operands (reference ibufs) */
	IxCryptoAccPkeEauOpResult	pkq_result;	/* result descriptor (pkq_obuf) */

	UINT32				pkq_ibuf0[MAX_IOP_SIZE];	/* base */
	UINT32				pkq_ibuf1[MAX_IOP_SIZE];	/* exponent */
	UINT32				pkq_ibuf2[MAX_IOP_SIZE];	/* modulus */
	UINT32				pkq_obuf[MAX_OOP_SIZE];		/* result */
};

static LIST_HEAD(ixp_pkq);		/* current PK wait list */
static struct ixp_pkq *ixp_pk_cur;	/* request currently on the engine */
static spinlock_t ixp_pkq_lock;		/* protects ixp_pkq / ixp_pk_cur
					 * NOTE(review): no spin_lock_init()
					 * is visible in this chunk; presumably
					 * done at module init - confirm */

#endif /* __ixp46X */
122
static int ixp_blocked = 0;	/* set when the h/w is out of tunnels; cleared
				 * (and OCF unblocked) from ixp_freesession() */

static int32_t ixp_id = -1;	/* OCF driver id, used for crypto_unblock() */
static struct ixp_data **ixp_sessions = NULL;	/* session table; slot 0 reserved */
static u_int32_t ixp_sesnum = 0;		/* allocated size of ixp_sessions[] */

static int ixp_process(device_t, struct cryptop *, int);
static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
static int ixp_freesession(device_t, u_int64_t);
#ifdef __ixp46X
static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
#endif

/* slab cache for struct ixp_q request headers */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *qcache;
#else
static struct kmem_cache *qcache;
#endif

#define debug ixp_debug		/* lets dprintk() key off our module parameter */
static int ixp_debug = 0;
module_param(ixp_debug, int, 0644);
MODULE_PARM_DESC(ixp_debug, "Enable debug");

static int ixp_init_crypto = 1;
module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");

/* deferred work: queue processing and registration cannot run at IRQ time */
static void ixp_process_pending(void *arg);
static void ixp_registration(void *arg);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void ixp_process_pending_wq(struct work_struct *work);
static void ixp_registration_wq(struct work_struct *work);
#endif
157
/*
 * dummy device structure
 */

static struct {
	softc_device_decl	sc_dev;
} ixpdev;

/* OCF dispatch table for this driver */
static device_method_t ixp_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	ixp_newsession),
	DEVMETHOD(cryptodev_freesession,ixp_freesession),
	DEVMETHOD(cryptodev_process,	ixp_process),
#ifdef __ixp46X
	DEVMETHOD(cryptodev_kprocess,	ixp_kprocess),
#endif
};
175
176 /*
177 * Generate a new software session.
178 */
179 static int
180 ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
181 {
182 struct ixp_data *ixp;
183 u_int32_t i;
184 #define AUTH_LEN(cri, def) \
185 (cri->cri_mlen ? cri->cri_mlen : (def))
186
187 dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
188 if (sid == NULL || cri == NULL) {
189 dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
190 return EINVAL;
191 }
192
193 if (ixp_sessions) {
194 for (i = 1; i < ixp_sesnum; i++)
195 if (ixp_sessions[i] == NULL)
196 break;
197 } else
198 i = 1; /* NB: to silence compiler warning */
199
200 if (ixp_sessions == NULL || i == ixp_sesnum) {
201 struct ixp_data **ixpd;
202
203 if (ixp_sessions == NULL) {
204 i = 1; /* We leave ixp_sessions[0] empty */
205 ixp_sesnum = CRYPTO_SW_SESSIONS;
206 } else
207 ixp_sesnum *= 2;
208
209 ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
210 if (ixpd == NULL) {
211 /* Reset session number */
212 if (ixp_sesnum == CRYPTO_SW_SESSIONS)
213 ixp_sesnum = 0;
214 else
215 ixp_sesnum /= 2;
216 dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
217 return ENOBUFS;
218 }
219 memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
220
221 /* Copy existing sessions */
222 if (ixp_sessions) {
223 memcpy(ixpd, ixp_sessions,
224 (ixp_sesnum / 2) * sizeof(struct ixp_data *));
225 kfree(ixp_sessions);
226 }
227
228 ixp_sessions = ixpd;
229 }
230
231 ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
232 SLAB_ATOMIC);
233 if (ixp_sessions[i] == NULL) {
234 ixp_freesession(NULL, i);
235 dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
236 return ENOBUFS;
237 }
238
239 *sid = i;
240
241 ixp = ixp_sessions[i];
242 memset(ixp, 0, sizeof(*ixp));
243
244 ixp->ixp_cipher_alg = -1;
245 ixp->ixp_auth_alg = -1;
246 ixp->ixp_ctx_id = -1;
247 INIT_LIST_HEAD(&ixp->ixp_q);
248
249 ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
250
251 while (cri) {
252 switch (cri->cri_alg) {
253 case CRYPTO_DES_CBC:
254 ixp->ixp_cipher_alg = cri->cri_alg;
255 ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
256 ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
257 ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
258 ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
259 ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
260 IX_CRYPTO_ACC_DES_IV_64;
261 memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
262 cri->cri_key, (cri->cri_klen + 7) / 8);
263 break;
264
265 case CRYPTO_3DES_CBC:
266 ixp->ixp_cipher_alg = cri->cri_alg;
267 ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
268 ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
269 ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
270 ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
271 ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
272 IX_CRYPTO_ACC_DES_IV_64;
273 memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
274 cri->cri_key, (cri->cri_klen + 7) / 8);
275 break;
276
277 case CRYPTO_RIJNDAEL128_CBC:
278 ixp->ixp_cipher_alg = cri->cri_alg;
279 ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
280 ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
281 ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
282 ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
283 ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
284 memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
285 cri->cri_key, (cri->cri_klen + 7) / 8);
286 break;
287
288 case CRYPTO_MD5:
289 case CRYPTO_MD5_HMAC:
290 ixp->ixp_auth_alg = cri->cri_alg;
291 ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
292 ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
293 ixp->ixp_ctx.authCtx.aadLen = 0;
294 /* Only MD5_HMAC needs a key */
295 if (cri->cri_alg == CRYPTO_MD5_HMAC) {
296 ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
297 if (ixp->ixp_ctx.authCtx.authKeyLen >
298 sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
299 printk(
300 "ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
301 cri->cri_klen);
302 ixp_freesession(NULL, i);
303 return EINVAL;
304 }
305 memcpy(ixp->ixp_ctx.authCtx.key.authKey,
306 cri->cri_key, (cri->cri_klen + 7) / 8);
307 }
308 break;
309
310 case CRYPTO_SHA1:
311 case CRYPTO_SHA1_HMAC:
312 ixp->ixp_auth_alg = cri->cri_alg;
313 ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
314 ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
315 ixp->ixp_ctx.authCtx.aadLen = 0;
316 /* Only SHA1_HMAC needs a key */
317 if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
318 ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
319 if (ixp->ixp_ctx.authCtx.authKeyLen >
320 sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
321 printk(
322 "ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
323 cri->cri_klen);
324 ixp_freesession(NULL, i);
325 return EINVAL;
326 }
327 memcpy(ixp->ixp_ctx.authCtx.key.authKey,
328 cri->cri_key, (cri->cri_klen + 7) / 8);
329 }
330 break;
331
332 default:
333 printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
334 ixp_freesession(NULL, i);
335 return EINVAL;
336 }
337 cri = cri->cri_next;
338 }
339
340 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
341 INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
342 INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
343 #else
344 INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
345 INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
346 #endif
347
348 return 0;
349 }
350
351
352 /*
353 * Free a session.
354 */
355 static int
356 ixp_freesession(device_t dev, u_int64_t tid)
357 {
358 u_int32_t sid = CRYPTO_SESID2LID(tid);
359
360 dprintk("%s()\n", __FUNCTION__);
361 if (sid > ixp_sesnum || ixp_sessions == NULL ||
362 ixp_sessions[sid] == NULL) {
363 dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
364 return EINVAL;
365 }
366
367 /* Silently accept and return */
368 if (sid == 0)
369 return 0;
370
371 if (ixp_sessions[sid]) {
372 if (ixp_sessions[sid]->ixp_ctx_id != -1) {
373 ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
374 ixp_sessions[sid]->ixp_ctx_id = -1;
375 }
376 kfree(ixp_sessions[sid]);
377 }
378 ixp_sessions[sid] = NULL;
379 if (ixp_blocked) {
380 ixp_blocked = 0;
381 crypto_unblock(ixp_id, CRYPTO_SYMQ);
382 }
383 return 0;
384 }
385
386
387 /*
388 * callback for when hash processing is complete
389 */
390
391 static void
392 ixp_hash_perform_cb(
393 UINT32 hash_key_id,
394 IX_MBUF *bufp,
395 IxCryptoAccStatus status)
396 {
397 struct ixp_q *q;
398
399 dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
400
401 if (bufp == NULL) {
402 printk("ixp: NULL buf in %s\n", __FUNCTION__);
403 return;
404 }
405
406 q = IX_MBUF_PRIV(bufp);
407 if (q == NULL) {
408 printk("ixp: NULL priv in %s\n", __FUNCTION__);
409 return;
410 }
411
412 if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
413 /* On success, need to copy hash back into original client buffer */
414 memcpy(q->ixp_hash_dest, q->ixp_hash_src,
415 (q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
416 SHA1_HASH_LEN : MD5_HASH_LEN);
417 }
418 else {
419 printk("ixp: hash perform failed status=%d\n", status);
420 q->ixp_q_crp->crp_etype = EINVAL;
421 }
422
423 /* Free internal buffer used for hashing */
424 kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
425
426 crypto_done(q->ixp_q_crp);
427 kmem_cache_free(qcache, q);
428 }
429
430 /*
431 * setup a request and perform it
432 */
433 static void
434 ixp_q_process(struct ixp_q *q)
435 {
436 IxCryptoAccStatus status;
437 struct ixp_data *ixp = q->ixp_q_data;
438 int auth_off = 0;
439 int auth_len = 0;
440 int crypt_off = 0;
441 int crypt_len = 0;
442 int icv_off = 0;
443 char *crypt_func;
444
445 dprintk("%s(%p)\n", __FUNCTION__, q);
446
447 if (q->ixp_q_ccrd) {
448 if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
449 q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
450 } else {
451 q->ixp_q_iv = q->ixp_q_iv_data;
452 crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
453 q->ixp_q_ccrd->crd_inject,
454 ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
455 (caddr_t) q->ixp_q_iv);
456 }
457
458 if (q->ixp_q_acrd) {
459 auth_off = q->ixp_q_acrd->crd_skip;
460 auth_len = q->ixp_q_acrd->crd_len;
461 icv_off = q->ixp_q_acrd->crd_inject;
462 }
463
464 crypt_off = q->ixp_q_ccrd->crd_skip;
465 crypt_len = q->ixp_q_ccrd->crd_len;
466 } else { /* if (q->ixp_q_acrd) */
467 auth_off = q->ixp_q_acrd->crd_skip;
468 auth_len = q->ixp_q_acrd->crd_len;
469 icv_off = q->ixp_q_acrd->crd_inject;
470 }
471
472 if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
473 struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
474 if (skb_shinfo(skb)->nr_frags) {
475 /*
476 * DAVIDM fix this limitation one day by using
477 * a buffer pool and chaining, it is not currently
478 * needed for current user/kernel space acceleration
479 */
480 printk("ixp: Cannot handle fragmented skb's yet !\n");
481 q->ixp_q_crp->crp_etype = ENOENT;
482 goto done;
483 }
484 IX_MBUF_MLEN(&q->ixp_q_mbuf) =
485 IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = skb->len;
486 IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
487 } else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
488 struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
489 if (uiop->uio_iovcnt != 1) {
490 /*
491 * DAVIDM fix this limitation one day by using
492 * a buffer pool and chaining, it is not currently
493 * needed for current user/kernel space acceleration
494 */
495 printk("ixp: Cannot handle more than 1 iovec yet !\n");
496 q->ixp_q_crp->crp_etype = ENOENT;
497 goto done;
498 }
499 IX_MBUF_MLEN(&q->ixp_q_mbuf) =
500 IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
501 IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
502 } else /* contig buffer */ {
503 IX_MBUF_MLEN(&q->ixp_q_mbuf) =
504 IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
505 IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
506 }
507
508 IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
509
510 if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
511 /*
512 * For SHA1 and MD5 hash, need to create an internal buffer that is big
513 * enough to hold the original data + the appropriate padding for the
514 * hash algorithm.
515 */
516 UINT8 *tbuf = NULL;
517
518 IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
519 ((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
520 tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
521
522 if (IX_MBUF_MDATA(&q->ixp_q_mbuf) == NULL) {
523 printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
524 IX_MBUF_MLEN(&q->ixp_q_mbuf));
525 q->ixp_q_crp->crp_etype = ENOMEM;
526 goto done;
527 }
528 memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
529
530 /* Set location in client buffer to copy hash into */
531 q->ixp_hash_dest =
532 &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
533
534 IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
535
536 /* Set location in internal buffer for where hash starts */
537 q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
538
539 crypt_func = "ixCryptoAccHashPerform";
540 status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
541 &q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
542 &ixp->ixp_hash_key_id);
543 }
544 else {
545 crypt_func = "ixCryptoAccAuthCryptPerform";
546 status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
547 NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
548 q->ixp_q_iv);
549 }
550
551 if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
552 return;
553
554 if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
555 q->ixp_q_crp->crp_etype = ENOMEM;
556 goto done;
557 }
558
559 printk("ixp: %s failed %u\n", crypt_func, status);
560 q->ixp_q_crp->crp_etype = EINVAL;
561
562 done:
563 crypto_done(q->ixp_q_crp);
564 kmem_cache_free(qcache, q);
565 }
566
567
568 /*
569 * because we cannot process the Q from the Register callback
570 * we do it here on a task Q.
571 */
572
573 static void
574 ixp_process_pending(void *arg)
575 {
576 struct ixp_data *ixp = arg;
577 struct ixp_q *q = NULL;
578
579 dprintk("%s(%p)\n", __FUNCTION__, arg);
580
581 if (!ixp)
582 return;
583
584 while (!list_empty(&ixp->ixp_q)) {
585 q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
586 list_del(&q->ixp_q_list);
587 ixp_q_process(q);
588 }
589 }
590
591 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
592 static void
593 ixp_process_pending_wq(struct work_struct *work)
594 {
595 struct ixp_data *ixp = container_of(work, struct ixp_data, ixp_pending_work);
596 ixp_process_pending(ixp);
597 }
598 #endif
599
600 /*
601 * callback for when context registration is complete
602 */
603
604 static void
605 ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
606 {
607 int i;
608 struct ixp_data *ixp;
609 struct ixp_q *q;
610
611 dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);
612
613 /*
614 * free any buffer passed in to this routine
615 */
616 if (bufp) {
617 IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
618 kfree(IX_MBUF_MDATA(bufp));
619 IX_MBUF_MDATA(bufp) = NULL;
620 }
621
622 for (i = 0; i < ixp_sesnum; i++) {
623 ixp = ixp_sessions[i];
624 if (ixp && ixp->ixp_ctx_id == ctx_id)
625 break;
626 }
627 if (i >= ixp_sesnum) {
628 printk("ixp: invalid context id %d\n", ctx_id);
629 return;
630 }
631
632 if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
633 /* this is normal to free the first of two buffers */
634 dprintk("ixp: register not finished yet.\n");
635 return;
636 }
637
638 if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
639 printk("ixp: register failed 0x%x\n", status);
640 while (!list_empty(&ixp->ixp_q)) {
641 q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
642 list_del(&q->ixp_q_list);
643 q->ixp_q_crp->crp_etype = EINVAL;
644 crypto_done(q->ixp_q_crp);
645 kmem_cache_free(qcache, q);
646 }
647 return;
648 }
649
650 /*
651 * we are now registered, we cannot start processing the Q here
652 * or we get strange errors with AES (DES/3DES seem to be ok).
653 */
654 ixp->ixp_registered = 1;
655 schedule_work(&ixp->ixp_pending_work);
656 }
657
658
659 /*
660 * callback for when data processing is complete
661 */
662
663 static void
664 ixp_perform_cb(
665 UINT32 ctx_id,
666 IX_MBUF *sbufp,
667 IX_MBUF *dbufp,
668 IxCryptoAccStatus status)
669 {
670 struct ixp_q *q;
671
672 dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
673 dbufp, status);
674
675 if (sbufp == NULL) {
676 printk("ixp: NULL sbuf in ixp_perform_cb\n");
677 return;
678 }
679
680 q = IX_MBUF_PRIV(sbufp);
681 if (q == NULL) {
682 printk("ixp: NULL priv in ixp_perform_cb\n");
683 return;
684 }
685
686 if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
687 printk("ixp: perform failed status=%d\n", status);
688 q->ixp_q_crp->crp_etype = EINVAL;
689 }
690
691 crypto_done(q->ixp_q_crp);
692 kmem_cache_free(qcache, q);
693 }
694
695
696 /*
697 * registration is not callable at IRQ time, so we defer
698 * to a task queue, this routines completes the registration for us
699 * when the task queue runs
700 *
701 * Unfortunately this means we cannot tell OCF that the driver is blocked,
702 * we do that on the next request.
703 */
704
705 static void
706 ixp_registration(void *arg)
707 {
708 struct ixp_data *ixp = arg;
709 struct ixp_q *q = NULL;
710 IX_MBUF *pri = NULL, *sec = NULL;
711 int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
712
713 if (!ixp) {
714 printk("ixp: ixp_registration with no arg\n");
715 return;
716 }
717
718 if (ixp->ixp_ctx_id != -1) {
719 ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
720 ixp->ixp_ctx_id = -1;
721 }
722
723 if (list_empty(&ixp->ixp_q)) {
724 printk("ixp: ixp_registration with no Q\n");
725 return;
726 }
727
728 /*
729 * setup the primary and secondary buffers
730 */
731 q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
732 if (q->ixp_q_acrd) {
733 pri = &ixp->ixp_pri_mbuf;
734 sec = &ixp->ixp_sec_mbuf;
735 IX_MBUF_MLEN(pri) = IX_MBUF_PKT_LEN(pri) = 128;
736 IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
737 IX_MBUF_MLEN(sec) = IX_MBUF_PKT_LEN(sec) = 128;
738 IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
739 }
740
741 /* Only need to register if a crypt op or HMAC op */
742 if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
743 ixp->ixp_auth_alg == CRYPTO_MD5)) {
744 status = ixCryptoAccCtxRegister(
745 &ixp->ixp_ctx,
746 pri, sec,
747 ixp_register_cb,
748 ixp_perform_cb,
749 &ixp->ixp_ctx_id);
750 }
751 else {
752 /* Otherwise we start processing pending q */
753 schedule_work(&ixp->ixp_pending_work);
754 }
755
756 if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
757 return;
758
759 if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
760 printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
761 ixp_blocked = 1;
762 /* perhaps we should return EGAIN on queued ops ? */
763 return;
764 }
765
766 printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
767 ixp->ixp_ctx_id = -1;
768
769 /*
770 * everything waiting is toasted
771 */
772 while (!list_empty(&ixp->ixp_q)) {
773 q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
774 list_del(&q->ixp_q_list);
775 q->ixp_q_crp->crp_etype = ENOENT;
776 crypto_done(q->ixp_q_crp);
777 kmem_cache_free(qcache, q);
778 }
779 }
780
781 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
782 static void
783 ixp_registration_wq(struct work_struct *work)
784 {
785 struct ixp_data *ixp = container_of(work, struct ixp_data,
786 ixp_registration_work);
787 ixp_registration(ixp);
788 }
789 #endif
790
/*
 * Process a request.
 *
 * Matches the request's descriptors against the session's cipher/auth
 * algorithms, then either performs it immediately (session already
 * registered) or queues it and schedules deferred registration.
 * Returns 0 in all completed cases (errors are reported via crp_etype
 * and crypto_done()), ERESTART while the h/w is out of tunnels.
 */
static int
ixp_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ixp_data *ixp;
	unsigned int lid;
	struct ixp_q *q = NULL;
	int status;

	dprintk("%s()\n", __FUNCTION__);

	/* Sanity check */
	if (crp == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	crp->crp_etype = 0;

	/* out of h/w tunnels: ask OCF to retry later */
	if (ixp_blocked)
		return ERESTART;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

	/*
	 * find the session we are using
	 */

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
			ixp_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	ixp = ixp_sessions[lid];

	/*
	 * setup a new request ready for queuing
	 */
	q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
	if (q == NULL) {
		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
		crp->crp_etype = ENOMEM;
		goto done;
	}
	/*
	 * save some cycles by only zeroing the important bits
	 */
	memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
	q->ixp_q_ccrd = NULL;
	q->ixp_q_acrd = NULL;
	q->ixp_q_crp = crp;
	q->ixp_q_data = ixp;

	/*
	 * point the cipher and auth descriptors appropriately
	 * check that we have something to do
	 */
	if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
		q->ixp_q_ccrd = crp->crp_desc;
	else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
		q->ixp_q_acrd = crp->crp_desc;
	else {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	/* a second descriptor, if present, must supply the other role */
	if (crp->crp_desc->crd_next) {
		if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
			q->ixp_q_ccrd = crp->crp_desc->crd_next;
		else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
			q->ixp_q_acrd = crp->crp_desc->crd_next;
		else {
			crp->crp_etype = ENOENT;
			dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
			goto done;
		}
	}

	/*
	 * If there is a direction change for this context then we mark it as
	 * unregistered and re-register is for the new direction. This is not
	 * a very expensive operation and currently only tends to happen when
	 * user-space application are doing benchmarks
	 *
	 * DM - we should be checking for pending requests before unregistering.
	 */
	if (q->ixp_q_ccrd && ixp->ixp_registered &&
			ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
		dprintk("%s - detected direction change on session\n", __FUNCTION__);
		ixp->ixp_registered = 0;
	}

	/*
	 * if we are registered, call straight into the perform code
	 */
	if (ixp->ixp_registered) {
		ixp_q_process(q);
		return 0;
	}

	/*
	 * the only part of the context not set in newsession is the direction
	 * dependent parts
	 */
	if (q->ixp_q_ccrd) {
		ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
		} else {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
		}
	} else {
		/* q->ixp_q_acrd must be set if we are here */
		ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
	}

	/* queue the request; only the first enqueuer schedules registration */
	status = list_empty(&ixp->ixp_q);
	list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
	if (status)
		schedule_work(&ixp->ixp_registration_work);
	return 0;

done:
	if (q)
		kmem_cache_free(qcache, q);
	crypto_done(crp);
	return 0;
}
929
930
931 #ifdef __ixp46X
932 /*
933 * key processing support for the ixp465
934 */
935
936
937 /*
938 * copy a BN (LE) into a buffer (BE) an fill out the op appropriately
939 * assume zeroed and only copy bits that are significant
940 */
941
942 static int
943 ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
944 {
945 unsigned char *src = (unsigned char *) p->crp_p;
946 unsigned char *dst;
947 int len, bits = p->crp_nbits;
948
949 dprintk("%s()\n", __FUNCTION__);
950
951 if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
952 dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
953 bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
954 return -1;
955 }
956
957 len = (bits + 31) / 32; /* the number UINT32's needed */
958
959 dst = (unsigned char *) &buf[len];
960 dst--;
961
962 while (bits > 0) {
963 *dst-- = *src++;
964 bits -= 8;
965 }
966
967 #if 0 /* no need to zero remaining bits as it is done during request alloc */
968 while (dst > (unsigned char *) buf)
969 *dst-- = '\0';
970 #endif
971
972 op->pData = buf;
973 op->dataLen = len;
974 return 0;
975 }
976
977 /*
978 * copy out the result, be as forgiving as we can about small output buffers
979 */
980
981 static int
982 ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
983 {
984 unsigned char *dst = (unsigned char *) p->crp_p;
985 unsigned char *src = (unsigned char *) buf;
986 int len, z, bits = p->crp_nbits;
987
988 dprintk("%s()\n", __FUNCTION__);
989
990 len = op->dataLen * sizeof(UINT32);
991
992 /* skip leading zeroes to be small buffer friendly */
993 z = 0;
994 while (z < len && src[z] == '\0')
995 z++;
996
997 src += len;
998 src--;
999 len -= z;
1000
1001 while (len > 0 && bits > 0) {
1002 *dst++ = *src--;
1003 len--;
1004 bits -= 8;
1005 }
1006
1007 while (bits > 0) {
1008 *dst++ = '\0';
1009 bits -= 8;
1010 }
1011
1012 if (len > 0) {
1013 dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
1014 __FUNCTION__, len, z, p->crp_nbits / 8);
1015 return -1;
1016 }
1017
1018 return 0;
1019 }
1020
1021
/*
 * the parameter offsets for exp_mod
 * (krp_param[] slots of a CRK_MOD_EXP request: result = base^exp mod mod)
 */

#define IXP_PARAM_BASE	0	/* base operand */
#define IXP_PARAM_EXP	1	/* exponent */
#define IXP_PARAM_MOD	2	/* modulus */
#define IXP_PARAM_RES	3	/* output: base^exp mod modulus */
1030
1031 /*
1032 * key processing complete callback, is also used to start processing
1033 * by passing a NULL for pResult
1034 */
1035
1036 static void
1037 ixp_kperform_cb(
1038 IxCryptoAccPkeEauOperation operation,
1039 IxCryptoAccPkeEauOpResult *pResult,
1040 BOOL carryOrBorrow,
1041 IxCryptoAccStatus status)
1042 {
1043 struct ixp_pkq *q, *tmp;
1044 unsigned long flags;
1045
1046 dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
1047 carryOrBorrow, status);
1048
1049 /* handle a completed request */
1050 if (pResult) {
1051 if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
1052 q = ixp_pk_cur;
1053 if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
1054 dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
1055 q->pkq_krp->krp_status = ERANGE; /* could do better */
1056 } else {
1057 /* copy out the result */
1058 if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
1059 &q->pkq_result, q->pkq_obuf))
1060 q->pkq_krp->krp_status = ERANGE;
1061 }
1062 crypto_kdone(q->pkq_krp);
1063 kfree(q);
1064 ixp_pk_cur = NULL;
1065 } else
1066 printk("%s - callback with invalid result pointer\n", __FUNCTION__);
1067 }
1068
1069 spin_lock_irqsave(&ixp_pkq_lock, flags);
1070 if (ixp_pk_cur || list_empty(&ixp_pkq)) {
1071 spin_unlock_irqrestore(&ixp_pkq_lock, flags);
1072 return;
1073 }
1074
1075 list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
1076
1077 list_del(&q->pkq_list);
1078 ixp_pk_cur = q;
1079
1080 spin_unlock_irqrestore(&ixp_pkq_lock, flags);
1081
1082 status = ixCryptoAccPkeEauPerform(
1083 IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
1084 &q->pkq_op,
1085 ixp_kperform_cb,
1086 &q->pkq_result);
1087
1088 if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
1089 dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
1090 return; /* callback will return here for callback */
1091 } else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
1092 printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
1093 } else {
1094 printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
1095 __FUNCTION__, status);
1096 }
1097 q->pkq_krp->krp_status = ERANGE; /* could do better */
1098 crypto_kdone(q->pkq_krp);
1099 kfree(q);
1100 spin_lock_irqsave(&ixp_pkq_lock, flags);
1101 }
1102 spin_unlock_irqrestore(&ixp_pkq_lock, flags);
1103 }
1104
1105
/*
 * Queue (and possibly start) a public-key operation.  Only CRK_MOD_EXP
 * is supported.  Always returns 0; failures are reported through
 * krp->krp_status and crypto_kdone().  Actual processing is driven by
 * ixp_kperform_cb(), which is kicked here when the engine is idle.
 */
static int
ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
{
	struct ixp_pkq *q;
	int rc = 0;
	unsigned long flags;

	dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
			krp->krp_param[IXP_PARAM_BASE].crp_nbits,
			krp->krp_param[IXP_PARAM_EXP].crp_nbits,
			krp->krp_param[IXP_PARAM_MOD].crp_nbits,
			krp->krp_param[IXP_PARAM_RES].crp_nbits);


	if (krp->krp_op != CRK_MOD_EXP) {
		krp->krp_status = EOPNOTSUPP;
		goto err;
	}

	q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
	if (q == NULL) {
		krp->krp_status = ENOMEM;
		goto err;
	}

	/*
	 * The PKE engine does not appear to zero the output buffer
	 * appropriately, so we need to do it all here.
	 */
	memset(q, 0, sizeof(*q));

	q->pkq_krp = krp;
	INIT_LIST_HEAD(&q->pkq_list);

	/* load the three operands into the request's word buffers */
	if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
			q->pkq_ibuf0))
		rc = 1;
	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
			&q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
		rc = 2;
	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
			&q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
		rc = 3;

	if (rc) {
		/* one of the operands exceeded MAX_IOP_SIZE words */
		kfree(q);
		krp->krp_status = ERANGE;
		goto err;
	}

	q->pkq_result.pData = q->pkq_obuf;
	q->pkq_result.dataLen =
			(krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;

	spin_lock_irqsave(&ixp_pkq_lock, flags);
	list_add_tail(&q->pkq_list, &ixp_pkq);
	spin_unlock_irqrestore(&ixp_pkq_lock, flags);

	/* engine idle: kick processing through the callback */
	if (!ixp_pk_cur)
		ixp_kperform_cb(0, NULL, 0, 0);
	return (0);

err:
	crypto_kdone(krp);
	return (0);
}
1172
1173
1174
1175 #ifdef CONFIG_OCF_RANDOMHARVEST
1176 /*
1177 * We run the random number generator output through SHA so that it
1178 * is FIPS compliant.
1179 */
1180
/* Completion flag for the PKE hash operation, set by ixp_hash_cb() and
 * polled in ixp_read_random(): 0 = operation pending, 1 = success,
 * negative = -(IxCryptoAccStatus error code).  volatile because it is
 * written from the CryptoAcc callback context while being polled. */
static volatile int sha_done = 0;
static unsigned char sha_digest[20];	/* SHA-1 output buffer (20 bytes) */
1183
1184 static void
1185 ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
1186 {
1187 dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
1188 if (sha_digest != digest)
1189 printk("digest error\n");
1190 if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
1191 sha_done = 1;
1192 else
1193 sha_done = -status;
1194 }
1195
1196 static int
1197 ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
1198 {
1199 IxCryptoAccStatus status;
1200 int i, n, rc;
1201
1202 dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
1203 memset(buf, 0, maxwords * sizeof(*buf));
1204 status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
1205 if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
1206 dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
1207 __FUNCTION__, status);
1208 return 0;
1209 }
1210
1211 /*
1212 * run the random data through SHA to make it look more random
1213 */
1214
1215 n = sizeof(sha_digest); /* process digest bytes at a time */
1216
1217 rc = 0;
1218 for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
1219 if ((maxwords - i) * sizeof(*buf) < n)
1220 n = (maxwords - i) * sizeof(*buf);
1221 sha_done = 0;
1222 status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
1223 (UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
1224 if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
1225 dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
1226 return -EIO;
1227 }
1228 while (!sha_done)
1229 schedule();
1230 if (sha_done < 0) {
1231 dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
1232 return 0;
1233 }
1234 memcpy(&buf[i], sha_digest, n);
1235 rc += n / sizeof(*buf);;
1236 }
1237
1238 return rc;
1239 }
1240 #endif /* CONFIG_OCF_RANDOMHARVEST */
1241
1242 #endif /* __ixp46X */
1243
1244
1245
1246 /*
1247 * our driver startup and shutdown routines
1248 */
1249
/*
 * Module initialisation: bring up the IXP CryptoAcc library, create the
 * cache for symmetric request structures, register this driver with OCF
 * and advertise the algorithms it supports.
 * Returns 0 on success, -ENOENT if the request cache cannot be created;
 * panics if OCF refuses to hand out a driver id.
 */
static int
ixp_init(void)
{
	dprintk("%s(%p)\n", __FUNCTION__, ixp_init);

	/* non-fatal: the access library may already have been initialised
	 * elsewhere, as the printk below notes */
	if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
		printk("ixCryptoAccInit failed, assuming already initialised!\n");

	/* kmem_cache_create() dropped its trailing destructor argument in
	 * 2.6.23, hence the version-conditional extra NULL */
	qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
				SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
				, NULL
#endif
				);
	if (!qcache) {
		printk("failed to create Qcache\n");
		return -ENOENT;
	}

	memset(&ixpdev, 0, sizeof(ixpdev));
	softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);

	ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
				CRYPTOCAP_F_HARDWARE);
	if (ixp_id < 0)
		panic("IXP/OCF crypto device cannot initialize!");

/* shorthand for registering one algorithm under our driver id */
#define	REGISTER(alg) \
	crypto_register(ixp_id,alg,0,0)

	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
#endif
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
#undef REGISTER

/* the PKE unit (mod-exp, RNG) only exists on IXP46x parts */
#ifdef __ixp46X
	spin_lock_init(&ixp_pkq_lock);
	/*
	 * we do not enable the go fast options here as they can potentially
	 * allow timing based attacks
	 *
	 * http://www.openssl.org/news/secadv_20030219.txt
	 */
	ixCryptoAccPkeEauExpConfig(0, 0);
	crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
#ifdef CONFIG_OCF_RANDOMHARVEST
	crypto_rregister(ixp_id, ixp_read_random, NULL);
#endif
#endif

	return 0;
}
1308
/*
 * Module unload: deregister everything from OCF first so no new
 * requests can arrive, then release the request cache.  Order matters:
 * qcache must outlive the last possible OCF callback.
 */
static void
ixp_exit(void)
{
	dprintk("%s()\n", __FUNCTION__);
	crypto_unregister_all(ixp_id);
	ixp_id = -1;		/* mark the driver as gone */
	kmem_cache_destroy(qcache);
	qcache = NULL;
}
1318
/* standard kernel module entry/exit hooks */
module_init(ixp_init);
module_exit(ixp_exit);

/* dual-licensed: BSD-style terms from the file header, or the GPL */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");