kernel: linux/version.h was removed in kernel 2.6.19 and is now replaced by generated...
[openwrt.git] / target / linux / generic / files / crypto / ocf / ixp4xx / ixp4xx.c
1 /*
2 * An OCF module that uses Intels IXP CryptACC API to do the crypto.
3 * This driver requires the IXP400 Access Library that is available
4 * from Intel in order to operate (or compile).
5 *
6 * Written by David McCullough <david_mccullough@mcafee.com>
7 * Copyright (C) 2006-2010 David McCullough
8 * Copyright (C) 2004-2005 Intel Corporation.
9 *
10 * LICENSE TERMS
11 *
12 * The free distribution and use of this software in both source and binary
13 * form is allowed (with or without changes) provided that:
14 *
15 * 1. distributions of this source code include the above copyright
16 * notice, this list of conditions and the following disclaimer;
17 *
18 * 2. distributions in binary form include the above copyright
19 * notice, this list of conditions and the following disclaimer
20 * in the documentation and/or other associated materials;
21 *
22 * 3. the copyright holder's name is not used to endorse products
23 * built using this software without specific written permission.
24 *
25 * ALTERNATIVELY, provided that this notice is retained in full, this product
26 * may be distributed under the terms of the GNU General Public License (GPL),
27 * in which case the provisions of the GPL apply INSTEAD OF those given above.
28 *
29 * DISCLAIMER
30 *
31 * This software is provided 'as is' with no explicit or implied warranties
32 * in respect of its properties, including, but not limited to, correctness
33 * and/or fitness for purpose.
34 */
35
36 #include <linux/version.h>
37 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
38 #include <generated/autoconf.h>
39 #else
40 #include <linux/autoconf.h>
41 #endif
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/list.h>
45 #include <linux/slab.h>
46 #include <linux/sched.h>
47 #include <linux/wait.h>
48 #include <linux/crypto.h>
49 #include <linux/interrupt.h>
50 #include <asm/scatterlist.h>
51
52 #include <IxTypes.h>
53 #include <IxOsBuffMgt.h>
54 #include <IxNpeDl.h>
55 #include <IxCryptoAcc.h>
56 #include <IxQMgr.h>
57 #include <IxOsServices.h>
58 #include <IxOsCacheMMU.h>
59
60 #include <cryptodev.h>
61 #include <uio.h>
62
/* Fallback for access-library versions whose IX_MBUF exposes a plain
 * "priv" member rather than an IX_MBUF_PRIV() accessor macro. */
#ifndef IX_MBUF_PRIV
#define IX_MBUF_PRIV(x) ((x)->priv)
#endif
66
struct ixp_data;

/*
 * One queued crypto request: ties an OCF cryptop (and its cipher/auth
 * descriptors) to the IX_MBUF handed to the CryptoAcc library.  The
 * request pointer is stashed in the mbuf's private field so the
 * completion callbacks can recover it.
 */
struct ixp_q {
	struct list_head	 ixp_q_list;	/* entry on ixp_data.ixp_q */
	struct ixp_data		*ixp_q_data;	/* owning session */
	struct cryptop		*ixp_q_crp;	/* the OCF request */
	struct cryptodesc	*ixp_q_ccrd;	/* cipher descriptor, or NULL */
	struct cryptodesc	*ixp_q_acrd;	/* auth descriptor, or NULL */
	IX_MBUF			 ixp_q_mbuf;	/* buffer passed to CryptoAcc */
	UINT8			*ixp_hash_dest;	/* Location for hash in client buffer */
	UINT8			*ixp_hash_src;	/* Location of hash in internal buffer */
	unsigned char		 ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
	unsigned char		*ixp_q_iv;	/* points at crd_iv or ixp_q_iv_data */
};
81
/*
 * Per-session state: the CryptoAcc context, its registration status,
 * and the queue of requests waiting for registration to complete.
 */
struct ixp_data {
	int		ixp_registered;		/* is the context registered */
	int		ixp_crd_flags;		/* detect direction changes */

	int		ixp_cipher_alg;		/* OCF cipher algorithm, or -1 */
	int		ixp_auth_alg;		/* OCF auth algorithm, or -1 */

	UINT32		ixp_ctx_id;		/* CryptoAcc context handle, -1 if none */
	UINT32		ixp_hash_key_id;	/* used when hashing */
	IxCryptoAccCtx	ixp_ctx;		/* context handed to ixCryptoAccCtxRegister */
	IX_MBUF		ixp_pri_mbuf;		/* primary registration buffer */
	IX_MBUF		ixp_sec_mbuf;		/* secondary registration buffer */

	struct work_struct ixp_pending_work;		/* drains ixp_q after registration */
	struct work_struct ixp_registration_work;	/* performs the (re)registration */
	struct list_head ixp_q;			/* unprocessed requests */
};
99
#ifdef __ixp46X

/* PKE (public key) engine operand limits, in 32-bit words */
#define MAX_IOP_SIZE	64	/* words */
#define MAX_OOP_SIZE	128

#define MAX_PARAMS	3

/*
 * One queued public-key (mod-exp) request.  The in/out big-number
 * buffers are fixed-size and zeroed when the request is allocated.
 */
struct ixp_pkq {
	struct list_head	pkq_list;	/* entry on ixp_pkq wait list */
	struct cryptkop		*pkq_krp;	/* the OCF key operation */

	IxCryptoAccPkeEauInOperands	pkq_op;		/* M, e, N operands */
	IxCryptoAccPkeEauOpResult	pkq_result;	/* result descriptor */

	UINT32	pkq_ibuf0[MAX_IOP_SIZE];	/* base (M) */
	UINT32	pkq_ibuf1[MAX_IOP_SIZE];	/* exponent (e) */
	UINT32	pkq_ibuf2[MAX_IOP_SIZE];	/* modulus (N) */
	UINT32	pkq_obuf[MAX_OOP_SIZE];		/* result */
};

static LIST_HEAD(ixp_pkq);		/* current PK wait list */
static struct ixp_pkq *ixp_pk_cur;	/* request in flight on the PKE engine */
static spinlock_t ixp_pkq_lock;		/* protects ixp_pkq and ixp_pk_cur */

#endif /* __ixp46X */
125
static int ixp_blocked = 0;	/* set when the h/w runs out of tunnels */

static int32_t ixp_id = -1;			/* OCF driver id */
static struct ixp_data **ixp_sessions = NULL;	/* session table; slot 0 unused */
static u_int32_t ixp_sesnum = 0;		/* size of ixp_sessions[] */

static int ixp_process(device_t, struct cryptop *, int);
static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
static int ixp_freesession(device_t, u_int64_t);
#ifdef __ixp46X
static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
#endif

/* request allocator; kmem_cache_t was renamed in 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *qcache;
#else
static struct kmem_cache *qcache;
#endif

#define debug ixp_debug
static int ixp_debug = 0;
module_param(ixp_debug, int, 0644);
MODULE_PARM_DESC(ixp_debug, "Enable debug");

static int ixp_init_crypto = 1;
module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");

static void ixp_process_pending(void *arg);
static void ixp_registration(void *arg);
/* work_struct callback signature changed in 2.6.20 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void ixp_process_pending_wq(struct work_struct *work);
static void ixp_registration_wq(struct work_struct *work);
#endif
160
/*
 * dummy device structure
 */

static struct {
	softc_device_decl	sc_dev;
} ixpdev;

/* OCF dispatch table for this driver */
static device_method_t ixp_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	ixp_newsession),
	DEVMETHOD(cryptodev_freesession,ixp_freesession),
	DEVMETHOD(cryptodev_process,	ixp_process),
#ifdef __ixp46X
	DEVMETHOD(cryptodev_kprocess,	ixp_kprocess),
#endif
};
178
179 /*
180 * Generate a new software session.
181 */
182 static int
183 ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
184 {
185 struct ixp_data *ixp;
186 u_int32_t i;
187 #define AUTH_LEN(cri, def) \
188 (cri->cri_mlen ? cri->cri_mlen : (def))
189
190 dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
191 if (sid == NULL || cri == NULL) {
192 dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
193 return EINVAL;
194 }
195
196 if (ixp_sessions) {
197 for (i = 1; i < ixp_sesnum; i++)
198 if (ixp_sessions[i] == NULL)
199 break;
200 } else
201 i = 1; /* NB: to silence compiler warning */
202
203 if (ixp_sessions == NULL || i == ixp_sesnum) {
204 struct ixp_data **ixpd;
205
206 if (ixp_sessions == NULL) {
207 i = 1; /* We leave ixp_sessions[0] empty */
208 ixp_sesnum = CRYPTO_SW_SESSIONS;
209 } else
210 ixp_sesnum *= 2;
211
212 ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
213 if (ixpd == NULL) {
214 /* Reset session number */
215 if (ixp_sesnum == CRYPTO_SW_SESSIONS)
216 ixp_sesnum = 0;
217 else
218 ixp_sesnum /= 2;
219 dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
220 return ENOBUFS;
221 }
222 memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
223
224 /* Copy existing sessions */
225 if (ixp_sessions) {
226 memcpy(ixpd, ixp_sessions,
227 (ixp_sesnum / 2) * sizeof(struct ixp_data *));
228 kfree(ixp_sessions);
229 }
230
231 ixp_sessions = ixpd;
232 }
233
234 ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
235 SLAB_ATOMIC);
236 if (ixp_sessions[i] == NULL) {
237 ixp_freesession(NULL, i);
238 dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
239 return ENOBUFS;
240 }
241
242 *sid = i;
243
244 ixp = ixp_sessions[i];
245 memset(ixp, 0, sizeof(*ixp));
246
247 ixp->ixp_cipher_alg = -1;
248 ixp->ixp_auth_alg = -1;
249 ixp->ixp_ctx_id = -1;
250 INIT_LIST_HEAD(&ixp->ixp_q);
251
252 ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
253
254 while (cri) {
255 switch (cri->cri_alg) {
256 case CRYPTO_DES_CBC:
257 ixp->ixp_cipher_alg = cri->cri_alg;
258 ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
259 ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
260 ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
261 ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
262 ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
263 IX_CRYPTO_ACC_DES_IV_64;
264 memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
265 cri->cri_key, (cri->cri_klen + 7) / 8);
266 break;
267
268 case CRYPTO_3DES_CBC:
269 ixp->ixp_cipher_alg = cri->cri_alg;
270 ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
271 ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
272 ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
273 ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
274 ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
275 IX_CRYPTO_ACC_DES_IV_64;
276 memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
277 cri->cri_key, (cri->cri_klen + 7) / 8);
278 break;
279
280 case CRYPTO_RIJNDAEL128_CBC:
281 ixp->ixp_cipher_alg = cri->cri_alg;
282 ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
283 ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
284 ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
285 ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
286 ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
287 memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
288 cri->cri_key, (cri->cri_klen + 7) / 8);
289 break;
290
291 case CRYPTO_MD5:
292 case CRYPTO_MD5_HMAC:
293 ixp->ixp_auth_alg = cri->cri_alg;
294 ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
295 ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
296 ixp->ixp_ctx.authCtx.aadLen = 0;
297 /* Only MD5_HMAC needs a key */
298 if (cri->cri_alg == CRYPTO_MD5_HMAC) {
299 ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
300 if (ixp->ixp_ctx.authCtx.authKeyLen >
301 sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
302 printk(
303 "ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
304 cri->cri_klen);
305 ixp_freesession(NULL, i);
306 return EINVAL;
307 }
308 memcpy(ixp->ixp_ctx.authCtx.key.authKey,
309 cri->cri_key, (cri->cri_klen + 7) / 8);
310 }
311 break;
312
313 case CRYPTO_SHA1:
314 case CRYPTO_SHA1_HMAC:
315 ixp->ixp_auth_alg = cri->cri_alg;
316 ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
317 ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
318 ixp->ixp_ctx.authCtx.aadLen = 0;
319 /* Only SHA1_HMAC needs a key */
320 if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
321 ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
322 if (ixp->ixp_ctx.authCtx.authKeyLen >
323 sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
324 printk(
325 "ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
326 cri->cri_klen);
327 ixp_freesession(NULL, i);
328 return EINVAL;
329 }
330 memcpy(ixp->ixp_ctx.authCtx.key.authKey,
331 cri->cri_key, (cri->cri_klen + 7) / 8);
332 }
333 break;
334
335 default:
336 printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
337 ixp_freesession(NULL, i);
338 return EINVAL;
339 }
340 cri = cri->cri_next;
341 }
342
343 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
344 INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
345 INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
346 #else
347 INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
348 INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
349 #endif
350
351 return 0;
352 }
353
354
355 /*
356 * Free a session.
357 */
358 static int
359 ixp_freesession(device_t dev, u_int64_t tid)
360 {
361 u_int32_t sid = CRYPTO_SESID2LID(tid);
362
363 dprintk("%s()\n", __FUNCTION__);
364 if (sid > ixp_sesnum || ixp_sessions == NULL ||
365 ixp_sessions[sid] == NULL) {
366 dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
367 return EINVAL;
368 }
369
370 /* Silently accept and return */
371 if (sid == 0)
372 return 0;
373
374 if (ixp_sessions[sid]) {
375 if (ixp_sessions[sid]->ixp_ctx_id != -1) {
376 ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
377 ixp_sessions[sid]->ixp_ctx_id = -1;
378 }
379 kfree(ixp_sessions[sid]);
380 }
381 ixp_sessions[sid] = NULL;
382 if (ixp_blocked) {
383 ixp_blocked = 0;
384 crypto_unblock(ixp_id, CRYPTO_SYMQ);
385 }
386 return 0;
387 }
388
389
390 /*
391 * callback for when hash processing is complete
392 */
393
static void
ixp_hash_perform_cb(
	UINT32 hash_key_id,
	IX_MBUF *bufp,
	IxCryptoAccStatus status)
{
	struct ixp_q *q;

	dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);

	if (bufp == NULL) {
		printk("ixp: NULL buf in %s\n", __FUNCTION__);
		return;
	}

	/* the request was stashed in the mbuf private field by ixp_q_process() */
	q = IX_MBUF_PRIV(bufp);
	if (q == NULL) {
		printk("ixp: NULL priv in %s\n", __FUNCTION__);
		return;
	}

	if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
		/* On success, need to copy hash back into original client buffer */
		memcpy(q->ixp_hash_dest, q->ixp_hash_src,
				(q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
					SHA1_HASH_LEN : MD5_HASH_LEN);
	}
	else {
		printk("ixp: hash perform failed status=%d\n", status);
		q->ixp_q_crp->crp_etype = EINVAL;
	}

	/* Free internal buffer used for hashing (allocated in ixp_q_process) */
	kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));

	/* complete the OCF request and recycle the queue entry */
	crypto_done(q->ixp_q_crp);
	kmem_cache_free(qcache, q);
}
432
433 /*
434 * setup a request and perform it
435 */
436 static void
437 ixp_q_process(struct ixp_q *q)
438 {
439 IxCryptoAccStatus status;
440 struct ixp_data *ixp = q->ixp_q_data;
441 int auth_off = 0;
442 int auth_len = 0;
443 int crypt_off = 0;
444 int crypt_len = 0;
445 int icv_off = 0;
446 char *crypt_func;
447
448 dprintk("%s(%p)\n", __FUNCTION__, q);
449
450 if (q->ixp_q_ccrd) {
451 if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
452 q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
453 } else {
454 q->ixp_q_iv = q->ixp_q_iv_data;
455 crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
456 q->ixp_q_ccrd->crd_inject,
457 ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
458 (caddr_t) q->ixp_q_iv);
459 }
460
461 if (q->ixp_q_acrd) {
462 auth_off = q->ixp_q_acrd->crd_skip;
463 auth_len = q->ixp_q_acrd->crd_len;
464 icv_off = q->ixp_q_acrd->crd_inject;
465 }
466
467 crypt_off = q->ixp_q_ccrd->crd_skip;
468 crypt_len = q->ixp_q_ccrd->crd_len;
469 } else { /* if (q->ixp_q_acrd) */
470 auth_off = q->ixp_q_acrd->crd_skip;
471 auth_len = q->ixp_q_acrd->crd_len;
472 icv_off = q->ixp_q_acrd->crd_inject;
473 }
474
475 if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
476 struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
477 if (skb_shinfo(skb)->nr_frags) {
478 /*
479 * DAVIDM fix this limitation one day by using
480 * a buffer pool and chaining, it is not currently
481 * needed for current user/kernel space acceleration
482 */
483 printk("ixp: Cannot handle fragmented skb's yet !\n");
484 q->ixp_q_crp->crp_etype = ENOENT;
485 goto done;
486 }
487 IX_MBUF_MLEN(&q->ixp_q_mbuf) =
488 IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = skb->len;
489 IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
490 } else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
491 struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
492 if (uiop->uio_iovcnt != 1) {
493 /*
494 * DAVIDM fix this limitation one day by using
495 * a buffer pool and chaining, it is not currently
496 * needed for current user/kernel space acceleration
497 */
498 printk("ixp: Cannot handle more than 1 iovec yet !\n");
499 q->ixp_q_crp->crp_etype = ENOENT;
500 goto done;
501 }
502 IX_MBUF_MLEN(&q->ixp_q_mbuf) =
503 IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
504 IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
505 } else /* contig buffer */ {
506 IX_MBUF_MLEN(&q->ixp_q_mbuf) =
507 IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
508 IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
509 }
510
511 IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
512
513 if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
514 /*
515 * For SHA1 and MD5 hash, need to create an internal buffer that is big
516 * enough to hold the original data + the appropriate padding for the
517 * hash algorithm.
518 */
519 UINT8 *tbuf = NULL;
520
521 IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
522 ((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
523 tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
524
525 if (IX_MBUF_MDATA(&q->ixp_q_mbuf) == NULL) {
526 printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
527 IX_MBUF_MLEN(&q->ixp_q_mbuf));
528 q->ixp_q_crp->crp_etype = ENOMEM;
529 goto done;
530 }
531 memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
532
533 /* Set location in client buffer to copy hash into */
534 q->ixp_hash_dest =
535 &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
536
537 IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
538
539 /* Set location in internal buffer for where hash starts */
540 q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
541
542 crypt_func = "ixCryptoAccHashPerform";
543 status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
544 &q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
545 &ixp->ixp_hash_key_id);
546 }
547 else {
548 crypt_func = "ixCryptoAccAuthCryptPerform";
549 status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
550 NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
551 q->ixp_q_iv);
552 }
553
554 if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
555 return;
556
557 if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
558 q->ixp_q_crp->crp_etype = ENOMEM;
559 goto done;
560 }
561
562 printk("ixp: %s failed %u\n", crypt_func, status);
563 q->ixp_q_crp->crp_etype = EINVAL;
564
565 done:
566 crypto_done(q->ixp_q_crp);
567 kmem_cache_free(qcache, q);
568 }
569
570
571 /*
572 * because we cannot process the Q from the Register callback
573 * we do it here on a task Q.
574 */
575
576 static void
577 ixp_process_pending(void *arg)
578 {
579 struct ixp_data *ixp = arg;
580 struct ixp_q *q = NULL;
581
582 dprintk("%s(%p)\n", __FUNCTION__, arg);
583
584 if (!ixp)
585 return;
586
587 while (!list_empty(&ixp->ixp_q)) {
588 q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
589 list_del(&q->ixp_q_list);
590 ixp_q_process(q);
591 }
592 }
593
594 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
595 static void
596 ixp_process_pending_wq(struct work_struct *work)
597 {
598 struct ixp_data *ixp = container_of(work, struct ixp_data, ixp_pending_work);
599 ixp_process_pending(ixp);
600 }
601 #endif
602
603 /*
604 * callback for when context registration is complete
605 */
606
static void
ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
{
	/* NOTE(review): i is a plain int compared against the u_int32_t
	 * ixp_sesnum below — harmless at current table sizes, but a
	 * signed/unsigned mismatch worth confirming. */
	int i;
	struct ixp_data *ixp;
	struct ixp_q *q;

	dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);

	/*
	 * free any buffer passed in to this routine
	 * (the registration buffers were kmalloc'd in ixp_registration)
	 */
	if (bufp) {
		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
		kfree(IX_MBUF_MDATA(bufp));
		IX_MBUF_MDATA(bufp) = NULL;
	}

	/* locate the session that owns this context id */
	for (i = 0; i < ixp_sesnum; i++) {
		ixp = ixp_sessions[i];
		if (ixp && ixp->ixp_ctx_id == ctx_id)
			break;
	}
	if (i >= ixp_sesnum) {
		printk("ixp: invalid context id %d\n", ctx_id);
		return;
	}

	if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
		/* this is normal to free the first of two buffers */
		dprintk("ixp: register not finished yet.\n");
		return;
	}

	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
		printk("ixp: register failed 0x%x\n", status);
		/* fail every request that queued up while we were registering */
		while (!list_empty(&ixp->ixp_q)) {
			q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
			list_del(&q->ixp_q_list);
			q->ixp_q_crp->crp_etype = EINVAL;
			crypto_done(q->ixp_q_crp);
			kmem_cache_free(qcache, q);
		}
		return;
	}

	/*
	 * we are now registered, we cannot start processing the Q here
	 * or we get strange errors with AES (DES/3DES seem to be ok).
	 */
	ixp->ixp_registered = 1;
	schedule_work(&ixp->ixp_pending_work);
}
660
661
662 /*
663 * callback for when data processing is complete
664 */
665
666 static void
667 ixp_perform_cb(
668 UINT32 ctx_id,
669 IX_MBUF *sbufp,
670 IX_MBUF *dbufp,
671 IxCryptoAccStatus status)
672 {
673 struct ixp_q *q;
674
675 dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
676 dbufp, status);
677
678 if (sbufp == NULL) {
679 printk("ixp: NULL sbuf in ixp_perform_cb\n");
680 return;
681 }
682
683 q = IX_MBUF_PRIV(sbufp);
684 if (q == NULL) {
685 printk("ixp: NULL priv in ixp_perform_cb\n");
686 return;
687 }
688
689 if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
690 printk("ixp: perform failed status=%d\n", status);
691 q->ixp_q_crp->crp_etype = EINVAL;
692 }
693
694 crypto_done(q->ixp_q_crp);
695 kmem_cache_free(qcache, q);
696 }
697
698
699 /*
700 * registration is not callable at IRQ time, so we defer
701 * to a task queue, this routines completes the registration for us
702 * when the task queue runs
703 *
704 * Unfortunately this means we cannot tell OCF that the driver is blocked,
705 * we do that on the next request.
706 */
707
708 static void
709 ixp_registration(void *arg)
710 {
711 struct ixp_data *ixp = arg;
712 struct ixp_q *q = NULL;
713 IX_MBUF *pri = NULL, *sec = NULL;
714 int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
715
716 if (!ixp) {
717 printk("ixp: ixp_registration with no arg\n");
718 return;
719 }
720
721 if (ixp->ixp_ctx_id != -1) {
722 ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
723 ixp->ixp_ctx_id = -1;
724 }
725
726 if (list_empty(&ixp->ixp_q)) {
727 printk("ixp: ixp_registration with no Q\n");
728 return;
729 }
730
731 /*
732 * setup the primary and secondary buffers
733 */
734 q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
735 if (q->ixp_q_acrd) {
736 pri = &ixp->ixp_pri_mbuf;
737 sec = &ixp->ixp_sec_mbuf;
738 IX_MBUF_MLEN(pri) = IX_MBUF_PKT_LEN(pri) = 128;
739 IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
740 IX_MBUF_MLEN(sec) = IX_MBUF_PKT_LEN(sec) = 128;
741 IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
742 }
743
744 /* Only need to register if a crypt op or HMAC op */
745 if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
746 ixp->ixp_auth_alg == CRYPTO_MD5)) {
747 status = ixCryptoAccCtxRegister(
748 &ixp->ixp_ctx,
749 pri, sec,
750 ixp_register_cb,
751 ixp_perform_cb,
752 &ixp->ixp_ctx_id);
753 }
754 else {
755 /* Otherwise we start processing pending q */
756 schedule_work(&ixp->ixp_pending_work);
757 }
758
759 if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
760 return;
761
762 if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
763 printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
764 ixp_blocked = 1;
765 /* perhaps we should return EGAIN on queued ops ? */
766 return;
767 }
768
769 printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
770 ixp->ixp_ctx_id = -1;
771
772 /*
773 * everything waiting is toasted
774 */
775 while (!list_empty(&ixp->ixp_q)) {
776 q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
777 list_del(&q->ixp_q_list);
778 q->ixp_q_crp->crp_etype = ENOENT;
779 crypto_done(q->ixp_q_crp);
780 kmem_cache_free(qcache, q);
781 }
782 }
783
784 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
785 static void
786 ixp_registration_wq(struct work_struct *work)
787 {
788 struct ixp_data *ixp = container_of(work, struct ixp_data,
789 ixp_registration_work);
790 ixp_registration(ixp);
791 }
792 #endif
793
794 /*
795 * Process a request.
796 */
static int
ixp_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ixp_data *ixp;
	unsigned int lid;
	struct ixp_q *q = NULL;
	int status;

	dprintk("%s()\n", __FUNCTION__);

	/* Sanity check */
	if (crp == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	crp->crp_etype = 0;

	/* out of hardware tunnels: ask OCF to retry later */
	if (ixp_blocked)
		return ERESTART;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

	/*
	 * find the session we are using
	 */

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
			ixp_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	ixp = ixp_sessions[lid];

	/*
	 * setup a new request ready for queuing
	 */
	q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
	if (q == NULL) {
		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
		crp->crp_etype = ENOMEM;
		goto done;
	}
	/*
	 * save some cycles by only zeroing the important bits
	 */
	memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
	q->ixp_q_ccrd = NULL;
	q->ixp_q_acrd = NULL;
	q->ixp_q_crp = crp;
	q->ixp_q_data = ixp;

	/*
	 * point the cipher and auth descriptors appropriately
	 * check that we have something to do
	 */
	if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
		q->ixp_q_ccrd = crp->crp_desc;
	else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
		q->ixp_q_acrd = crp->crp_desc;
	else {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}
	/* at most two chained descriptors (cipher + auth) are supported */
	if (crp->crp_desc->crd_next) {
		if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
			q->ixp_q_ccrd = crp->crp_desc->crd_next;
		else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
			q->ixp_q_acrd = crp->crp_desc->crd_next;
		else {
			crp->crp_etype = ENOENT;
			dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
			goto done;
		}
	}

	/*
	 * If there is a direction change for this context then we mark it as
	 * unregistered and re-register is for the new direction.  This is not
	 * a very expensive operation and currently only tends to happen when
	 * user-space application are doing benchmarks
	 *
	 * DM - we should be checking for pending requests before unregistering.
	 */
	if (q->ixp_q_ccrd && ixp->ixp_registered &&
			ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
		dprintk("%s - detected direction change on session\n", __FUNCTION__);
		ixp->ixp_registered = 0;
	}

	/*
	 * if we are registered, call straight into the perform code
	 */
	if (ixp->ixp_registered) {
		ixp_q_process(q);
		return 0;
	}

	/*
	 * the only part of the context not set in newsession is the direction
	 * dependent parts
	 */
	if (q->ixp_q_ccrd) {
		ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
		} else {
			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
					IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
		}
	} else {
		/* q->ixp_q_acrd must be set if we are here */
		ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
	}

	/* queue the request; kick registration only when the queue was empty */
	status = list_empty(&ixp->ixp_q);
	list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
	if (status)
		schedule_work(&ixp->ixp_registration_work);
	return 0;

done:
	if (q)
		kmem_cache_free(qcache, q);
	crypto_done(crp);
	return 0;
}
932
933
934 #ifdef __ixp46X
935 /*
936 * key processing support for the ixp465
937 */
938
939
940 /*
941 * copy a BN (LE) into a buffer (BE) an fill out the op appropriately
942 * assume zeroed and only copy bits that are significant
943 */
944
945 static int
946 ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
947 {
948 unsigned char *src = (unsigned char *) p->crp_p;
949 unsigned char *dst;
950 int len, bits = p->crp_nbits;
951
952 dprintk("%s()\n", __FUNCTION__);
953
954 if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
955 dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
956 bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
957 return -1;
958 }
959
960 len = (bits + 31) / 32; /* the number UINT32's needed */
961
962 dst = (unsigned char *) &buf[len];
963 dst--;
964
965 while (bits > 0) {
966 *dst-- = *src++;
967 bits -= 8;
968 }
969
970 #if 0 /* no need to zero remaining bits as it is done during request alloc */
971 while (dst > (unsigned char *) buf)
972 *dst-- = '\0';
973 #endif
974
975 op->pData = buf;
976 op->dataLen = len;
977 return 0;
978 }
979
980 /*
981 * copy out the result, be as forgiving as we can about small output buffers
982 */
983
static int
ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
{
	unsigned char *dst = (unsigned char *) p->crp_p;
	unsigned char *src = (unsigned char *) buf;
	int len, z, bits = p->crp_nbits;

	dprintk("%s()\n", __FUNCTION__);

	len = op->dataLen * sizeof(UINT32);

	/* skip leading zeroes to be small buffer friendly */
	z = 0;
	while (z < len && src[z] == '\0')
		z++;

	/* copy BE result into the LE client buffer, starting from the
	 * least significant (last) byte and walking backwards */
	src += len;
	src--;
	len -= z;

	while (len > 0 && bits > 0) {
		*dst++ = *src--;
		len--;
		bits -= 8;
	}

	/* zero-fill any client bytes beyond the significant result */
	while (bits > 0) {
		*dst++ = '\0';
		bits -= 8;
	}

	/* significant bytes remain: the client buffer really was too small */
	if (len > 0) {
		dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
				__FUNCTION__, len, z, p->crp_nbits / 8);
		return -1;
	}

	return 0;
}
1023
1024
/*
 * the parameter offsets for exp_mod (indices into krp->krp_param[])
 */

#define IXP_PARAM_BASE 0	/* base (M) */
#define IXP_PARAM_EXP	1	/* exponent (e) */
#define IXP_PARAM_MOD	2	/* modulus (N) */
#define IXP_PARAM_RES	3	/* result */
1033
1034 /*
1035 * key processing complete callback, is also used to start processing
1036 * by passing a NULL for pResult
1037 */
1038
1039 static void
1040 ixp_kperform_cb(
1041 IxCryptoAccPkeEauOperation operation,
1042 IxCryptoAccPkeEauOpResult *pResult,
1043 BOOL carryOrBorrow,
1044 IxCryptoAccStatus status)
1045 {
1046 struct ixp_pkq *q, *tmp;
1047 unsigned long flags;
1048
1049 dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
1050 carryOrBorrow, status);
1051
1052 /* handle a completed request */
1053 if (pResult) {
1054 if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
1055 q = ixp_pk_cur;
1056 if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
1057 dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
1058 q->pkq_krp->krp_status = ERANGE; /* could do better */
1059 } else {
1060 /* copy out the result */
1061 if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
1062 &q->pkq_result, q->pkq_obuf))
1063 q->pkq_krp->krp_status = ERANGE;
1064 }
1065 crypto_kdone(q->pkq_krp);
1066 kfree(q);
1067 ixp_pk_cur = NULL;
1068 } else
1069 printk("%s - callback with invalid result pointer\n", __FUNCTION__);
1070 }
1071
1072 spin_lock_irqsave(&ixp_pkq_lock, flags);
1073 if (ixp_pk_cur || list_empty(&ixp_pkq)) {
1074 spin_unlock_irqrestore(&ixp_pkq_lock, flags);
1075 return;
1076 }
1077
1078 list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
1079
1080 list_del(&q->pkq_list);
1081 ixp_pk_cur = q;
1082
1083 spin_unlock_irqrestore(&ixp_pkq_lock, flags);
1084
1085 status = ixCryptoAccPkeEauPerform(
1086 IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
1087 &q->pkq_op,
1088 ixp_kperform_cb,
1089 &q->pkq_result);
1090
1091 if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
1092 dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
1093 return; /* callback will return here for callback */
1094 } else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
1095 printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
1096 } else {
1097 printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
1098 __FUNCTION__, status);
1099 }
1100 q->pkq_krp->krp_status = ERANGE; /* could do better */
1101 crypto_kdone(q->pkq_krp);
1102 kfree(q);
1103 spin_lock_irqsave(&ixp_pkq_lock, flags);
1104 }
1105 spin_unlock_irqrestore(&ixp_pkq_lock, flags);
1106 }
1107
1108
/*
 * Queue a public-key operation (only CRK_MOD_EXP is supported) and kick
 * the PKE engine if it is idle.  Always returns 0; failures are
 * reported through krp->krp_status and crypto_kdone().
 */
static int
ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
{
	struct ixp_pkq *q;
	int rc = 0;
	unsigned long flags;

	dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
			krp->krp_param[IXP_PARAM_BASE].crp_nbits,
			krp->krp_param[IXP_PARAM_EXP].crp_nbits,
			krp->krp_param[IXP_PARAM_MOD].crp_nbits,
			krp->krp_param[IXP_PARAM_RES].crp_nbits);


	if (krp->krp_op != CRK_MOD_EXP) {
		krp->krp_status = EOPNOTSUPP;
		goto err;
	}

	q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
	if (q == NULL) {
		krp->krp_status = ENOMEM;
		goto err;
	}

	/*
	 * The PKE engine does not appear to zero the output buffer
	 * appropriately, so we need to do it all here.
	 */
	memset(q, 0, sizeof(*q));

	q->pkq_krp = krp;
	INIT_LIST_HEAD(&q->pkq_list);

	/* load base (M), exponent (e) and modulus (N) into the operand */
	if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
			q->pkq_ibuf0))
		rc = 1;
	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
			&q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
		rc = 2;
	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
			&q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
		rc = 3;

	if (rc) {
		kfree(q);
		krp->krp_status = ERANGE;
		goto err;
	}

	q->pkq_result.pData = q->pkq_obuf;
	q->pkq_result.dataLen =
			(krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;

	spin_lock_irqsave(&ixp_pkq_lock, flags);
	list_add_tail(&q->pkq_list, &ixp_pkq);
	spin_unlock_irqrestore(&ixp_pkq_lock, flags);

	/* engine idle: the callback doubles as the queue starter */
	if (!ixp_pk_cur)
		ixp_kperform_cb(0, NULL, 0, 0);
	return (0);

err:
	crypto_kdone(krp);
	return (0);
}
1175
1176
1177
#ifdef CONFIG_OCF_RANDOMHARVEST
/*
 * We run the random number generator output through SHA so that it
 * is FIPS compliant.
 */

/* completion flag for ixp_hash_cb(): 0 = hash in flight, 1 = success,
 * <0 = negated IxCryptoAccStatus error code; polled by ixp_read_random() */
static volatile int sha_done = 0;
static unsigned char sha_digest[20];	/* SHA-1 output buffer (20 bytes) */
1186
1187 static void
1188 ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
1189 {
1190 dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
1191 if (sha_digest != digest)
1192 printk("digest error\n");
1193 if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
1194 sha_done = 1;
1195 else
1196 sha_done = -status;
1197 }
1198
1199 static int
1200 ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
1201 {
1202 IxCryptoAccStatus status;
1203 int i, n, rc;
1204
1205 dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
1206 memset(buf, 0, maxwords * sizeof(*buf));
1207 status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
1208 if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
1209 dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
1210 __FUNCTION__, status);
1211 return 0;
1212 }
1213
1214 /*
1215 * run the random data through SHA to make it look more random
1216 */
1217
1218 n = sizeof(sha_digest); /* process digest bytes at a time */
1219
1220 rc = 0;
1221 for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
1222 if ((maxwords - i) * sizeof(*buf) < n)
1223 n = (maxwords - i) * sizeof(*buf);
1224 sha_done = 0;
1225 status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
1226 (UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
1227 if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
1228 dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
1229 return -EIO;
1230 }
1231 while (!sha_done)
1232 schedule();
1233 if (sha_done < 0) {
1234 dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
1235 return 0;
1236 }
1237 memcpy(&buf[i], sha_digest, n);
1238 rc += n / sizeof(*buf);;
1239 }
1240
1241 return rc;
1242 }
1243 #endif /* CONFIG_OCF_RANDOMHARVEST */
1244
1245 #endif /* __ixp46X */
1246
1247
1248
1249 /*
1250 * our driver startup and shutdown routines
1251 */
1252
1253 static int
1254 ixp_init(void)
1255 {
1256 dprintk("%s(%p)\n", __FUNCTION__, ixp_init);
1257
1258 if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
1259 printk("ixCryptoAccInit failed, assuming already initialised!\n");
1260
1261 qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
1262 SLAB_HWCACHE_ALIGN, NULL
1263 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
1264 , NULL
1265 #endif
1266 );
1267 if (!qcache) {
1268 printk("failed to create Qcache\n");
1269 return -ENOENT;
1270 }
1271
1272 memset(&ixpdev, 0, sizeof(ixpdev));
1273 softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);
1274
1275 ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
1276 CRYPTOCAP_F_HARDWARE);
1277 if (ixp_id < 0)
1278 panic("IXP/OCF crypto device cannot initialize!");
1279
1280 #define REGISTER(alg) \
1281 crypto_register(ixp_id,alg,0,0)
1282
1283 REGISTER(CRYPTO_DES_CBC);
1284 REGISTER(CRYPTO_3DES_CBC);
1285 REGISTER(CRYPTO_RIJNDAEL128_CBC);
1286 #ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
1287 REGISTER(CRYPTO_MD5);
1288 REGISTER(CRYPTO_SHA1);
1289 #endif
1290 REGISTER(CRYPTO_MD5_HMAC);
1291 REGISTER(CRYPTO_SHA1_HMAC);
1292 #undef REGISTER
1293
1294 #ifdef __ixp46X
1295 spin_lock_init(&ixp_pkq_lock);
1296 /*
1297 * we do not enable the go fast options here as they can potentially
1298 * allow timing based attacks
1299 *
1300 * http://www.openssl.org/news/secadv_20030219.txt
1301 */
1302 ixCryptoAccPkeEauExpConfig(0, 0);
1303 crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
1304 #ifdef CONFIG_OCF_RANDOMHARVEST
1305 crypto_rregister(ixp_id, ixp_read_random, NULL);
1306 #endif
1307 #endif
1308
1309 return 0;
1310 }
1311
1312 static void
1313 ixp_exit(void)
1314 {
1315 dprintk("%s()\n", __FUNCTION__);
1316 crypto_unregister_all(ixp_id);
1317 ixp_id = -1;
1318 kmem_cache_destroy(qcache);
1319 qcache = NULL;
1320 }
1321
/* standard Linux module entry/exit hooks */
module_init(ixp_init);
module_exit(ixp_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");