target/linux/generic/files/crypto/ocf/cryptosoft.c
/*
 * An OCF module that uses the linux kernel cryptoapi, based on the
 * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
 * but is mostly unrecognisable.
 *
 * Written by David McCullough <david_mccullough@mcafee.com>
 * Copyright (C) 2004-2011 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 * 1. distributions of this source code include the above copyright
 *    notice, this list of conditions and the following disclaimer;
 *
 * 2. distributions in binary form include the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in the documentation and/or other associated materials;
 *
 * 3. the copyright holder's name is not used to endorse products
 *    built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */

#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
#include <linux/scatterlist.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
#include <crypto/hash.h>
#endif

#include <cryptodev.h>
#include <uio.h>

struct {
	softc_device_decl sc_dev;
} swcr_softc;

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

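/*
 * Session type flags: the low bits select which Linux crypto backend a
 * session uses, SW_TYPE_ASYNC marks sessions backed by the async
 * (ablkcipher/ahash) interfaces, and SW_TYPE_INUSE serialises access to
 * tfms that hold per-operation state.
 */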
#define SW_TYPE_CIPHER		0x01
#define SW_TYPE_HMAC		0x02
#define SW_TYPE_HASH		0x04
#define SW_TYPE_COMP		0x08
#define SW_TYPE_BLKCIPHER	0x10
#define SW_TYPE_ALG_MASK	0x1f

#define SW_TYPE_ASYNC		0x8000

#define SW_TYPE_INUSE		0x10000000

/* We change some of the above if we have an async interface */

#define SW_TYPE_ALG_AMASK	(SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)

#define SW_TYPE_ABLKCIPHER	(SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
#define SW_TYPE_AHASH		(SW_TYPE_HASH | SW_TYPE_ASYNC)
#define SW_TYPE_AHMAC		(SW_TYPE_HMAC | SW_TYPE_ASYNC)

#define SCATTERLIST_MAX 16

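/* Per-algorithm state for one link in a session's descriptor chain. */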
struct swcr_data {
	struct work_struct workq;
	int sw_type;
	int sw_alg;
	struct crypto_tfm *sw_tfm;
	spinlock_t sw_tfm_lock;
	union {
		struct {
			char *sw_key;
			int sw_klen;
			int sw_mlen;
		} hmac;
		void *sw_comp_buf;
	} u;
	struct swcr_data *sw_next;
};

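/*
 * State for one in-flight OCF request: the descriptors from crp_desc are
 * processed one at a time, with swcr_process_req_complete() advancing crd
 * to the next entry in the chain.
 */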
struct swcr_req {
	struct swcr_data *sw_head;
	struct swcr_data *sw;
	struct cryptop *crp;
	struct cryptodesc *crd;
	struct scatterlist sg[SCATTERLIST_MAX];
	unsigned char iv[EALG_MAX_BLOCK_LEN];
	char result[HASH_MAX_LEN];
	void *crypto_req;
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *swcr_req_cache;
#else
static struct kmem_cache *swcr_req_cache;
#endif

#ifndef CRYPTO_TFM_MODE_CBC
/*
 * As of linux-2.6.21 this is no longer defined, and presumably no longer
 * needs to be passed into the crypto core code.
 */
#define CRYPTO_TFM_MODE_CBC 0
#define CRYPTO_TFM_MODE_ECB 0
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/*
 * Linux 2.6.19 introduced a new Crypto API; set up macros to map the new
 * API onto the old one.
 */

/* Symmetric/Block Cipher */
struct blkcipher_desc
{
	struct crypto_tfm *tfm;
	void *info;
};
#define ecb(X) #X , CRYPTO_TFM_MODE_ECB
#define cbc(X) #X , CRYPTO_TFM_MODE_CBC
#define crypto_has_blkcipher(X, Y, Z) crypto_alg_available(X, 0)
#define crypto_blkcipher_cast(X) X
#define crypto_blkcipher_tfm(X) X
#define crypto_alloc_blkcipher(X, Y, Z) crypto_alloc_tfm(X, mode)
#define crypto_blkcipher_ivsize(X) crypto_tfm_alg_ivsize(X)
#define crypto_blkcipher_blocksize(X) crypto_tfm_alg_blocksize(X)
#define crypto_blkcipher_setkey(X, Y, Z) crypto_cipher_setkey(X, Y, Z)
#define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
		crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
		crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_set_flags(x, y) /* nop */
#define crypto_free_blkcipher(x) crypto_free_tfm(x)
#define crypto_free_comp crypto_free_tfm
#define crypto_free_hash crypto_free_tfm

/* Hash/HMAC/Digest */
struct hash_desc
{
	struct crypto_tfm *tfm;
};
#define hmac(X) #X , 0
#define crypto_has_hash(X, Y, Z) crypto_alg_available(X, 0)
#define crypto_hash_cast(X) X
#define crypto_hash_tfm(X) X
#define crypto_alloc_hash(X, Y, Z) crypto_alloc_tfm(X, mode)
#define crypto_hash_digestsize(X) crypto_tfm_alg_digestsize(X)
#define crypto_hash_digest(W, X, Y, Z) \
		crypto_digest_digest((W)->tfm, X, sg_num, Z)

/* Asymmetric Cipher */
#define crypto_has_cipher(X, Y, Z) crypto_alg_available(X, 0)

/* Compression */
#define crypto_has_comp(X, Y, Z) crypto_alg_available(X, 0)
#define crypto_comp_tfm(X) X
#define crypto_comp_cast(X) X
#define crypto_alloc_comp(X, Y, Z) crypto_alloc_tfm(X, mode)
#define plain(X) #X , 0
#else
#define ecb(X) "ecb(" #X ")" , 0
#define cbc(X) "cbc(" #X ")" , 0
#define hmac(X) "hmac(" #X ")" , 0
#define plain(X) #X , 0
#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* no ablkcipher in older kernels */
#define crypto_alloc_ablkcipher(a,b,c) (NULL)
#define crypto_ablkcipher_tfm(x) ((struct crypto_tfm *)(x))
#define crypto_ablkcipher_set_flags(a, b) /* nop */
#define crypto_ablkcipher_setkey(x, y, z) (-EINVAL)
#define crypto_has_ablkcipher(a,b,c) (0)
#else
#define HAVE_ABLKCIPHER
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
/* no ahash in older kernels */
#define crypto_ahash_tfm(x) ((struct crypto_tfm *)(x))
#define crypto_alloc_ahash(a,b,c) (NULL)
#define crypto_ahash_digestsize(x) 0
#else
#define HAVE_AHASH
#endif

struct crypto_details {
	char *alg_name;
	int mode;
	int sw_type;
};

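/*
 * Map OCF algorithm numbers to Linux crypto algorithm names.  Note that
 * the cbc()/ecb()/hmac()/plain() macros above expand to *two* initializers
 * (name string and mode), so each entry below fills in alg_name, mode and
 * sw_type.  On kernels >= 2.6.19, for example,
 * [CRYPTO_AES_CBC] = { cbc(aes), SW_TYPE_BLKCIPHER, } becomes
 * { "cbc(aes)", 0, SW_TYPE_BLKCIPHER }.
 */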
static struct crypto_details crypto_details[] = {
	[CRYPTO_DES_CBC] = { cbc(des), SW_TYPE_BLKCIPHER, },
	[CRYPTO_3DES_CBC] = { cbc(des3_ede), SW_TYPE_BLKCIPHER, },
	[CRYPTO_BLF_CBC] = { cbc(blowfish), SW_TYPE_BLKCIPHER, },
	[CRYPTO_CAST_CBC] = { cbc(cast5), SW_TYPE_BLKCIPHER, },
	[CRYPTO_SKIPJACK_CBC] = { cbc(skipjack), SW_TYPE_BLKCIPHER, },
	[CRYPTO_MD5_HMAC] = { hmac(md5), SW_TYPE_HMAC, },
	[CRYPTO_SHA1_HMAC] = { hmac(sha1), SW_TYPE_HMAC, },
	[CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160), SW_TYPE_HMAC, },
	[CRYPTO_MD5_KPDK] = { plain(md5-kpdk), SW_TYPE_HASH, },
	[CRYPTO_SHA1_KPDK] = { plain(sha1-kpdk), SW_TYPE_HASH, },
	[CRYPTO_AES_CBC] = { cbc(aes), SW_TYPE_BLKCIPHER, },
	[CRYPTO_ARC4] = { ecb(arc4), SW_TYPE_BLKCIPHER, },
	[CRYPTO_MD5] = { plain(md5), SW_TYPE_HASH, },
	[CRYPTO_SHA1] = { plain(sha1), SW_TYPE_HASH, },
	[CRYPTO_NULL_HMAC] = { hmac(digest_null), SW_TYPE_HMAC, },
	[CRYPTO_NULL_CBC] = { cbc(cipher_null), SW_TYPE_BLKCIPHER, },
	[CRYPTO_DEFLATE_COMP] = { plain(deflate), SW_TYPE_COMP, },
	[CRYPTO_SHA2_256_HMAC] = { hmac(sha256), SW_TYPE_HMAC, },
	[CRYPTO_SHA2_384_HMAC] = { hmac(sha384), SW_TYPE_HMAC, },
	[CRYPTO_SHA2_512_HMAC] = { hmac(sha512), SW_TYPE_HMAC, },
	[CRYPTO_CAMELLIA_CBC] = { cbc(camellia), SW_TYPE_BLKCIPHER, },
	[CRYPTO_SHA2_256] = { plain(sha256), SW_TYPE_HASH, },
	[CRYPTO_SHA2_384] = { plain(sha384), SW_TYPE_HASH, },
	[CRYPTO_SHA2_512] = { plain(sha512), SW_TYPE_HASH, },
	[CRYPTO_RIPEMD160] = { plain(ripemd160), SW_TYPE_HASH, },
};

int32_t swcr_id = -1;
module_param(swcr_id, int, 0444);
MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");

int swcr_fail_if_compression_grows = 1;
module_param(swcr_fail_if_compression_grows, int, 0644);
MODULE_PARM_DESC(swcr_fail_if_compression_grows,
		"Treat compression that results in more data as a failure");

int swcr_no_ahash = 0;
module_param(swcr_no_ahash, int, 0644);
MODULE_PARM_DESC(swcr_no_ahash,
		"Do not use async hash/hmac even if available");

int swcr_no_ablk = 0;
module_param(swcr_no_ablk, int, 0644);
MODULE_PARM_DESC(swcr_no_ablk,
		"Do not use async blk ciphers even if available");

static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum = 0;

static int swcr_process(device_t, struct cryptop *, int);
static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
static int swcr_freesession(device_t, u_int64_t);

static device_method_t swcr_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession, swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process, swcr_process),
};

#define debug swcr_debug
int swcr_debug = 0;
module_param(swcr_debug, int, 0644);
MODULE_PARM_DESC(swcr_debug, "Enable debug");

static void swcr_process_req(struct swcr_req *req);

/*
 * Some things just need to be run with user context; the kernel
 * compression libs, for example, use vmalloc/vfree.
 */

typedef struct {
	struct work_struct wq;
	void (*func)(void *arg);
	void *arg;
} execute_later_t;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
doing_it_now(struct work_struct *wq)
{
	execute_later_t *w = container_of(wq, execute_later_t, wq);
	(w->func)(w->arg);
	kfree(w);
}
#else
static void
doing_it_now(void *arg)
{
	execute_later_t *w = (execute_later_t *) arg;
	(w->func)(w->arg);
	kfree(w);
}
#endif

static void
execute_later(void (fn)(void *), void *arg)
{
	execute_later_t *w;

	w = (execute_later_t *) kmalloc(sizeof(execute_later_t), SLAB_ATOMIC);
	if (w) {
		memset(w, '\0', sizeof(*w));
		w->func = fn;
		w->arg = arg;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
		INIT_WORK(&w->wq, doing_it_now);
#else
		INIT_WORK(&w->wq, doing_it_now, w);
#endif
		schedule_work(&w->wq);
	}
}

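/*
 * execute_later() is used below both to defer crypto_free_comp() out of
 * interrupt context and to retry a request whose tfm is still in use.
 */
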
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	u_int32_t i;
	int error;
	char *algo;
	int mode;

	dprintk("%s()\n", __FUNCTION__);
	if (sid == NULL || cri == NULL) {
		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1; /* NB: to silence compiler warning */

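	/*
	 * Grow (or create) the session table: start with CRYPTO_SW_SESSIONS
	 * slots and double on demand.  Slot 0 is left empty so that a
	 * session id of 0 stays invalid.
	 */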
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
			return ENOBUFS;
		}
		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
					(swcr_sesnum / 2) * sizeof(struct swcr_data *));
			kfree(swcr_sessions);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
				SLAB_ATOMIC);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));

		if (cri->cri_alg < 0 ||
				cri->cri_alg >= sizeof(crypto_details)/sizeof(crypto_details[0])) {
			printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		algo = crypto_details[cri->cri_alg].alg_name;
		if (!algo || !*algo) {
			printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		mode = crypto_details[cri->cri_alg].mode;
		(*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
		(*swd)->sw_alg = cri->cri_alg;

		spin_lock_init(&(*swd)->sw_tfm_lock);

		/* Algorithm specific configuration */
		switch (cri->cri_alg) {
		case CRYPTO_NULL_CBC:
			cri->cri_klen = 0; /* make it work with crypto API */
			break;
		default:
			break;
		}

		if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
			dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
					algo, mode);

			/* try async first */
			(*swd)->sw_tfm = swcr_no_ablk ? NULL :
					crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
			if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
				dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
				(*swd)->sw_type |= SW_TYPE_ASYNC;
			} else {
				(*swd)->sw_tfm = crypto_blkcipher_tfm(
						crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
				if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm))
					dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
			}
			if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
				int err;
				dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
						algo, mode);
				err = IS_ERR((*swd)->sw_tfm) ? -(PTR_ERR((*swd)->sw_tfm)) : EINVAL;
				(*swd)->sw_tfm = NULL; /* ensure NULL */
				swcr_freesession(NULL, i);
				return err;
			}

			if (debug) {
				dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
						__FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
				for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
					dprintk("%s0x%x", (i % 8) ? " " : "\n ",
							cri->cri_key[i] & 0xff);
				dprintk("\n");
			}
			if ((*swd)->sw_type & SW_TYPE_ASYNC) {
				/* OCF doesn't enforce keys */
				crypto_ablkcipher_set_flags(
						__crypto_ablkcipher_cast((*swd)->sw_tfm),
						CRYPTO_TFM_REQ_WEAK_KEY);
				error = crypto_ablkcipher_setkey(
						__crypto_ablkcipher_cast((*swd)->sw_tfm),
						cri->cri_key, (cri->cri_klen + 7) / 8);
			} else {
				/* OCF doesn't enforce keys */
				crypto_blkcipher_set_flags(
						crypto_blkcipher_cast((*swd)->sw_tfm),
						CRYPTO_TFM_REQ_WEAK_KEY);
				error = crypto_blkcipher_setkey(
						crypto_blkcipher_cast((*swd)->sw_tfm),
						cri->cri_key, (cri->cri_klen + 7) / 8);
			}
			if (error) {
				printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
						(*swd)->sw_tfm->crt_flags);
				swcr_freesession(NULL, i);
				return error;
			}
		} else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
			dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
					algo, mode);

			/* try async first */
			(*swd)->sw_tfm = swcr_no_ahash ? NULL :
					crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
			if ((*swd)->sw_tfm) {
				dprintk("%s %s hash is async\n", __FUNCTION__, algo);
				(*swd)->sw_type |= SW_TYPE_ASYNC;
			} else {
				dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
				(*swd)->sw_tfm = crypto_hash_tfm(
						crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
			}

			if (!(*swd)->sw_tfm) {
				dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
						algo, mode);
				swcr_freesession(NULL, i);
				return EINVAL;
			}

			(*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
			(*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
					SLAB_ATOMIC);
			if ((*swd)->u.hmac.sw_key == NULL) {
				swcr_freesession(NULL, i);
				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
				return ENOBUFS;
			}
			memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
			if (cri->cri_mlen) {
				(*swd)->u.hmac.sw_mlen = cri->cri_mlen;
			} else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
				(*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
						__crypto_ahash_cast((*swd)->sw_tfm));
			} else {
				(*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
						crypto_hash_cast((*swd)->sw_tfm));
			}
		} else if ((*swd)->sw_type & SW_TYPE_COMP) {
			(*swd)->sw_tfm = crypto_comp_tfm(
					crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
			if (!(*swd)->sw_tfm) {
				dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
						algo, mode);
				swcr_freesession(NULL, i);
				return EINVAL;
			}
			(*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
			if ((*swd)->u.sw_comp_buf == NULL) {
				swcr_freesession(NULL, i);
				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
				return ENOBUFS;
			}
		} else {
			printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	dprintk("%s()\n", __FUNCTION__);
	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
			swcr_sessions[sid] == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return(EINVAL);
	}

	/* Silently accept and return */
	if (sid == 0)
		return(0);

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;
		if (swd->sw_tfm) {
			switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
			case SW_TYPE_AHMAC:
			case SW_TYPE_AHASH:
				crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
				break;
#endif
#ifdef HAVE_ABLKCIPHER
			case SW_TYPE_ABLKCIPHER:
				crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
				break;
#endif
			case SW_TYPE_BLKCIPHER:
				crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
				break;
			case SW_TYPE_HMAC:
			case SW_TYPE_HASH:
				crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
				break;
			case SW_TYPE_COMP:
				if (in_interrupt())
					execute_later((void (*)(void *))crypto_free_comp,
							(void *)crypto_comp_cast(swd->sw_tfm));
				else
					crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
				break;
			default:
				crypto_free_tfm(swd->sw_tfm);
				break;
			}
			swd->sw_tfm = NULL;
		}
		if (swd->sw_type & SW_TYPE_COMP) {
			if (swd->u.sw_comp_buf)
				kfree(swd->u.sw_comp_buf);
		} else {
			if (swd->u.hmac.sw_key)
				kfree(swd->u.hmac.sw_key);
		}
		kfree(swd);
	}
	return 0;
}

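/*
 * Finish one descriptor: drop the tfm INUSE flag, copy results back for
 * async hash operations, then either start the next descriptor in the
 * chain or complete the whole request via crypto_done().
 */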
static void swcr_process_req_complete(struct swcr_req *req)
{
	dprintk("%s()\n", __FUNCTION__);

	if (req->sw->sw_type & SW_TYPE_INUSE) {
		unsigned long flags;
		spin_lock_irqsave(&req->sw->sw_tfm_lock, flags);
		req->sw->sw_type &= ~SW_TYPE_INUSE;
		spin_unlock_irqrestore(&req->sw->sw_tfm_lock, flags);
	}

	if (req->crp->crp_etype)
		goto done;

	switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
#if defined(HAVE_AHASH)
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
		crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
				req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
		ahash_request_free(req->crypto_req);
		break;
#endif
#if defined(HAVE_ABLKCIPHER)
	case SW_TYPE_ABLKCIPHER:
		ablkcipher_request_free(req->crypto_req);
		break;
#endif
	case SW_TYPE_CIPHER:
	case SW_TYPE_HMAC:
	case SW_TYPE_HASH:
	case SW_TYPE_COMP:
	case SW_TYPE_BLKCIPHER:
		break;
	default:
		req->crp->crp_etype = EINVAL;
		goto done;
	}

	req->crd = req->crd->crd_next;
	if (req->crd) {
		swcr_process_req(req);
		return;
	}

done:
	dprintk("%s crypto_done %p\n", __FUNCTION__, req);
	crypto_done(req->crp);
	kmem_cache_free(swcr_req_cache, req);
}

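/*
 * Completion callback for the async (ablkcipher/ahash) paths.  A status of
 * -EINPROGRESS only means a backlogged request has been accepted; the
 * callback will fire again with the final result.
 */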
#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
static void swcr_process_callback(struct crypto_async_request *creq, int err)
{
	struct swcr_req *req = creq->data;

	dprintk("%s()\n", __FUNCTION__);
	if (err) {
		if (err == -EINPROGRESS)
			return;
		dprintk("%s() fail %d\n", __FUNCTION__, -err);
		req->crp->crp_etype = -err;
	}

	swcr_process_req_complete(req);
}
#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */


static void swcr_process_req(struct swcr_req *req)
{
	struct swcr_data *sw;
	struct cryptop *crp = req->crp;
	struct cryptodesc *crd = req->crd;
	struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
	struct uio *uiop = (struct uio *) crp->crp_buf;
	int sg_num, sg_len, skip;

	dprintk("%s()\n", __FUNCTION__);

	/*
	 * Find the crypto context.
	 *
	 * XXX Note that the logic here prevents us from having
	 * XXX the same algorithm multiple times in a session
	 * XXX (or rather, we can but it won't give us the right
	 * XXX results). To do that, we'd need some way of differentiating
	 * XXX between the various instances of an algorithm (so we can
	 * XXX locate the correct crypto context).
	 */
	for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
		;

	/* No such context ? */
	if (sw == NULL) {
		crp->crp_etype = EINVAL;
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		goto done;
	}

	/*
	 * For some types we need to ensure only one user at a time, since
	 * state stored in the tfm during an operation could otherwise be
	 * corrupted.
	 */
	switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
#endif
	case SW_TYPE_HMAC:
	case SW_TYPE_HASH: {
		unsigned long flags;
		spin_lock_irqsave(&sw->sw_tfm_lock, flags);
		if (sw->sw_type & SW_TYPE_INUSE) {
			spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
			execute_later((void (*)(void *))swcr_process_req, (void *)req);
			return;
		}
		sw->sw_type |= SW_TYPE_INUSE;
		spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
	} break;
	}

	req->sw = sw;
	skip = crd->crd_skip;

	/*
	 * Set up the SG list, skipping `skip' bytes from the start of the
	 * buffer; the buffer may be an skbuff, an iovec, or contiguous memory.
	 */
	memset(req->sg, 0, sizeof(req->sg));
	sg_init_table(req->sg, SCATTERLIST_MAX);
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		int i, len;

		sg_num = 0;
		sg_len = 0;

		if (skip < skb_headlen(skb)) {
			len = skb_headlen(skb) - skip;
			if (len + sg_len > crd->crd_len)
				len = crd->crd_len - sg_len;
			sg_set_page(&req->sg[sg_num],
					virt_to_page(skb->data + skip), len,
					offset_in_page(skb->data + skip));
			sg_len += len;
			sg_num++;
			skip = 0;
		} else
			skip -= skb_headlen(skb);

		for (i = 0; sg_len < crd->crd_len &&
				i < skb_shinfo(skb)->nr_frags &&
				sg_num < SCATTERLIST_MAX; i++) {
			if (skip < skb_shinfo(skb)->frags[i].size) {
				len = skb_shinfo(skb)->frags[i].size - skip;
				if (len + sg_len > crd->crd_len)
					len = crd->crd_len - sg_len;
				sg_set_page(&req->sg[sg_num],
						skb_frag_page(&skb_shinfo(skb)->frags[i]),
						len,
						skb_shinfo(skb)->frags[i].page_offset + skip);
				sg_len += len;
				sg_num++;
				skip = 0;
			} else
				skip -= skb_shinfo(skb)->frags[i].size;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		int len;

		sg_len = 0;
		for (sg_num = 0; sg_len < crd->crd_len &&
				sg_num < uiop->uio_iovcnt &&
				sg_num < SCATTERLIST_MAX; sg_num++) {
			if (skip <= uiop->uio_iov[sg_num].iov_len) {
				len = uiop->uio_iov[sg_num].iov_len - skip;
				if (len + sg_len > crd->crd_len)
					len = crd->crd_len - sg_len;
				sg_set_page(&req->sg[sg_num],
						virt_to_page(uiop->uio_iov[sg_num].iov_base + skip),
						len,
						offset_in_page(uiop->uio_iov[sg_num].iov_base + skip));
				sg_len += len;
				skip = 0;
			} else
				skip -= uiop->uio_iov[sg_num].iov_len;
		}
	} else {
		sg_len = (crp->crp_ilen - skip);
		if (sg_len > crd->crd_len)
			sg_len = crd->crd_len;
		sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
				sg_len, offset_in_page(crp->crp_buf + skip));
		sg_num = 1;
	}

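	/*
	 * Dispatch to the backend selected at session setup time.  The async
	 * cases may return before completion, in which case
	 * swcr_process_callback() finishes the request.
	 */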
	switch (sw->sw_type & SW_TYPE_ALG_AMASK) {

#ifdef HAVE_AHASH
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
	{
		int ret;

		/* check we have room for the result */
		if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
			dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
					"digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
					crd->crd_inject, sw->u.hmac.sw_mlen);
			crp->crp_etype = EINVAL;
			goto done;
		}

		req->crypto_req =
				ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm), GFP_ATOMIC);
		if (!req->crypto_req) {
			crp->crp_etype = ENOMEM;
			dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
			goto done;
		}

		ahash_request_set_callback(req->crypto_req,
				CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

		memset(req->result, 0, sizeof(req->result));

		if (sw->sw_type & SW_TYPE_AHMAC)
			crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
					sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
		ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
		ret = crypto_ahash_digest(req->crypto_req);
		switch (ret) {
		case -EINPROGRESS:
		case -EBUSY:
			return;
		default:
		case 0:
			dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
			crp->crp_etype = ret;
			goto done;
		}
	} break;
#endif /* HAVE_AHASH */

#ifdef HAVE_ABLKCIPHER
	case SW_TYPE_ABLKCIPHER: {
		int ret;
		unsigned char *ivp = req->iv;
		int ivsize =
				crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));

		if (sg_len < crypto_ablkcipher_blocksize(
				__crypto_ablkcipher_cast(sw->sw_tfm))) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
					sg_len, crypto_ablkcipher_blocksize(
							__crypto_ablkcipher_cast(sw->sw_tfm)));
			goto done;
		}

		if (ivsize > sizeof(req->iv)) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
			goto done;
		}

		req->crypto_req = ablkcipher_request_alloc(
				__crypto_ablkcipher_cast(sw->sw_tfm), GFP_ATOMIC);
		if (!req->crypto_req) {
			crp->crp_etype = ENOMEM;
			dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
					__FILE__, __LINE__);
			goto done;
		}

		ablkcipher_request_set_callback(req->crypto_req,
				CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			int i, error;

			if (debug) {
				dprintk("%s key:", __FUNCTION__);
				for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
					dprintk("%s0x%x", (i % 8) ? " " : "\n ",
							crd->crd_key[i] & 0xff);
				dprintk("\n");
			}
			/* OCF doesn't enforce keys */
			crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
					CRYPTO_TFM_REQ_WEAK_KEY);
			error = crypto_ablkcipher_setkey(
					__crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
					(crd->crd_klen + 7) / 8);
			if (error) {
				dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
						error, sw->sw_tfm->crt_flags);
				crp->crp_etype = -error;
			}
		}

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
				ivp = crd->crd_iv;
			else
				get_random_bytes(ivp, ivsize);
			/*
			 * do we have to copy the IV back to the buffer ?
			 */
			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
					sg_len, ivp);
			ret = crypto_ablkcipher_encrypt(req->crypto_req);

		} else { /* decrypt */

			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
				ivp = crd->crd_iv;
			else
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
					sg_len, ivp);
			ret = crypto_ablkcipher_decrypt(req->crypto_req);
		}

		switch (ret) {
		case -EINPROGRESS:
		case -EBUSY:
			return;
		default:
		case 0:
			dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
			crp->crp_etype = ret;
			goto done;
		}
	} break;
#endif /* HAVE_ABLKCIPHER */

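	/*
	 * Synchronous blkcipher path: IV handling mirrors the async case
	 * above, but the operation completes inline.
	 */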
	case SW_TYPE_BLKCIPHER: {
		unsigned char iv[EALG_MAX_BLOCK_LEN];
		unsigned char *ivp = iv;
		struct blkcipher_desc desc;
		int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));

		if (sg_len < crypto_blkcipher_blocksize(
				crypto_blkcipher_cast(sw->sw_tfm))) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
					sg_len, crypto_blkcipher_blocksize(
							crypto_blkcipher_cast(sw->sw_tfm)));
			goto done;
		}

		if (ivsize > sizeof(iv)) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
			goto done;
		}

		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			int i, error;

			if (debug) {
				dprintk("%s key:", __FUNCTION__);
				for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
					dprintk("%s0x%x", (i % 8) ? " " : "\n ",
							crd->crd_key[i] & 0xff);
				dprintk("\n");
			}
			/* OCF doesn't enforce keys */
			crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
					CRYPTO_TFM_REQ_WEAK_KEY);
			error = crypto_blkcipher_setkey(
					crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
					(crd->crd_klen + 7) / 8);
			if (error) {
				dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
						error, sw->sw_tfm->crt_flags);
				crp->crp_etype = -error;
			}
		}

		memset(&desc, 0, sizeof(desc));
		desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

			if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
				ivp = crd->crd_iv;
			} else {
				get_random_bytes(ivp, ivsize);
			}
			/*
			 * do we have to copy the IV back to the buffer ?
			 */
			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			desc.info = ivp;
			crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);

		} else { /* decrypt */

			if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
				ivp = crd->crd_iv;
			} else {
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			desc.info = ivp;
			crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
		}
	} break;

	case SW_TYPE_HMAC:
	case SW_TYPE_HASH:
	{
		char result[HASH_MAX_LEN];
		struct hash_desc desc;

		/* check we have room for the result */
		if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
			dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
					"digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
					crd->crd_inject, sw->u.hmac.sw_mlen);
			crp->crp_etype = EINVAL;
			goto done;
		}

		memset(&desc, 0, sizeof(desc));
		desc.tfm = crypto_hash_cast(sw->sw_tfm);

		memset(result, 0, sizeof(result));

		if (sw->sw_type & SW_TYPE_HMAC) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
			crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
					req->sg, sg_num, result);
#else
			crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
					sw->u.hmac.sw_klen);
			crypto_hash_digest(&desc, req->sg, sg_len, result);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */

		} else { /* SW_TYPE_HASH */
			crypto_hash_digest(&desc, req->sg, sg_len, result);
		}

		crypto_copyback(crp->crp_flags, crp->crp_buf,
				crd->crd_inject, sw->u.hmac.sw_mlen, result);
	}
	break;

	case SW_TYPE_COMP: {
		void *ibuf = NULL;
		void *obuf = sw->u.sw_comp_buf;
		int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
		int ret = 0;

		/*
		 * We need to use an additional copy if there is more than one
		 * input chunk, since the kernel comp routines do not handle
		 * SG yet.  Otherwise we just use the input buffer as is.
		 * Rather than allocate another buffer we just split the tmp
		 * buffer we already have.
		 * Perhaps we should just use zlib directly ?
		 */
		if (sg_num > 1) {
			int blk;

			ibuf = obuf;
			for (blk = 0; blk < sg_num; blk++) {
				memcpy(obuf, sg_virt(&req->sg[blk]),
						req->sg[blk].length);
				obuf += req->sg[blk].length;
			}
			olen -= sg_len;
		} else
			ibuf = sg_virt(&req->sg[0]);

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
			ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
					ibuf, ilen, obuf, &olen);
			if (!ret && olen > crd->crd_len) {
				dprintk("cryptosoft: ERANGE compress %d into %d\n",
						crd->crd_len, olen);
				if (swcr_fail_if_compression_grows)
					ret = ERANGE;
			}
		} else { /* decompress */
			ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
					ibuf, ilen, obuf, &olen);
			if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
				dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
						"space for %d, at offset %d\n",
						crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
				ret = ETOOSMALL;
			}
		}
		if (ret)
			dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);

		/*
		 * On success copy the result back; the linux crypto API
		 * returns -errno, we need to fix that.
		 */
		crp->crp_etype = ret < 0 ? -ret : ret;
		if (ret == 0) {
			/* copy back the result and return its size */
			crypto_copyback(crp->crp_flags, crp->crp_buf,
					crd->crd_inject, olen, obuf);
			crp->crp_olen = olen;
		}
	} break;

	default:
		/* Unknown/unsupported algorithm */
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

done:
	swcr_process_req_complete(req);
}


/*
 * Process a crypto request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_req *req = NULL;
	u_int32_t lid;

	dprintk("%s()\n", __FUNCTION__);
	/* Sanity check */
	if (crp == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	crp->crp_etype = 0;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
			swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}

	/*
	 * Do some error checking outside of the loop for SKB and IOV
	 * processing; this leaves us with valid skb or uiop pointers
	 * for later.
	 */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
			printk("%s,%d: %d nr_frags > SCATTERLIST_MAX\n",
					__FILE__, __LINE__, skb_shinfo(skb)->nr_frags);
			goto done;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		struct uio *uiop = (struct uio *) crp->crp_buf;
		if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
			printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX\n",
					__FILE__, __LINE__, uiop->uio_iovcnt);
			goto done;
		}
	}

	/*
	 * setup a new request ready for queuing
	 */
	req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
	if (req == NULL) {
		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
		crp->crp_etype = ENOMEM;
		goto done;
	}
	memset(req, 0, sizeof(*req));

	req->sw_head = swcr_sessions[lid];
	req->crp = crp;
	req->crd = crp->crp_desc;

	swcr_process_req(req);
	return 0;

done:
	crypto_done(crp);
	if (req)
		kmem_cache_free(swcr_req_cache, req);
	return 0;
}


static int
cryptosoft_init(void)
{
	int i, sw_type, mode;
	char *algo;

	dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);

	swcr_req_cache = kmem_cache_create("cryptosoft_req",
			sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
			, NULL
#endif
			);
	if (!swcr_req_cache) {
		printk("cryptosoft: failed to create request cache\n");
		return -ENOENT;
	}

	softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);

	swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		printk("cryptosoft: Software crypto device cannot initialize!\n");
		return -ENODEV;
	}

#define REGISTER(alg) \
		crypto_register(swcr_id, alg, 0,0)

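	/*
	 * Probe each entry in crypto_details against this kernel's crypto
	 * API and register only the algorithms that are actually available.
	 */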
	for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
		int found;

		algo = crypto_details[i].alg_name;
		if (!algo || !*algo) {
			dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
			continue;
		}

		mode = crypto_details[i].mode;
		sw_type = crypto_details[i].sw_type;

		found = 0;
		switch (sw_type & SW_TYPE_ALG_MASK) {
		case SW_TYPE_CIPHER:
			found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
			break;
		case SW_TYPE_HMAC:
			found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
			break;
		case SW_TYPE_HASH:
			found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
			break;
		case SW_TYPE_COMP:
			found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
			break;
		case SW_TYPE_BLKCIPHER:
			found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
			if (!found && !swcr_no_ablk)
				found = crypto_has_ablkcipher(algo, 0, 0);
			break;
		}
		if (found) {
			REGISTER(i);
		} else {
			dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
					__FUNCTION__, sw_type, i, algo);
		}
	}
	return 0;
}

static void
cryptosoft_exit(void)
{
	dprintk("%s()\n", __FUNCTION__);
	crypto_unregister_all(swcr_id);
	swcr_id = -1;
	kmem_cache_destroy(swcr_req_cache);
}

late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");