/*
 * An OCF module that uses the linux kernel cryptoapi, based on the
 * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
 * but is mostly unrecognisable.
 *
 * Written by David McCullough <david_mccullough@mcafee.com>
 * Copyright (C) 2004-2011 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 * 1. distributions of this source code include the above copyright
 *    notice, this list of conditions and the following disclaimer;
 *
 * 2. distributions in binary form include the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in the documentation and/or other associated materials;
 *
 * 3. the copyright holder's name is not used to endorse products
 *    built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
#include <linux/scatterlist.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
#include <crypto/hash.h>
#endif
#include <cryptodev.h>
struct {
    softc_device_decl sc_dev;
} swcr_softc;

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define SW_TYPE_CIPHER      0x01
#define SW_TYPE_HMAC        0x02
#define SW_TYPE_HASH        0x04
#define SW_TYPE_COMP        0x08
#define SW_TYPE_BLKCIPHER   0x10
#define SW_TYPE_ALG_MASK    0x1f

#define SW_TYPE_ASYNC       0x8000

#define SW_TYPE_INUSE       0x10000000

/* We change some of the above if we have an async interface */

#define SW_TYPE_ALG_AMASK   (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)

#define SW_TYPE_ABLKCIPHER  (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
#define SW_TYPE_AHASH       (SW_TYPE_HASH | SW_TYPE_ASYNC)
#define SW_TYPE_AHMAC       (SW_TYPE_HMAC | SW_TYPE_ASYNC)

#define SCATTERLIST_MAX 16
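/*
 * Illustrative sketch (not part of the driver): the SW_TYPE_* values are
 * independent bits, so an async HMAC session carries both the algorithm
 * class bit and the async bit:
 *
 *	int t = SW_TYPE_HMAC | SW_TYPE_ASYNC;        // == SW_TYPE_AHMAC (0x8002)
 *	(t & SW_TYPE_ALG_AMASK) == SW_TYPE_AHMAC     // true: dispatch on both bits
 *	(t & SW_TYPE_ALG_MASK)  == SW_TYPE_HMAC      // true: underlying alg class
 */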
struct swcr_data {
    struct work_struct  workq;
    int                 sw_type;
    int                 sw_alg;
    struct crypto_tfm  *sw_tfm;
    spinlock_t          sw_tfm_lock;
    union {
        struct {
            char *sw_key;
            int   sw_klen;
            int   sw_mlen;
        } hmac;
        void *sw_comp_buf;
    } u;
    struct swcr_data   *sw_next;
};

struct swcr_req {
    struct swcr_data   *sw_head;
    struct swcr_data   *sw;
    struct cryptop     *crp;
    struct cryptodesc  *crd;
    struct scatterlist  sg[SCATTERLIST_MAX];
    unsigned char       iv[EALG_MAX_BLOCK_LEN];
    char                result[HASH_MAX_LEN];
    void               *crypto_req;
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *swcr_req_cache;
#else
static struct kmem_cache *swcr_req_cache;
#endif
#ifndef CRYPTO_TFM_MODE_CBC
/*
 * As of linux-2.6.21 this is no longer defined, and presumably no longer
 * needs to be passed into the crypto core code.
 */
#define CRYPTO_TFM_MODE_CBC 0
#define CRYPTO_TFM_MODE_ECB 0
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
    /*
     * Linux 2.6.19 introduced a new Crypto API; set up macros to convert
     * the new API into the old one.
     */

    /* Symmetric/Block Cipher */
    struct blkcipher_desc
    {
        struct crypto_tfm *tfm;
        void *info;
    };
    #define ecb(X)                              #X , CRYPTO_TFM_MODE_ECB
    #define cbc(X)                              #X , CRYPTO_TFM_MODE_CBC
    #define crypto_has_blkcipher(X, Y, Z)       crypto_alg_available(X, 0)
    #define crypto_blkcipher_cast(X)            X
    #define crypto_blkcipher_tfm(X)             X
    #define crypto_alloc_blkcipher(X, Y, Z)     crypto_alloc_tfm(X, mode)
    #define crypto_blkcipher_ivsize(X)          crypto_tfm_alg_ivsize(X)
    #define crypto_blkcipher_blocksize(X)       crypto_tfm_alg_blocksize(X)
    #define crypto_blkcipher_setkey(X, Y, Z)    crypto_cipher_setkey(X, Y, Z)
    #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
                crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
    #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
                crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
    #define crypto_blkcipher_set_flags(x, y)    /* nop */
    #define crypto_free_blkcipher(x)            crypto_free_tfm(x)
    #define crypto_free_comp                    crypto_free_tfm
    #define crypto_free_hash                    crypto_free_tfm

    /* Hash/HMAC/Digest */
    struct hash_desc
    {
        struct crypto_tfm *tfm;
    };
    #define hmac(X)                         #X , 0
    #define crypto_has_hash(X, Y, Z)        crypto_alg_available(X, 0)
    #define crypto_hash_cast(X)             X
    #define crypto_hash_tfm(X)              X
    #define crypto_alloc_hash(X, Y, Z)      crypto_alloc_tfm(X, mode)
    #define crypto_hash_digestsize(X)       crypto_tfm_alg_digestsize(X)
    #define crypto_hash_digest(W, X, Y, Z) \
                crypto_digest_digest((W)->tfm, X, sg_num, Z)

    /* Asymmetric Cipher */
    #define crypto_has_cipher(X, Y, Z)      crypto_alg_available(X, 0)

    /* Compression */
    #define crypto_has_comp(X, Y, Z)        crypto_alg_available(X, 0)
    #define crypto_comp_tfm(X)              X
    #define crypto_comp_cast(X)             X
    #define crypto_alloc_comp(X, Y, Z)      crypto_alloc_tfm(X, mode)
    #define plain(X)    #X , 0
#else
    #define ecb(X)      "ecb(" #X ")" , 0
    #define cbc(X)      "cbc(" #X ")" , 0
    #define hmac(X)     "hmac(" #X ")" , 0
    #define plain(X)    #X , 0
#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
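/*
 * Example of what the table-entry helper macros expand to (sketch only):
 * on kernels >= 2.6.19,
 *
 *	cbc(aes)  ->  "cbc(aes)" , 0
 *
 * so an initializer written as { cbc(aes), SW_TYPE_BLKCIPHER, } becomes
 * { "cbc(aes)", 0, SW_TYPE_BLKCIPHER }.  On older kernels the same entry
 * expands to { "aes", CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER } for the
 * old API.
 */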
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* no ablkcipher in older kernels */
#define crypto_alloc_ablkcipher(a,b,c)      (NULL)
#define crypto_ablkcipher_tfm(x)            ((struct crypto_tfm *)(x))
#define crypto_ablkcipher_set_flags(a, b)   /* nop */
#define crypto_ablkcipher_setkey(x, y, z)   (-EINVAL)
#define crypto_has_ablkcipher(a,b,c)        (0)
#else
#define HAVE_ABLKCIPHER
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
/* no ahash in older kernels */
#define crypto_ahash_tfm(x)             ((struct crypto_tfm *)(x))
#define crypto_alloc_ahash(a,b,c)       (NULL)
#define crypto_ahash_digestsize(x)      0
#else
#define HAVE_AHASH
#endif
struct crypto_details {
    char *alg_name;
    int mode;
    int sw_type;
};
static struct crypto_details crypto_details[] = {
    [CRYPTO_DES_CBC]        = { cbc(des),           SW_TYPE_BLKCIPHER, },
    [CRYPTO_3DES_CBC]       = { cbc(des3_ede),      SW_TYPE_BLKCIPHER, },
    [CRYPTO_BLF_CBC]        = { cbc(blowfish),      SW_TYPE_BLKCIPHER, },
    [CRYPTO_CAST_CBC]       = { cbc(cast5),         SW_TYPE_BLKCIPHER, },
    [CRYPTO_SKIPJACK_CBC]   = { cbc(skipjack),      SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5_HMAC]       = { hmac(md5),          SW_TYPE_HMAC, },
    [CRYPTO_SHA1_HMAC]      = { hmac(sha1),         SW_TYPE_HMAC, },
    [CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160),    SW_TYPE_HMAC, },
    [CRYPTO_MD5_KPDK]       = { plain(md5-kpdk),    SW_TYPE_HASH, },
    [CRYPTO_SHA1_KPDK]      = { plain(sha1-kpdk),   SW_TYPE_HASH, },
    [CRYPTO_AES_CBC]        = { cbc(aes),           SW_TYPE_BLKCIPHER, },
    [CRYPTO_ARC4]           = { ecb(arc4),          SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5]            = { plain(md5),         SW_TYPE_HASH, },
    [CRYPTO_SHA1]           = { plain(sha1),        SW_TYPE_HASH, },
    [CRYPTO_NULL_HMAC]      = { hmac(digest_null),  SW_TYPE_HMAC, },
    [CRYPTO_NULL_CBC]       = { cbc(cipher_null),   SW_TYPE_BLKCIPHER, },
    [CRYPTO_DEFLATE_COMP]   = { plain(deflate),     SW_TYPE_COMP, },
    [CRYPTO_SHA2_256_HMAC]  = { hmac(sha256),       SW_TYPE_HMAC, },
    [CRYPTO_SHA2_384_HMAC]  = { hmac(sha384),       SW_TYPE_HMAC, },
    [CRYPTO_SHA2_512_HMAC]  = { hmac(sha512),       SW_TYPE_HMAC, },
    [CRYPTO_CAMELLIA_CBC]   = { cbc(camellia),      SW_TYPE_BLKCIPHER, },
    [CRYPTO_SHA2_256]       = { plain(sha256),      SW_TYPE_HASH, },
    [CRYPTO_SHA2_384]       = { plain(sha384),      SW_TYPE_HASH, },
    [CRYPTO_SHA2_512]       = { plain(sha512),      SW_TYPE_HASH, },
    [CRYPTO_RIPEMD160]      = { plain(ripemd160),   SW_TYPE_HASH, },
};
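/*
 * Adding support for another OCF algorithm number is, in sketch, one more
 * table entry; the CRYPTO_XXX id below is hypothetical and would have to
 * exist in cryptodev.h, with the named cipher provided by the kernel:
 *
 *	[CRYPTO_XXX]            = { cbc(xxx),           SW_TYPE_BLKCIPHER, },
 *
 * cryptosoft_init() below probes each entry with crypto_has_*() and
 * registers only those the running kernel actually implements.
 */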
int32_t swcr_id = -1;
module_param(swcr_id, int, 0444);
MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");

int swcr_fail_if_compression_grows = 1;
module_param(swcr_fail_if_compression_grows, int, 0644);
MODULE_PARM_DESC(swcr_fail_if_compression_grows,
                "Treat compression that results in more data as a failure");

int swcr_no_ahash = 0;
module_param(swcr_no_ahash, int, 0644);
MODULE_PARM_DESC(swcr_no_ahash,
                "Do not use async hash/hmac even if available");

int swcr_no_ablk = 0;
module_param(swcr_no_ablk, int, 0644);
MODULE_PARM_DESC(swcr_no_ablk,
                "Do not use async blk ciphers even if available");
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum = 0;
static int swcr_process(device_t, struct cryptop *, int);
static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
static int swcr_freesession(device_t, u_int64_t);

static device_method_t swcr_methods = {
    /* crypto device methods */
    DEVMETHOD(cryptodev_newsession,  swcr_newsession),
    DEVMETHOD(cryptodev_freesession, swcr_freesession),
    DEVMETHOD(cryptodev_process,     swcr_process),
};
#define debug swcr_debug
int swcr_debug = 0;
module_param(swcr_debug, int, 0644);
MODULE_PARM_DESC(swcr_debug, "Enable debug");
static void swcr_process_req(struct swcr_req *req);
/*
 * Some things just need to be run with user context; the kernel
 * compression libs use vmalloc/vfree, for example.
 */
typedef struct {
    struct work_struct wq;
    void (*func)(void *arg);
    void *arg;
} execute_later_t;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
doing_it_now(struct work_struct *wq)
{
    execute_later_t *w = container_of(wq, execute_later_t, wq);
    (w->func)(w->arg);
    kfree(w);
}
#else
static void
doing_it_now(void *arg)
{
    execute_later_t *w = (execute_later_t *) arg;
    (w->func)(w->arg);
    kfree(w);
}
#endif

static void
execute_later(void (fn)(void *), void *arg)
{
    execute_later_t *w;

    w = (execute_later_t *) kmalloc(sizeof(execute_later_t), SLAB_ATOMIC);
    if (w) {
        memset(w, '\0', sizeof(*w)); /* was sizeof(w): cleared only a pointer's worth */
        w->func = fn;
        w->arg = arg;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
        INIT_WORK(&w->wq, doing_it_now);
#else
        INIT_WORK(&w->wq, doing_it_now, w);
#endif
        schedule_work(&w->wq);
    }
}
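/*
 * Usage sketch: defer a call into process (keventd) context when the
 * current context must not sleep, e.g.
 *
 *	execute_later((void (*)(void *))crypto_free_comp,
 *			(void *)crypto_comp_cast(tfm));
 *
 * which is exactly how swcr_freesession() below frees a compression tfm
 * from interrupt context.
 */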
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
    struct swcr_data **swd;
    u_int32_t i;
    int error;
    char *algo;
    int mode;

    dprintk("%s()\n", __FUNCTION__);
    if (sid == NULL || cri == NULL) {
        dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }
    if (swcr_sessions) {
        for (i = 1; i < swcr_sesnum; i++)
            if (swcr_sessions[i] == NULL)
                break;
    } else
        i = 1; /* NB: to silence compiler warning */

    if (swcr_sessions == NULL || i == swcr_sesnum) {
        if (swcr_sessions == NULL) {
            i = 1; /* We leave swcr_sessions[0] empty */
            swcr_sesnum = CRYPTO_SW_SESSIONS;
        } else
            swcr_sesnum *= 2;

        swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
        if (swd == NULL) {
            /* Reset session number */
            if (swcr_sesnum == CRYPTO_SW_SESSIONS)
                swcr_sesnum = 0;
            else
                swcr_sesnum /= 2;
            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
            return ENOBUFS;
        }
        memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

        /* Copy existing sessions */
        if (swcr_sessions) {
            memcpy(swd, swcr_sessions,
                    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
            kfree(swcr_sessions);
        }

        swcr_sessions = swd;
    }

    *sid = i;
    swd = &swcr_sessions[i];
    while (cri) {
        *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
                SLAB_ATOMIC);
        if (*swd == NULL) {
            swcr_freesession(NULL, i);
            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
            return ENOBUFS;
        }
        memset(*swd, 0, sizeof(struct swcr_data));
        if (cri->cri_alg < 0 ||
                cri->cri_alg >= sizeof(crypto_details)/sizeof(crypto_details[0])) {
            printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
            swcr_freesession(NULL, i);
            return EINVAL;
        }
        algo = crypto_details[cri->cri_alg].alg_name;
        if (!algo || !*algo) {
            printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        mode = crypto_details[cri->cri_alg].mode;
        (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
        (*swd)->sw_alg = cri->cri_alg;

        spin_lock_init(&(*swd)->sw_tfm_lock);
        /* Algorithm specific configuration */
        switch (cri->cri_alg) {
        case CRYPTO_NULL_CBC:
            cri->cri_klen = 0; /* make it work with crypto API */
            break;
        default:
            break;
        }
        if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
            dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
                    algo, mode);

            /* try async first */
            (*swd)->sw_tfm = swcr_no_ablk ? NULL :
                    crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
            if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
                dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
                (*swd)->sw_type |= SW_TYPE_ASYNC;
            } else {
                (*swd)->sw_tfm = crypto_blkcipher_tfm(
                        crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
                if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm))
                    dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
            }
            if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
                int err;
                dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
                        algo, mode);
                err = IS_ERR((*swd)->sw_tfm) ? -(PTR_ERR((*swd)->sw_tfm)) : EINVAL;
                (*swd)->sw_tfm = NULL; /* ensure NULL */
                swcr_freesession(NULL, i);
                return err;
            }
458 dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
459 __FUNCTION__
, cri
->cri_klen
, (cri
->cri_klen
+ 7) / 8);
460 for (i
= 0; i
< (cri
->cri_klen
+ 7) / 8; i
++)
461 dprintk("%s0x%x", (i
% 8) ? " " : "\n ",
462 cri
->cri_key
[i
] & 0xff);
            if ((*swd)->sw_type & SW_TYPE_ASYNC) {
                /* OCF doesn't enforce keys */
                crypto_ablkcipher_set_flags(
                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
                        CRYPTO_TFM_REQ_WEAK_KEY);
                error = crypto_ablkcipher_setkey(
                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
                        cri->cri_key, (cri->cri_klen + 7) / 8);
            } else {
                /* OCF doesn't enforce keys */
                crypto_blkcipher_set_flags(
                        crypto_blkcipher_cast((*swd)->sw_tfm),
                        CRYPTO_TFM_REQ_WEAK_KEY);
                error = crypto_blkcipher_setkey(
                        crypto_blkcipher_cast((*swd)->sw_tfm),
                        cri->cri_key, (cri->cri_klen + 7) / 8);
            }
            if (error) {
                printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
                        (*swd)->sw_tfm->crt_flags);
                swcr_freesession(NULL, i);
                return error;
            }
        } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
            dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
                    algo, mode);

            /* try async first */
            (*swd)->sw_tfm = swcr_no_ahash ? NULL :
                    crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
            if ((*swd)->sw_tfm) {
                dprintk("%s %s hash is async\n", __FUNCTION__, algo);
                (*swd)->sw_type |= SW_TYPE_ASYNC;
            } else {
                dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
                (*swd)->sw_tfm = crypto_hash_tfm(
                        crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
            }

            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }
            (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
            (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
                    SLAB_ATOMIC);
            if ((*swd)->u.hmac.sw_key == NULL) {
                swcr_freesession(NULL, i);
                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
                return ENOBUFS;
            }
            memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
            if (cri->cri_mlen) {
                (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
            } else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
                (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
                        __crypto_ahash_cast((*swd)->sw_tfm));
            } else {
                (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
                        crypto_hash_cast((*swd)->sw_tfm));
            }
        } else if ((*swd)->sw_type & SW_TYPE_COMP) {
            (*swd)->sw_tfm = crypto_comp_tfm(
                    crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }
            (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
            if ((*swd)->u.sw_comp_buf == NULL) {
                swcr_freesession(NULL, i);
                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
                return ENOBUFS;
            }
        } else {
            printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        cri = cri->cri_next;
        swd = &((*swd)->sw_next);
    }
    return 0;
}
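/*
 * Consumer-side sketch (assumes the standard OCF API from cryptodev.h):
 * chain two cryptoini descriptors, encryption plus authentication, then
 * ask OCF for a session; OCF in turn calls swcr_newsession() above if this
 * driver is selected:
 *
 *	struct cryptoini crie, cria;
 *	u_int64_t sid;
 *
 *	memset(&crie, 0, sizeof(crie));
 *	memset(&cria, 0, sizeof(cria));
 *	crie.cri_alg  = CRYPTO_AES_CBC;
 *	crie.cri_klen = 128;			// in bits
 *	crie.cri_key  = enc_key;
 *	cria.cri_alg  = CRYPTO_SHA1_HMAC;
 *	cria.cri_klen = 160;			// in bits
 *	cria.cri_key  = auth_key;
 *	crie.cri_next = &cria;
 *	error = crypto_newsession(&sid, &crie, 0);
 */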
/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
    struct swcr_data *swd;
    u_int32_t sid = CRYPTO_SESID2LID(tid);

    dprintk("%s()\n", __FUNCTION__);
    /* >= : sid == swcr_sesnum is already out of range (was `>') */
    if (sid >= swcr_sesnum || swcr_sessions == NULL ||
            swcr_sessions[sid] == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    /* Silently accept and return */
    if (sid == 0)
        return 0;
    while ((swd = swcr_sessions[sid]) != NULL) {
        swcr_sessions[sid] = swd->sw_next;
        if (swd->sw_tfm) {
            switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
            case SW_TYPE_AHMAC:
            case SW_TYPE_AHASH:
                crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
                break;
#endif
#ifdef HAVE_ABLKCIPHER
            case SW_TYPE_ABLKCIPHER:
                crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
                break;
#endif
            case SW_TYPE_BLKCIPHER:
                crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
                break;
            case SW_TYPE_HMAC:
            case SW_TYPE_HASH:
                crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
                break;
            case SW_TYPE_COMP:
                if (in_interrupt())
                    execute_later((void (*)(void *))crypto_free_comp,
                            (void *)crypto_comp_cast(swd->sw_tfm));
                else
                    crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
                break;
            default:
                crypto_free_tfm(swd->sw_tfm);
                break;
            }
            swd->sw_tfm = NULL;
        }

        if (swd->sw_type & SW_TYPE_COMP) {
            if (swd->u.sw_comp_buf)
                kfree(swd->u.sw_comp_buf);
        } else {
            if (swd->u.hmac.sw_key)
                kfree(swd->u.hmac.sw_key);
        }
        kfree(swd);
    }
    return 0;
}
static void swcr_process_req_complete(struct swcr_req *req)
{
    dprintk("%s()\n", __FUNCTION__);

    if (req->sw->sw_type & SW_TYPE_INUSE) {
        unsigned long flags;
        spin_lock_irqsave(&req->sw->sw_tfm_lock, flags);
        req->sw->sw_type &= ~SW_TYPE_INUSE;
        spin_unlock_irqrestore(&req->sw->sw_tfm_lock, flags);
    }

    if (req->crp->crp_etype)
        goto done;

    switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
#if defined(HAVE_AHASH)
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
        crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
                req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
        ahash_request_free(req->crypto_req);
        break;
#endif
#if defined(HAVE_ABLKCIPHER)
    case SW_TYPE_ABLKCIPHER:
        ablkcipher_request_free(req->crypto_req);
        break;
#endif
    case SW_TYPE_CIPHER:
    case SW_TYPE_HMAC:
    case SW_TYPE_HASH:
    case SW_TYPE_COMP:
    case SW_TYPE_BLKCIPHER:
        break;
    default:
        req->crp->crp_etype = EINVAL;
        break;
    }

    req->crd = req->crd->crd_next;
    if (req->crd) {
        swcr_process_req(req);
        return;
    }

done:
    dprintk("%s crypto_done %p\n", __FUNCTION__, req);
    crypto_done(req->crp);
    kmem_cache_free(swcr_req_cache, req);
}
#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
static void swcr_process_callback(struct crypto_async_request *creq, int err)
{
    struct swcr_req *req = creq->data;

    dprintk("%s()\n", __FUNCTION__);

    if (err) {
        if (err == -EINPROGRESS)
            return;
        dprintk("%s() fail %d\n", __FUNCTION__, -err);
        req->crp->crp_etype = -err;
    }

    swcr_process_req_complete(req);
}
#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
static void swcr_process_req(struct swcr_req *req)
{
    struct swcr_data *sw;
    struct cryptop *crp = req->crp;
    struct cryptodesc *crd = req->crd;
    struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
    struct uio *uiop = (struct uio *) crp->crp_buf;
    int sg_num, sg_len, skip;

    dprintk("%s()\n", __FUNCTION__);

    /*
     * Find the crypto context.
     *
     * XXX Note that the logic here prevents us from having
     * XXX the same algorithm multiple times in a session
     * XXX (or rather, we can but it won't give us the right
     * XXX results). To do that, we'd need some way of differentiating
     * XXX between the various instances of an algorithm (so we can
     * XXX locate the correct crypto context).
     */
    for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
        ;

    /* No such context ? */
    if (sw == NULL) {
        crp->crp_etype = EINVAL;
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        goto done;
    }

    req->sw = sw;
    /*
     * For some types we need to ensure only one user at a time, as state
     * is stored in the tfm during an operation and could otherwise be
     * corrupted.
     */
    switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
#endif
    case SW_TYPE_HMAC:
    case SW_TYPE_HASH: {
        unsigned long flags;
        spin_lock_irqsave(&sw->sw_tfm_lock, flags);
        if (sw->sw_type & SW_TYPE_INUSE) {
            spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
            execute_later((void (*)(void *))swcr_process_req, (void *)req);
            return;
        }
        sw->sw_type |= SW_TYPE_INUSE;
        spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
        } break;
    }

    skip = crd->crd_skip;
    /*
     * Set up the SG list, skipping from the start of the buffer.
     */
    memset(req->sg, 0, sizeof(req->sg));
    sg_init_table(req->sg, SCATTERLIST_MAX);
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        int i, len;

        sg_num = 0;
        sg_len = 0;

        if (skip < skb_headlen(skb)) {
            len = skb_headlen(skb) - skip;
            if (len + sg_len > crd->crd_len)
                len = crd->crd_len - sg_len;
            sg_set_page(&req->sg[sg_num],
                    virt_to_page(skb->data + skip), len,
                    offset_in_page(skb->data + skip));
            sg_len += len;
            sg_num++;
            skip = 0;
        } else
            skip -= skb_headlen(skb);

        for (i = 0; sg_len < crd->crd_len &&
                i < skb_shinfo(skb)->nr_frags &&
                sg_num < SCATTERLIST_MAX; i++) {
            if (skip < skb_shinfo(skb)->frags[i].size) {
                len = skb_shinfo(skb)->frags[i].size - skip;
                if (len + sg_len > crd->crd_len)
                    len = crd->crd_len - sg_len;
                sg_set_page(&req->sg[sg_num],
                        skb_frag_page(&skb_shinfo(skb)->frags[i]),
                        len,
                        skb_shinfo(skb)->frags[i].page_offset + skip);
                sg_len += len;
                sg_num++;
                skip = 0;
            } else
                skip -= skb_shinfo(skb)->frags[i].size;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        int len;

        sg_len = 0;
        for (sg_num = 0; sg_len < crd->crd_len &&
                sg_num < uiop->uio_iovcnt &&
                sg_num < SCATTERLIST_MAX; sg_num++) {
            if (skip <= uiop->uio_iov[sg_num].iov_len) {
                len = uiop->uio_iov[sg_num].iov_len - skip;
                if (len + sg_len > crd->crd_len)
                    len = crd->crd_len - sg_len;
                sg_set_page(&req->sg[sg_num],
                        virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
                        len,
                        offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
                sg_len += len;
                skip = 0;
            } else
                skip -= uiop->uio_iov[sg_num].iov_len;
        }
    } else {
        sg_len = (crp->crp_ilen - skip);
        if (sg_len > crd->crd_len)
            sg_len = crd->crd_len;
        sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
                sg_len, offset_in_page(crp->crp_buf + skip));
        sg_num = 1;
    }
    switch (sw->sw_type & SW_TYPE_ALG_AMASK) {

#ifdef HAVE_AHASH
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
        {
        int ret;

        /* check we have room for the result */
        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
                    crd->crd_inject, sw->u.hmac.sw_mlen);
            crp->crp_etype = EINVAL;
            goto done;
        }

        req->crypto_req =
                ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm), GFP_ATOMIC);
        if (!req->crypto_req) {
            crp->crp_etype = ENOMEM;
            dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
            goto done;
        }

        ahash_request_set_callback(req->crypto_req,
                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

        memset(req->result, 0, sizeof(req->result));

        if (sw->sw_type & SW_TYPE_AHMAC)
            crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
                    sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
        ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
        ret = crypto_ahash_digest(req->crypto_req);

        switch (ret) {
        case -EINPROGRESS:
        case -EBUSY:
            return;
        default:
        case 0:
            dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
            crp->crp_etype = ret;
            goto done;
        }
        } break;
#endif /* HAVE_AHASH */
#ifdef HAVE_ABLKCIPHER
    case SW_TYPE_ABLKCIPHER: {
        int ret;
        unsigned char *ivp = req->iv;
        int ivsize =
            crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));

        if (sg_len < crypto_ablkcipher_blocksize(
                __crypto_ablkcipher_cast(sw->sw_tfm))) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
                    sg_len, crypto_ablkcipher_blocksize(
                        __crypto_ablkcipher_cast(sw->sw_tfm)));
            goto done;
        }

        if (ivsize > sizeof(req->iv)) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
            goto done;
        }

        req->crypto_req = ablkcipher_request_alloc(
                __crypto_ablkcipher_cast(sw->sw_tfm), GFP_ATOMIC);
        if (!req->crypto_req) {
            crp->crp_etype = ENOMEM;
            dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
                    __FILE__, __LINE__);
            goto done;
        }

        ablkcipher_request_set_callback(req->crypto_req,
                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
            int i, error;

            if (debug) {
                dprintk("%s key:", __FUNCTION__);
                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
                            crd->crd_key[i] & 0xff);
                dprintk("\n");
            }
            /* OCF doesn't enforce keys */
            crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
                    CRYPTO_TFM_REQ_WEAK_KEY);
            error = crypto_ablkcipher_setkey(
                        __crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
                        (crd->crd_klen + 7) / 8);
            if (error) {
                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
                        error, sw->sw_tfm->crt_flags);
                crp->crp_etype = -error;
            }
        }

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                ivp = crd->crd_iv;
            else
                get_random_bytes(ivp, ivsize);
            /*
             * do we have to copy the IV back to the buffer ?
             */
            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
                    sg_len, ivp);
            ret = crypto_ablkcipher_encrypt(req->crypto_req);

        } else { /* decrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                ivp = crd->crd_iv;
            else
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
                    sg_len, ivp);
            ret = crypto_ablkcipher_decrypt(req->crypto_req);
        }

        switch (ret) {
        case -EINPROGRESS:
        case -EBUSY:
            return;
        default:
        case 0:
            dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
            crp->crp_etype = ret;
            goto done;
        }
        } break;
#endif /* HAVE_ABLKCIPHER */
    case SW_TYPE_BLKCIPHER: {
        unsigned char iv[EALG_MAX_BLOCK_LEN];
        unsigned char *ivp = iv;
        struct blkcipher_desc desc;
        int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));

        if (sg_len < crypto_blkcipher_blocksize(
                crypto_blkcipher_cast(sw->sw_tfm))) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
                    sg_len, crypto_blkcipher_blocksize(
                        crypto_blkcipher_cast(sw->sw_tfm)));
            goto done;
        }

        if (ivsize > sizeof(iv)) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
            goto done;
        }

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
            int i, error;

            if (debug) {
                dprintk("%s key:", __FUNCTION__);
                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
                            crd->crd_key[i] & 0xff);
                dprintk("\n");
            }
            /* OCF doesn't enforce keys */
            crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
                    CRYPTO_TFM_REQ_WEAK_KEY);
            error = crypto_blkcipher_setkey(
                        crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
                        (crd->crd_klen + 7) / 8);
            if (error) {
                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
                        error, sw->sw_tfm->crt_flags);
                crp->crp_etype = -error;
            }
        }

        memset(&desc, 0, sizeof(desc));
        desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
                ivp = crd->crd_iv;
            } else
                get_random_bytes(ivp, ivsize);
            /*
             * do we have to copy the IV back to the buffer ?
             */
            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            desc.info = ivp;
            crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);

        } else { /* decrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
                ivp = crd->crd_iv;
            } else
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            desc.info = ivp;
            crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
        }
        } break;
    case SW_TYPE_HMAC:
    case SW_TYPE_HASH:
        {
        char result[HASH_MAX_LEN];
        struct hash_desc desc;

        /* check we have room for the result */
        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
                    crd->crd_inject, sw->u.hmac.sw_mlen);
            crp->crp_etype = EINVAL;
            goto done;
        }

        memset(&desc, 0, sizeof(desc));
        desc.tfm = crypto_hash_cast(sw->sw_tfm);

        memset(result, 0, sizeof(result));

        if (sw->sw_type & SW_TYPE_HMAC) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
            crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
                    req->sg, sg_num, result);
#else
            crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
                    sw->u.hmac.sw_klen);
            crypto_hash_digest(&desc, req->sg, sg_len, result);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
        } else { /* SW_TYPE_HASH */
            crypto_hash_digest(&desc, req->sg, sg_len, result);
        }

        crypto_copyback(crp->crp_flags, crp->crp_buf,
                crd->crd_inject, sw->u.hmac.sw_mlen, result);
        }
        break;
    case SW_TYPE_COMP: {
        void *ibuf = NULL;
        void *obuf = sw->u.sw_comp_buf;
        int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
        int ret = 0;

        /*
         * we need to use an additional copy if there is more than one
         * input chunk since the kernel comp routines do not handle
         * SG yet.  Otherwise we just use the input buffer as is.
         * Rather than allocate another buffer we just split the tmp
         * buffer we already have.
         * Perhaps we should just use zlib directly ?
         */
        if (sg_num > 1) {
            int blk;

            ibuf = obuf;
            for (blk = 0; blk < sg_num; blk++) {
                memcpy(obuf, sg_virt(&req->sg[blk]),
                        req->sg[blk].length);
                obuf += req->sg[blk].length;
            }
            olen -= sg_len;
        } else
            ibuf = sg_virt(&req->sg[0]);

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
            ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
                    ibuf, ilen, obuf, &olen);
            if (!ret && olen > crd->crd_len) {
                dprintk("cryptosoft: ERANGE compress %d into %d\n",
                        crd->crd_len, olen);
                if (swcr_fail_if_compression_grows)
                    ret = ERANGE;
            }
        } else { /* decompress */
            ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
                    ibuf, ilen, obuf, &olen);
            if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
                dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
                        "space for %d, at offset %d\n",
                        crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
                ret = ETOOSMALL;
            }
        }
        if (ret)
            dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);

        /*
         * on success copy result back,
         * linux crypto API returns -errno, we need to fix that
         */
        crp->crp_etype = ret < 0 ? -ret : ret;
        if (ret == 0) {
            /* copy back the result and return its size */
            crypto_copyback(crp->crp_flags, crp->crp_buf,
                    crd->crd_inject, olen, obuf);
            crp->crp_olen = olen;
        }
        } break;
    default:
        /* Unknown/unsupported algorithm */
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        crp->crp_etype = EINVAL;
        break;
    }

done:
    swcr_process_req_complete(req);
}
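/*
 * Request flow summary: swcr_process() allocates and fills a swcr_req,
 * then swcr_process_req() runs one descriptor (crd).  Synchronous paths
 * fall straight through to swcr_process_req_complete(); async paths
 * return on -EINPROGRESS/-EBUSY and resume in swcr_process_callback().
 * Completion either advances to crd->crd_next (re-entering
 * swcr_process_req()) or signals OCF with crypto_done() and frees the
 * request.
 */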
/*
 * Process a crypto request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
    struct swcr_req *req = NULL;
    u_int32_t lid;

    dprintk("%s()\n", __FUNCTION__);
    /* Sanity check */
    if (crp == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    crp->crp_etype = 0;

    if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        crp->crp_etype = EINVAL;
        goto done;
    }

    lid = crp->crp_sid & 0xffffffff;
    if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
            swcr_sessions[lid] == NULL) {
        crp->crp_etype = ENOENT;
        dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
        goto done;
    }

    /*
     * do some error checking outside of the loop for SKB and IOV processing
     * this leaves us with valid skb or uiop pointers for later
     */
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
        if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
            printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
                    skb_shinfo(skb)->nr_frags);
            goto done;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        struct uio *uiop = (struct uio *) crp->crp_buf;
        if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
            printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
                    uiop->uio_iovcnt);
            goto done;
        }
    }

    /*
     * setup a new request ready for queuing
     */
    req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
    if (req == NULL) {
        dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
        crp->crp_etype = ENOMEM;
        goto done;
    }
    memset(req, 0, sizeof(*req));

    req->sw_head = swcr_sessions[lid];
    req->crp = crp;
    req->crd = crp->crp_desc;

    swcr_process_req(req);
    return 0;

done:
    crypto_done(crp);
    if (req)
        kmem_cache_free(swcr_req_cache, req);
    return 0;
}
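/*
 * Consumer-side sketch (assumes the standard OCF request API): build a
 * cryptop against the session created earlier and hand it to OCF, which
 * routes it to swcr_process() above:
 *
 *	struct cryptop *crp = crypto_getreq(1);	// one descriptor
 *	crp->crp_sid  = sid;
 *	crp->crp_ilen = len;
 *	crp->crp_buf  = (caddr_t) buf;		// contiguous-buffer variant
 *	crp->crp_desc->crd_alg   = CRYPTO_AES_CBC;
 *	crp->crp_desc->crd_len   = len;
 *	crp->crp_desc->crd_flags = CRD_F_ENCRYPT | CRD_F_IV_PRESENT;
 *	crp->crp_callback = my_done;		// invoked from crypto_done()
 *	error = crypto_dispatch(crp);
 */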
static int
cryptosoft_init(void)
{
    int i, sw_type, mode;
    char *algo;

    dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);

    swcr_req_cache = kmem_cache_create("cryptosoft_req",
                sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
                , NULL
#endif
                );
    if (!swcr_req_cache) {
        printk("cryptosoft: failed to create request cache\n");
        return -ENOENT;
    }

    softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);

    swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
            CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
    if (swcr_id < 0) {
        printk("cryptosoft: Software crypto device cannot initialize!");
        return -ENODEV;
    }

#define REGISTER(alg) \
        crypto_register(swcr_id, alg, 0, 0)

    for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
        int found;

        algo = crypto_details[i].alg_name;
        if (!algo || !*algo) {
            dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
            continue;
        }

        mode = crypto_details[i].mode;
        sw_type = crypto_details[i].sw_type;

        found = 0;
        switch (sw_type & SW_TYPE_ALG_MASK) {
        case SW_TYPE_CIPHER:
            found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
            break;
        case SW_TYPE_HMAC:
            found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
            break;
        case SW_TYPE_HASH:
            found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
            break;
        case SW_TYPE_COMP:
            found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
            break;
        case SW_TYPE_BLKCIPHER:
            found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
            if (!found && !swcr_no_ablk)
                found = crypto_has_ablkcipher(algo, 0, 0);
            break;
        }
        if (found)
            REGISTER(i);
        else
            dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
                    __FUNCTION__, sw_type, i, algo);
    }
    return 0;
}
static void
cryptosoft_exit(void)
{
    dprintk("%s()\n", __FUNCTION__);
    crypto_unregister_all(swcr_id);
    swcr_id = -1;
    kmem_cache_destroy(swcr_req_cache);
}

late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");