/*
 * An OCF module that uses the linux kernel cryptoapi, based on the
 * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
 * but is mostly unrecognisable.
 *
 * Written by David McCullough <david_mccullough@mcafee.com>
 * Copyright (C) 2004-2010 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 *   1. distributions of this source code include the above copyright
 *      notice, this list of conditions and the following disclaimer;
 *
 *   2. distributions in binary form include the above copyright
 *      notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other associated materials;
 *
 *   3. the copyright holder's name is not used to endorse products
 *      built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
#include <linux/scatterlist.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
#include <crypto/hash.h>
#endif

#include <cryptodev.h>
struct {
    softc_device_decl sc_dev;
} swcr_softc;
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define SW_TYPE_CIPHER      0x01
#define SW_TYPE_HMAC        0x02
#define SW_TYPE_HASH        0x04
#define SW_TYPE_COMP        0x08
#define SW_TYPE_BLKCIPHER   0x10
#define SW_TYPE_ALG_MASK    0x1f

#define SW_TYPE_ASYNC       0x8000

/* We change some of the above if we have an async interface */

#define SW_TYPE_ALG_AMASK   (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)

#define SW_TYPE_ABLKCIPHER  (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
#define SW_TYPE_AHASH       (SW_TYPE_HASH | SW_TYPE_ASYNC)
#define SW_TYPE_AHMAC       (SW_TYPE_HMAC | SW_TYPE_ASYNC)

#define SCATTERLIST_MAX 16
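/*
 * The SW_TYPE_* values form a small bit-mask scheme: the low bits select
 * the base algorithm class and SW_TYPE_ASYNC is OR'd in when a session
 * ends up bound to one of the kernel's asynchronous interfaces, e.g.
 * SW_TYPE_ABLKCIPHER == SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC.  Code below
 * masks with SW_TYPE_ALG_AMASK to dispatch on both at once.
 * SCATTERLIST_MAX bounds the scatterlist built per request;
 * swcr_process() refuses buffers with more fragments than that.
 */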
struct swcr_data {
    int                 sw_type;
    int                 sw_alg;
    struct crypto_tfm   *sw_tfm;
    union {
        struct {
            char *sw_key;
            int   sw_klen;
            int   sw_mlen;
        } hmac;
        void *sw_comp_buf;
    } u;
    struct swcr_data    *sw_next;
};

struct swcr_req {
    struct swcr_data    *sw_head;
    struct swcr_data    *sw;
    struct cryptop      *crp;
    struct cryptodesc   *crd;
    struct scatterlist  sg[SCATTERLIST_MAX];
    unsigned char       iv[EALG_MAX_BLOCK_LEN];
    char                result[HASH_MAX_LEN];
    void                *crypto_req;
};
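/*
 * A session is a NULL-terminated swcr_data chain, one node per algorithm
 * in the cryptoini list, while swcr_req tracks a single in-flight
 * cryptop: the descriptor being worked on, the scatterlist describing
 * its data and, for the async paths, the kernel crypto request handle.
 */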
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *swcr_req_cache;
#else
static struct kmem_cache *swcr_req_cache;
#endif
#ifndef CRYPTO_TFM_MODE_CBC
/*
 * As of linux-2.6.21 this is no longer defined, and presumably no longer
 * needs to be passed into the crypto core code.
 */
#define CRYPTO_TFM_MODE_CBC 0
#define CRYPTO_TFM_MODE_ECB 0
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/*
 * Linux 2.6.19 introduced a new Crypto API; set up macros to convert the
 * new API into the old one.
 */

/* Symmetric/Block Cipher */
struct blkcipher_desc {
    struct crypto_tfm *tfm;
    void *info;
};
#define ecb(X)                              #X , CRYPTO_TFM_MODE_ECB
#define cbc(X)                              #X , CRYPTO_TFM_MODE_CBC
#define crypto_has_blkcipher(X, Y, Z)       crypto_alg_available(X, 0)
#define crypto_blkcipher_cast(X)            X
#define crypto_blkcipher_tfm(X)             X
#define crypto_alloc_blkcipher(X, Y, Z)     crypto_alloc_tfm(X, mode)
#define crypto_blkcipher_ivsize(X)          crypto_tfm_alg_ivsize(X)
#define crypto_blkcipher_blocksize(X)       crypto_tfm_alg_blocksize(X)
#define crypto_blkcipher_setkey(X, Y, Z)    crypto_cipher_setkey(X, Y, Z)
#define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
        crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
        crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_set_flags(x, y)    /* nop */

/* Hash/HMAC/Digest */
struct hash_desc {
    struct crypto_tfm *tfm;
};
#define hmac(X)                             #X , 0
#define crypto_has_hash(X, Y, Z)            crypto_alg_available(X, 0)
#define crypto_hash_cast(X)                 X
#define crypto_hash_tfm(X)                  X
#define crypto_alloc_hash(X, Y, Z)          crypto_alloc_tfm(X, mode)
#define crypto_hash_digestsize(X)           crypto_tfm_alg_digestsize(X)
#define crypto_hash_digest(W, X, Y, Z) \
        crypto_digest_digest((W)->tfm, X, sg_num, Z)

/* Asymmetric Cipher */
#define crypto_has_cipher(X, Y, Z)          crypto_alg_available(X, 0)

/* Compression */
#define crypto_has_comp(X, Y, Z)            crypto_alg_available(X, 0)
#define crypto_comp_tfm(X)                  X
#define crypto_comp_cast(X)                 X
#define crypto_alloc_comp(X, Y, Z)          crypto_alloc_tfm(X, mode)
#define plain(X)    #X , 0
#else
#define ecb(X)      "ecb(" #X ")" , 0
#define cbc(X)      "cbc(" #X ")" , 0
#define hmac(X)     "hmac(" #X ")" , 0
#define plain(X)    #X , 0
#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
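/*
 * Example of what the wrappers above produce: on 2.6.19 and later the
 * table initialiser { cbc(aes), SW_TYPE_BLKCIPHER, } expands to
 * { "cbc(aes)", 0, SW_TYPE_BLKCIPHER, } -- the kernel's
 * template(cipher) algorithm name plus a dummy mode -- while on older
 * kernels it expands to { "aes", CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER, }
 * for the old crypto_alloc_tfm() interface.
 */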
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* no ablkcipher in older kernels */
#define crypto_alloc_ablkcipher(a,b,c)      (NULL)
#define crypto_ablkcipher_tfm(x)            ((struct crypto_tfm *)(x))
#define crypto_ablkcipher_set_flags(a, b)   /* nop */
#define crypto_ablkcipher_setkey(x, y, z)   (-EINVAL)
#define crypto_has_ablkcipher(a,b,c)        (0)
#else
#define HAVE_ABLKCIPHER
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
/* no ahash in older kernels */
#define crypto_ahash_tfm(x)                 ((struct crypto_tfm *)(x))
#define crypto_alloc_ahash(a,b,c)           (NULL)
#define crypto_ahash_digestsize(x)          0
#else
#define HAVE_AHASH
#endif
struct crypto_details {
    char *alg_name;
    int mode;
    int sw_type;
};
static struct crypto_details crypto_details[] = {
    [CRYPTO_DES_CBC]        = { cbc(des),          SW_TYPE_BLKCIPHER, },
    [CRYPTO_3DES_CBC]       = { cbc(des3_ede),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_BLF_CBC]        = { cbc(blowfish),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_CAST_CBC]       = { cbc(cast5),        SW_TYPE_BLKCIPHER, },
    [CRYPTO_SKIPJACK_CBC]   = { cbc(skipjack),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5_HMAC]       = { hmac(md5),         SW_TYPE_HMAC, },
    [CRYPTO_SHA1_HMAC]      = { hmac(sha1),        SW_TYPE_HMAC, },
    [CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160),   SW_TYPE_HMAC, },
    [CRYPTO_MD5_KPDK]       = { plain(md5-kpdk),   SW_TYPE_HASH, },
    [CRYPTO_SHA1_KPDK]      = { plain(sha1-kpdk),  SW_TYPE_HASH, },
    [CRYPTO_AES_CBC]        = { cbc(aes),          SW_TYPE_BLKCIPHER, },
    [CRYPTO_ARC4]           = { ecb(arc4),         SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5]            = { plain(md5),        SW_TYPE_HASH, },
    [CRYPTO_SHA1]           = { plain(sha1),       SW_TYPE_HASH, },
    [CRYPTO_NULL_HMAC]      = { hmac(digest_null), SW_TYPE_HMAC, },
    [CRYPTO_NULL_CBC]       = { cbc(cipher_null),  SW_TYPE_BLKCIPHER, },
    [CRYPTO_DEFLATE_COMP]   = { plain(deflate),    SW_TYPE_COMP, },
    [CRYPTO_SHA2_256_HMAC]  = { hmac(sha256),      SW_TYPE_HMAC, },
    [CRYPTO_SHA2_384_HMAC]  = { hmac(sha384),      SW_TYPE_HMAC, },
    [CRYPTO_SHA2_512_HMAC]  = { hmac(sha512),      SW_TYPE_HMAC, },
    [CRYPTO_CAMELLIA_CBC]   = { cbc(camellia),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_SHA2_256]       = { plain(sha256),     SW_TYPE_HASH, },
    [CRYPTO_SHA2_384]       = { plain(sha384),     SW_TYPE_HASH, },
    [CRYPTO_SHA2_512]       = { plain(sha512),     SW_TYPE_HASH, },
    [CRYPTO_RIPEMD160]      = { plain(ripemd160),  SW_TYPE_HASH, },
};
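/*
 * This table is indexed by the OCF CRYPTO_* algorithm number.  Entries
 * whose name the running kernel does not recognise (e.g. the -kpdk
 * variants above) simply fail the crypto_has_*() probe in
 * cryptosoft_init() and are never registered.
 */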
int32_t swcr_id = -1;
module_param(swcr_id, int, 0444);
MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
int swcr_fail_if_compression_grows = 1;
module_param(swcr_fail_if_compression_grows, int, 0644);
MODULE_PARM_DESC(swcr_fail_if_compression_grows,
        "Treat compression that results in more data as a failure");

int swcr_no_ahash = 0;
module_param(swcr_no_ahash, int, 0644);
MODULE_PARM_DESC(swcr_no_ahash,
        "Do not use async hash/hmac even if available");

int swcr_no_ablk = 0;
module_param(swcr_no_ablk, int, 0644);
MODULE_PARM_DESC(swcr_no_ablk,
        "Do not use async blk ciphers even if available");
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum = 0;
static int swcr_process(device_t, struct cryptop *, int);
static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
static int swcr_freesession(device_t, u_int64_t);
static device_method_t swcr_methods = {
    /* crypto device methods */
    DEVMETHOD(cryptodev_newsession, swcr_newsession),
    DEVMETHOD(cryptodev_freesession, swcr_freesession),
    DEVMETHOD(cryptodev_process, swcr_process),
};
#define debug swcr_debug
int swcr_debug = 0;
module_param(swcr_debug, int, 0644);
MODULE_PARM_DESC(swcr_debug, "Enable debug");
static void swcr_process_req(struct swcr_req *req);
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
    struct swcr_data **swd;
    u_int32_t i;
    int error;
    char *algo;
    int mode;

    dprintk("%s()\n", __FUNCTION__);
    if (sid == NULL || cri == NULL) {
        dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }
    if (swcr_sessions) {
        for (i = 1; i < swcr_sesnum; i++)
            if (swcr_sessions[i] == NULL)
                break;
    } else
        i = 1;      /* NB: to silence compiler warning */

    if (swcr_sessions == NULL || i == swcr_sesnum) {
        if (swcr_sessions == NULL) {
            i = 1; /* We leave swcr_sessions[0] empty */
            swcr_sesnum = CRYPTO_SW_SESSIONS;
        } else
            swcr_sesnum *= 2;
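        /*
         * (Re)allocate the session table: the first allocation is
         * CRYPTO_SW_SESSIONS entries and each subsequent growth doubles
         * it, so the memcpy below copies the old (half-sized) table.
         * Slot 0 stays unused so a session id of 0 is never handed out.
         */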
        swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
        if (swd == NULL) {
            /* Reset session number */
            if (swcr_sesnum == CRYPTO_SW_SESSIONS)
                swcr_sesnum = 0;
            else
                swcr_sesnum /= 2;
            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
            return ENOBUFS;
        }
        memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

        /* Copy existing sessions */
        if (swcr_sessions) {
            memcpy(swd, swcr_sessions,
                    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
            kfree(swcr_sessions);
        }

        swcr_sessions = swd;
    }

    swd = &swcr_sessions[i];
    *sid = i;
    while (cri) {
        *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
                SLAB_ATOMIC);
        if (*swd == NULL) {
            swcr_freesession(NULL, i);
            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
            return ENOBUFS;
        }
        memset(*swd, 0, sizeof(struct swcr_data));

        if (cri->cri_alg < 0 ||
                cri->cri_alg >= sizeof(crypto_details)/sizeof(crypto_details[0])) {
            printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        algo = crypto_details[cri->cri_alg].alg_name;
        if (!algo || !*algo) {
            printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
            swcr_freesession(NULL, i);
            return EINVAL;
        }
        mode = crypto_details[cri->cri_alg].mode;
        (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
        (*swd)->sw_alg = cri->cri_alg;

        /* Algorithm specific configuration */
        switch (cri->cri_alg) {
        case CRYPTO_NULL_CBC:
            cri->cri_klen = 0; /* make it work with crypto API */
            break;
        default:
            break;
        }
        if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
            dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
                    algo, mode);

            /* try async first */
            (*swd)->sw_tfm = swcr_no_ablk ? NULL :
                    crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
            if ((*swd)->sw_tfm) {
                dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
                (*swd)->sw_type |= SW_TYPE_ASYNC;
            } else {
                dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
                (*swd)->sw_tfm = crypto_blkcipher_tfm(
                        crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
            }
            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }
            if (debug) {
                dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
                        __FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
                for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
                            cri->cri_key[i] & 0xff);
                dprintk("\n");
            }
            if ((*swd)->sw_type & SW_TYPE_ASYNC) {
                /* OCF doesn't enforce keys */
                crypto_ablkcipher_set_flags(
                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
                        CRYPTO_TFM_REQ_WEAK_KEY);
                error = crypto_ablkcipher_setkey(
                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
                        cri->cri_key, (cri->cri_klen + 7) / 8);
            } else {
                /* OCF doesn't enforce keys */
                crypto_blkcipher_set_flags(
                        crypto_blkcipher_cast((*swd)->sw_tfm),
                        CRYPTO_TFM_REQ_WEAK_KEY);
                error = crypto_blkcipher_setkey(
                        crypto_blkcipher_cast((*swd)->sw_tfm),
                        cri->cri_key, (cri->cri_klen + 7) / 8);
            }
            if (error) {
                printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
                        (*swd)->sw_tfm->crt_flags);
                swcr_freesession(NULL, i);
                return error;
            }
        } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
            dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
                    algo, mode);

            /* try async first */
            (*swd)->sw_tfm = swcr_no_ahash ? NULL :
                    crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
            if ((*swd)->sw_tfm) {
                dprintk("%s %s hash is async\n", __FUNCTION__, algo);
                (*swd)->sw_type |= SW_TYPE_ASYNC;
            } else {
                dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
                (*swd)->sw_tfm = crypto_hash_tfm(
                        crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
            }
            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }
            (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
            (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
                    SLAB_ATOMIC);
            if ((*swd)->u.hmac.sw_key == NULL) {
                swcr_freesession(NULL, i);
                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
                return ENOBUFS;
            }
            memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
            if (cri->cri_mlen) {
                (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
            } else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
                (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
                        __crypto_ahash_cast((*swd)->sw_tfm));
            } else {
                (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
                        crypto_hash_cast((*swd)->sw_tfm));
            }
        } else if ((*swd)->sw_type & SW_TYPE_COMP) {
            (*swd)->sw_tfm = crypto_comp_tfm(
                    crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }
            (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
            if ((*swd)->u.sw_comp_buf == NULL) {
                swcr_freesession(NULL, i);
                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
                return ENOBUFS;
            }
        } else {
            printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        cri = cri->cri_next;
        swd = &((*swd)->sw_next);
    }
    return 0;
}
/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
    struct swcr_data *swd;
    u_int32_t sid = CRYPTO_SESID2LID(tid);

    dprintk("%s()\n", __FUNCTION__);
    if (sid > swcr_sesnum || swcr_sessions == NULL ||
            swcr_sessions[sid] == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    /* Silently accept and return */
    if (sid == 0)
        return 0;
    while ((swd = swcr_sessions[sid]) != NULL) {
        swcr_sessions[sid] = swd->sw_next;
        if (swd->sw_tfm) {
            switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
            case SW_TYPE_AHMAC:
            case SW_TYPE_AHASH:
                crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
                break;
#endif
#ifdef HAVE_ABLKCIPHER
            case SW_TYPE_ABLKCIPHER:
                crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
                break;
#endif
            case SW_TYPE_BLKCIPHER:
                crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
                break;
            case SW_TYPE_HMAC:
            case SW_TYPE_HASH:
                crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
                break;
            case SW_TYPE_COMP:
                crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
                break;
            default:
                crypto_free_tfm(swd->sw_tfm);
                break;
            }
            swd->sw_tfm = NULL;
        }
        if (swd->sw_type & SW_TYPE_COMP) {
            if (swd->u.sw_comp_buf)
                kfree(swd->u.sw_comp_buf);
        } else {
            if (swd->u.hmac.sw_key)
                kfree(swd->u.hmac.sw_key);
        }
        kfree(swd);
    }
    return 0;
}
#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
/* older kernels had no async interface */

static void swcr_process_callback(struct crypto_async_request *creq, int err)
{
    struct swcr_req *req = creq->data;

    dprintk("%s()\n", __FUNCTION__);
    if (err) {
        if (err == -EINPROGRESS)
            return;
        dprintk("%s() fail %d\n", __FUNCTION__, -err);
        req->crp->crp_etype = -err;
        goto done;
    }
    switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
        crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
                req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
        ahash_request_free(req->crypto_req);
        break;
    case SW_TYPE_ABLKCIPHER:
        ablkcipher_request_free(req->crypto_req);
        break;
    default:
        req->crp->crp_etype = EINVAL;
        goto done;
    }
    req->crd = req->crd->crd_next;
    if (req->crd) {
        swcr_process_req(req);      /* start the next request */
        return;
    }

done:
    dprintk("%s crypto_done %p\n", __FUNCTION__, req);
    crypto_done(req->crp);
    kmem_cache_free(swcr_req_cache, req);
}
#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
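/*
 * The async completion handler above is also what advances a
 * multi-descriptor request: each time one crd finishes it re-enters
 * swcr_process_req() with the next descriptor, and only when crd_next
 * is NULL does it hand the finished cryptop back to OCF via
 * crypto_done().
 */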
static void swcr_process_req(struct swcr_req *req)
{
    struct swcr_data *sw;
    struct cryptop *crp = req->crp;
    struct cryptodesc *crd = req->crd;
    struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
    struct uio *uiop = (struct uio *) crp->crp_buf;
    int sg_num, sg_len, skip;
    dprintk("%s()\n", __FUNCTION__);

    /*
     * Find the crypto context.
     *
     * XXX Note that the logic here prevents us from having
     * XXX the same algorithm multiple times in a session
     * XXX (or rather, we can but it won't give us the right
     * XXX results).  To do that, we'd need some way of differentiating
     * XXX between the various instances of an algorithm (so we can
     * XXX locate the correct crypto context).
     */
    for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
        ;
    req->sw = sw;

    /* No such context ? */
    if (sw == NULL) {
        crp->crp_etype = EINVAL;
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        goto done;
    }

    skip = crd->crd_skip;
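    /*
     * OCF hands us one of three buffer layouts: an sk_buff (linear head
     * plus page frags), a struct uio iovec list, or a plain contiguous
     * buffer.  Each case below is flattened into the same scatterlist
     * so that the crypto calls further down need not care which one it
     * was.
     */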
    /*
     * setup the SG list skip from the start of the buffer
     */
    memset(req->sg, 0, sizeof(req->sg));
    sg_init_table(req->sg, SCATTERLIST_MAX);
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        int i, len;

        sg_num = 0;
        sg_len = 0;
        if (skip < skb_headlen(skb)) {
            len = skb_headlen(skb) - skip;
            if (len + sg_len > crd->crd_len)
                len = crd->crd_len - sg_len;
            sg_set_page(&req->sg[sg_num],
                    virt_to_page(skb->data + skip), len,
                    offset_in_page(skb->data + skip));
            sg_len += len;
            sg_num++;
            skip = 0;
        } else
            skip -= skb_headlen(skb);
        for (i = 0; sg_len < crd->crd_len &&
                i < skb_shinfo(skb)->nr_frags &&
                sg_num < SCATTERLIST_MAX; i++) {
            if (skip < skb_shinfo(skb)->frags[i].size) {
                len = skb_shinfo(skb)->frags[i].size - skip;
                if (len + sg_len > crd->crd_len)
                    len = crd->crd_len - sg_len;
                sg_set_page(&req->sg[sg_num],
                        skb_shinfo(skb)->frags[i].page,
                        len,
                        skb_shinfo(skb)->frags[i].page_offset + skip);
                sg_len += len;
                sg_num++;
                skip = 0;
            } else
                skip -= skb_shinfo(skb)->frags[i].size;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        int len;

        sg_len = 0;
        for (sg_num = 0; sg_len < crd->crd_len &&
                sg_num < uiop->uio_iovcnt &&
                sg_num < SCATTERLIST_MAX; sg_num++) {
            if (skip <= uiop->uio_iov[sg_num].iov_len) {
                len = uiop->uio_iov[sg_num].iov_len - skip;
                if (len + sg_len > crd->crd_len)
                    len = crd->crd_len - sg_len;
                sg_set_page(&req->sg[sg_num],
                        virt_to_page(uiop->uio_iov[sg_num].iov_base + skip),
                        len,
                        offset_in_page(uiop->uio_iov[sg_num].iov_base + skip));
                sg_len += len;
                skip = 0;
            } else
                skip -= uiop->uio_iov[sg_num].iov_len;
        }
    } else {
        sg_len = (crp->crp_ilen - skip);
        if (sg_len > crd->crd_len)
            sg_len = crd->crd_len;
        sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
                sg_len, offset_in_page(crp->crp_buf + skip));
        sg_num = 1;
    }
    switch (sw->sw_type & SW_TYPE_ALG_AMASK) {

#ifdef HAVE_AHASH
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
        {
        int ret;

        /* check we have room for the result */
        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
                    crd->crd_inject, sw->u.hmac.sw_mlen);
            crp->crp_etype = EINVAL;
            goto done;
        }

        req->crypto_req =
                ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm), GFP_KERNEL);
        if (!req->crypto_req) {
            crp->crp_etype = ENOMEM;
            dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
            goto done;
        }

        ahash_request_set_callback(req->crypto_req,
                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

        memset(req->result, 0, sizeof(req->result));

        if (sw->sw_type & SW_TYPE_AHMAC)
            crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
                    sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
        ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
        ret = crypto_ahash_digest(req->crypto_req);
        switch (ret) {
        case -EINPROGRESS:
        case -EBUSY:
            return;
        default:
        case 0:
            dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
            crp->crp_etype = ret;
            ahash_request_free(req->crypto_req);
            goto done;
        }
        } break;
#endif /* HAVE_AHASH */
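    /*
     * Asynchronous block cipher path below: the request is queued to
     * the kernel crypto layer and, unless it completes inline, the
     * rest of the work happens in swcr_process_callback().
     */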
#ifdef HAVE_ABLKCIPHER
    case SW_TYPE_ABLKCIPHER: {
        int ret;
        unsigned char *ivp = req->iv;
        int ivsize =
            crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));

        if (sg_len < crypto_ablkcipher_blocksize(
                __crypto_ablkcipher_cast(sw->sw_tfm))) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
                    sg_len, crypto_ablkcipher_blocksize(
                        __crypto_ablkcipher_cast(sw->sw_tfm)));
            goto done;
        }

        if (ivsize > sizeof(req->iv)) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
            goto done;
        }

        req->crypto_req = ablkcipher_request_alloc(
                __crypto_ablkcipher_cast(sw->sw_tfm), GFP_KERNEL);
        if (!req->crypto_req) {
            crp->crp_etype = ENOMEM;
            dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
                    __FILE__, __LINE__);
            goto done;
        }

        ablkcipher_request_set_callback(req->crypto_req,
                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
            int i, error;

            if (debug) {
                dprintk("%s key:", __FUNCTION__);
                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
                            crd->crd_key[i] & 0xff);
                dprintk("\n");
            }
            /* OCF doesn't enforce keys */
            crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
                    CRYPTO_TFM_REQ_WEAK_KEY);
            error = crypto_ablkcipher_setkey(
                    __crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
                    (crd->crd_klen + 7) / 8);
            if (error) {
                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
                        error, sw->sw_tfm->crt_flags);
                crp->crp_etype = -error;
            }
        }
        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                ivp = crd->crd_iv;
            else
                get_random_bytes(ivp, ivsize);
            /*
             * do we have to copy the IV back to the buffer ?
             */
            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
                    sg_len, ivp);
            ret = crypto_ablkcipher_encrypt(req->crypto_req);

        } else { /* decrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                ivp = crd->crd_iv;
            else
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
                    sg_len, ivp);
            ret = crypto_ablkcipher_decrypt(req->crypto_req);
        }

        switch (ret) {
        case -EINPROGRESS:
        case -EBUSY:
            return;
        default:
        case 0:
            dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
            crp->crp_etype = ret;
            goto done;
        }
        } break;
#endif /* HAVE_ABLKCIPHER */
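    /*
     * Synchronous block cipher path: same IV handling as the async case
     * above, but the crypto_blkcipher_*_iv() calls complete before they
     * return, so no callback is involved.
     */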
    case SW_TYPE_BLKCIPHER: {
        unsigned char iv[EALG_MAX_BLOCK_LEN];
        unsigned char *ivp = iv;
        struct blkcipher_desc desc;
        int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));

        if (sg_len < crypto_blkcipher_blocksize(
                crypto_blkcipher_cast(sw->sw_tfm))) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
                    sg_len, crypto_blkcipher_blocksize(
                        crypto_blkcipher_cast(sw->sw_tfm)));
            goto done;
        }

        if (ivsize > sizeof(iv)) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
            goto done;
        }
        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
            int i, error;

            if (debug) {
                dprintk("%s key:", __FUNCTION__);
                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
                            crd->crd_key[i] & 0xff);
                dprintk("\n");
            }
            /* OCF doesn't enforce keys */
            crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
                    CRYPTO_TFM_REQ_WEAK_KEY);
            error = crypto_blkcipher_setkey(
                    crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
                    (crd->crd_klen + 7) / 8);
            if (error) {
                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
                        error, sw->sw_tfm->crt_flags);
                crp->crp_etype = -error;
            }
        }

        memset(&desc, 0, sizeof(desc));
        desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
                ivp = crd->crd_iv;
            } else
                get_random_bytes(ivp, ivsize);
            /*
             * do we have to copy the IV back to the buffer ?
             */
            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            desc.info = ivp;
            crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);

        } else { /* decrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
                ivp = crd->crd_iv;
            } else
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            desc.info = ivp;
            crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
        }
        } break;
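    /*
     * Synchronous hash/HMAC: the digest is computed into a stack buffer
     * and sw_mlen bytes of it are copied back into the request buffer
     * at crd_inject.
     */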
    case SW_TYPE_HMAC:
    case SW_TYPE_HASH:
        {
        char result[HASH_MAX_LEN];
        struct hash_desc desc;

        /* check we have room for the result */
        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
                    crd->crd_inject, sw->u.hmac.sw_mlen);
            crp->crp_etype = EINVAL;
            goto done;
        }
        memset(&desc, 0, sizeof(desc));
        desc.tfm = crypto_hash_cast(sw->sw_tfm);

        memset(result, 0, sizeof(result));

        if (sw->sw_type & SW_TYPE_HMAC) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
            crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
                    req->sg, sg_num, result);
#else
            crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
                    sw->u.hmac.sw_klen);
            crypto_hash_digest(&desc, req->sg, sg_len, result);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
        } else { /* SW_TYPE_HASH */
            crypto_hash_digest(&desc, req->sg, sg_len, result);
        }

        crypto_copyback(crp->crp_flags, crp->crp_buf,
                crd->crd_inject, sw->u.hmac.sw_mlen, result);
        } break;
    case SW_TYPE_COMP: {
        void *ibuf;
        void *obuf = sw->u.sw_comp_buf;
        int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
        int ret = 0;

        /*
         * we need to use an additional copy if there is more than one
         * input chunk since the kernel comp routines do not handle
         * SG yet.  Otherwise we just use the input buffer as is.
         * Rather than allocate another buffer we just split the tmp
         * buffer we already have.
         * Perhaps we should just use zlib directly ?
         */
        if (sg_num > 1) {
            int blk;

            ibuf = obuf;
            for (blk = 0; blk < sg_num; blk++) {
                memcpy(obuf, sg_virt(&req->sg[blk]),
                        req->sg[blk].length);
                obuf += req->sg[blk].length;
            }
        } else
            ibuf = sg_virt(&req->sg[0]);
        if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
            ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
                    ibuf, ilen, obuf, &olen);
            if (!ret && olen > crd->crd_len) {
                dprintk("cryptosoft: ERANGE compress %d into %d\n",
                        crd->crd_len, olen);
                if (swcr_fail_if_compression_grows)
                    ret = ERANGE;
            }
        } else { /* decompress */
            ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
                    ibuf, ilen, obuf, &olen);
            if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
                dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
                        "space for %d, at offset %d\n",
                        crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
            }
        }
        if (ret)
            dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);

        /*
         * on success copy result back,
         * linux crypto API returns -errno, we need to fix that
         */
        crp->crp_etype = ret < 0 ? -ret : ret;
        if (ret == 0) {
            /* copy back the result and return its size */
            crypto_copyback(crp->crp_flags, crp->crp_buf,
                    crd->crd_inject, olen, obuf);
            crp->crp_olen = olen;
        }
        } break;

    default:
        /* Unknown/unsupported algorithm */
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        crp->crp_etype = EINVAL;
        break;
    }

done:
    crypto_done(crp);
    kmem_cache_free(swcr_req_cache, req);
}
/*
 * Process a crypto request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
    struct swcr_req *req = NULL;
    u_int32_t lid;

    dprintk("%s()\n", __FUNCTION__);
    /* Sanity check */
    if (crp == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }
    crp->crp_etype = 0;

    if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        crp->crp_etype = EINVAL;
        goto done;
    }

    lid = crp->crp_sid & 0xffffffff;
    if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
            swcr_sessions[lid] == NULL) {
        crp->crp_etype = ENOENT;
        dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
        goto done;
    }
    /*
     * do some error checking outside of the loop for SKB and IOV
     * processing; this leaves us with valid skb or uiop pointers for later
     */
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
        if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
            printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
                    skb_shinfo(skb)->nr_frags);
            goto done;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        struct uio *uiop = (struct uio *) crp->crp_buf;
        if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
            printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
                    uiop->uio_iovcnt);
            goto done;
        }
    }
    /*
     * setup a new request ready for queuing
     */
    req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
    if (req == NULL) {
        dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
        crp->crp_etype = ENOMEM;
        goto done;
    }
    memset(req, 0, sizeof(*req));

    req->sw_head = swcr_sessions[lid];
    req->crp = crp;
    req->crd = crp->crp_desc;

    swcr_process_req(req);
    return 0;

done:
    crypto_done(crp);
    if (req)
        kmem_cache_free(swcr_req_cache, req);
    return 0;
}
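/*
 * Note that, apart from the initial sanity check, swcr_process() does
 * not report errors through its return value: failures set
 * crp->crp_etype and complete the request via crypto_done(), which is
 * the OCF convention.
 */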
static int
cryptosoft_init(void)
{
    int i, sw_type, mode;
    char *algo;

    dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);

    swcr_req_cache = kmem_cache_create("cryptosoft_req",
            sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
            , NULL
#endif
            );
    if (!swcr_req_cache) {
        printk("cryptosoft: failed to create request cache\n");
        return -ENOENT;
    }
    softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);

    swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
            CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
    if (swcr_id < 0) {
        printk("cryptosoft: Software crypto device cannot initialize!");
        return -ENODEV;
    }

#define REGISTER(alg) \
    crypto_register(swcr_id, alg, 0, 0)
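    /*
     * Walk the algorithm table and register with OCF only the
     * algorithms the running kernel actually provides; everything else
     * is skipped with a debug message.
     */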
    for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
        int found;

        algo = crypto_details[i].alg_name;
        if (!algo || !*algo) {
            dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
            continue;
        }

        mode = crypto_details[i].mode;
        sw_type = crypto_details[i].sw_type;

        found = 0;
        switch (sw_type & SW_TYPE_ALG_MASK) {
        case SW_TYPE_CIPHER:
            found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
            break;
        case SW_TYPE_HMAC:
            found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
            break;
        case SW_TYPE_HASH:
            found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
            break;
        case SW_TYPE_COMP:
            found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
            break;
        case SW_TYPE_BLKCIPHER:
            found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
            if (!found && !swcr_no_ablk)
                found = crypto_has_ablkcipher(algo, 0, 0);
            break;
        }
        if (found) {
            REGISTER(i);
        } else {
            dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
                    __FUNCTION__, sw_type, i, algo);
        }
    }
    return 0;
}
static void
cryptosoft_exit(void)
{
    dprintk("%s()\n", __FUNCTION__);
    crypto_unregister_all(swcr_id);
    swcr_id = -1;
    kmem_cache_destroy(swcr_req_cache);
}
late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");