/* target/linux/generic-2.6/files/crypto/ocf/safe/safe.c */
/*-
 * Linux port done by David McCullough <david_mccullough@securecomputing.com>
 * Copyright (C) 2004-2007 David McCullough
 * The license and original author are listed below.
 *
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * __FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
 */

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/version.h>
#include <linux/skbuff.h>
#include <asm/io.h>

/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */

#include <cryptodev.h>
#include <uio.h>
#include <safe/safereg.h>
#include <safe/safevar.h>

#if 1
#define DPRINTF(a) do { \
        if (debug) { \
            printk("%s: ", sc ? \
                device_get_nameunit(sc->sc_dev) : "safe"); \
            printk a; \
        } \
    } while (0)
#else
#define DPRINTF(a)
#endif
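/*
 * NB: DPRINTF takes its printf-style arguments in an extra set of
 * parentheses so the whole argument list can pass through the single
 * macro parameter, e.g. DPRINTF(("%s: stat=0x%x\n", __func__, stat));
 */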

/*
 * until we find a cleaner way, include the BSD md5/sha1 code
 * here
 */
#define HMAC_HACK 1
#ifdef HMAC_HACK
#define LITTLE_ENDIAN 1234
#define BIG_ENDIAN 4321
#ifdef __LITTLE_ENDIAN
#define BYTE_ORDER LITTLE_ENDIAN
#endif
#ifdef __BIG_ENDIAN
#define BYTE_ORDER BIG_ENDIAN
#endif
#include <safe/md5.h>
#include <safe/md5.c>
#include <safe/sha1.h>
#include <safe/sha1.c>

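/*
 * Standard HMAC (RFC 2104) inner/outer pad blocks: 64 bytes of 0x36
 * and 0x5C, used to pad the key out to one full hash block in
 * safe_setup_mackey() below.
 */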
u_int8_t hmac_ipad_buffer[64] = {
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};

u_int8_t hmac_opad_buffer[64] = {
    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};
#endif /* HMAC_HACK */

/* add proc entry for this */
struct safe_stats safestats;

#define debug safe_debug
int safe_debug = 0;
module_param(safe_debug, int, 0644);
MODULE_PARM_DESC(safe_debug, "Enable debug");

static void safe_callback(struct safe_softc *, struct safe_ringentry *);
static void safe_feed(struct safe_softc *, struct safe_ringentry *);
#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
static void safe_rng_init(struct safe_softc *);
int safe_rngbufsize = 8;        /* 32 bytes each read */
module_param(safe_rngbufsize, int, 0644);
MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
int safe_rngmaxalarm = 8;       /* max alarms before reset */
module_param(safe_rngmaxalarm, int, 0644);
MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
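/*
 * A sketch of how these parameters might be set (module name assumed
 * to be "safe" here):
 *
 *     insmod safe.ko safe_debug=1 safe_rngbufsize=16
 *
 * or at runtime via sysfs, since the 0644 permissions expose them:
 *
 *     echo 1 > /sys/module/safe/parameters/safe_debug
 */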

static void safe_totalreset(struct safe_softc *sc);
static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
static int safe_kstart(struct safe_softc *sc);
static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
static void safe_kfeed(struct safe_softc *sc);
static void safe_kpoll(unsigned long arg);
static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
    u_int32_t len, struct crparam *n);

static int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
static int safe_freesession(device_t, u_int64_t);
static int safe_process(device_t, struct cryptop *, int);

static device_method_t safe_methods = {
    /* crypto device methods */
    DEVMETHOD(cryptodev_newsession,  safe_newsession),
    DEVMETHOD(cryptodev_freesession, safe_freesession),
    DEVMETHOD(cryptodev_process,     safe_process),
    DEVMETHOD(cryptodev_kprocess,    safe_kprocess),
};

#define READ_REG(sc,r)       readl((sc)->sc_base_addr + (r))
#define WRITE_REG(sc,r,val)  writel((val), (sc)->sc_base_addr + (r))

#define SAFE_MAX_CHIPS 8
static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];

/*
 * split our buffers up into safe DMAable byte fragments to avoid lockup
 * bug in 1141 HW on rev 1.0.
 */

static int
pci_map_linear(
    struct safe_softc *sc,
    struct safe_operand *buf,
    void *addr,
    int len)
{
    dma_addr_t tmp;
    int chunk, tlen = len;

    tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);

    buf->mapsize += len;
    while (len > 0) {
        chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
        buf->segs[buf->nsegs].ds_addr = tmp;
        buf->segs[buf->nsegs].ds_len  = chunk;
        buf->segs[buf->nsegs].ds_tlen = tlen;
        buf->nsegs++;
        tmp  += chunk;
        len  -= chunk;
        tlen = 0;
    }
    return 0;
}
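/*
 * Example of the chunking above: on a rev 1.0 part, safe_init_board()
 * sets sc_max_dsize to 256, so a single 600-byte buffer becomes three
 * segments of 256, 256 and 88 bytes sharing one pci_map_single()
 * mapping. Only the first segment carries the total length in ds_tlen
 * (600); the continuation segments carry 0, which is how
 * pci_unmap_operand() knows to unmap the mapping exactly once.
 */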

/*
 * map in a given uio buffer (great on some arches :-)
 */

static int
pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
{
    struct iovec *iov = uio->uio_iov;
    int n;

    DPRINTF(("%s()\n", __FUNCTION__));

    buf->mapsize = 0;
    buf->nsegs = 0;

    for (n = 0; n < uio->uio_iovcnt; n++) {
        pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
        iov++;
    }

    /* identify this buffer by the first segment */
    buf->map = (void *) buf->segs[0].ds_addr;
    return(0);
}

/*
 * map in a given sk_buff
 */

static int
pci_map_skb(struct safe_softc *sc, struct safe_operand *buf, struct sk_buff *skb)
{
    int i;

    DPRINTF(("%s()\n", __FUNCTION__));

    buf->mapsize = 0;
    buf->nsegs = 0;

    pci_map_linear(sc, buf, skb->data, skb_headlen(skb));

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        pci_map_linear(sc, buf,
            page_address(skb_shinfo(skb)->frags[i].page) +
                skb_shinfo(skb)->frags[i].page_offset,
            skb_shinfo(skb)->frags[i].size);
    }

    /* identify this buffer by the first segment */
    buf->map = (void *) buf->segs[0].ds_addr;
    return(0);
}


#if 0 /* not needed at this time */
static void
pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
{
    int i;

    DPRINTF(("%s()\n", __FUNCTION__));
    for (i = 0; i < buf->nsegs; i++)
        pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
            buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
}
#endif

static void
pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
{
    int i;

    DPRINTF(("%s()\n", __FUNCTION__));
    for (i = 0; i < buf->nsegs; i++) {
        if (buf->segs[i].ds_tlen) {
            DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
            pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
                buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
            DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
        }
        buf->segs[i].ds_addr = 0;
        buf->segs[i].ds_len = 0;
        buf->segs[i].ds_tlen = 0;
    }
    buf->nsegs = 0;
    buf->mapsize = 0;
    buf->map = 0;
}

/*
 * SafeXcel Interrupt routine
 */
static irqreturn_t
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
safe_intr(int irq, void *arg)
#else
safe_intr(int irq, void *arg, struct pt_regs *regs)
#endif
{
    struct safe_softc *sc = arg;
    int stat;
    unsigned long flags;

    stat = READ_REG(sc, SAFE_HM_STAT);

    DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));

    if (stat == 0)      /* shared irq, not for us */
        return IRQ_NONE;

    WRITE_REG(sc, SAFE_HI_CLR, stat);   /* IACK */

    if ((stat & SAFE_INT_PE_DDONE)) {
        /*
         * Descriptor(s) done; scan the ring and
         * process completed operations.
         */
        spin_lock_irqsave(&sc->sc_ringmtx, flags);
        while (sc->sc_back != sc->sc_front) {
            struct safe_ringentry *re = sc->sc_back;

#ifdef SAFE_DEBUG
            if (debug) {
                safe_dump_ringstate(sc, __func__);
                safe_dump_request(sc, __func__, re);
            }
#endif
            /*
             * safe_process marks ring entries that were allocated
             * but not used with a csr of zero. This ensures the
             * ring front pointer never needs to be set backwards
             * in the event that an entry is allocated but not used
             * because of a setup error.
             */
            DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
            if (re->re_desc.d_csr != 0) {
                if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
                    DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
                    break;
                }
                if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
                    DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
                    break;
                }
                sc->sc_nqchip--;
                safe_callback(sc, re);
            }
            if (++(sc->sc_back) == sc->sc_ringtop)
                sc->sc_back = sc->sc_ring;
        }
        spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
    }

    /*
     * Check to see if we got any DMA Error
     */
    if (stat & SAFE_INT_PE_ERROR) {
        printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
            (int)READ_REG(sc, SAFE_PE_DMASTAT));
        safestats.st_dmaerr++;
        safe_totalreset(sc);
#if 0
        safe_feed(sc);
#endif
    }

    if (sc->sc_needwakeup) {        /* XXX check high watermark */
        int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
        DPRINTF(("%s: wakeup crypto %x\n", __func__,
            sc->sc_needwakeup));
        sc->sc_needwakeup &= ~wakeup;
        crypto_unblock(sc->sc_cid, wakeup);
    }

    return IRQ_HANDLED;
}

/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
    DPRINTF(("%s()\n", __FUNCTION__));
#ifdef SAFE_DEBUG
    if (debug) {
        safe_dump_ringstate(sc, __func__);
        safe_dump_request(sc, __func__, re);
    }
#endif
    sc->sc_nqchip++;
    if (sc->sc_nqchip > safestats.st_maxqchip)
        safestats.st_maxqchip = sc->sc_nqchip;
    /* poke h/w to check descriptor ring, any value can be written */
    WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

#define N(a) (sizeof(a) / sizeof (a[0]))
static void
safe_setup_enckey(struct safe_session *ses, caddr_t key)
{
    int i;

    bcopy(key, ses->ses_key, ses->ses_klen / 8);

    /* PE is little-endian, ensure proper byte order */
    for (i = 0; i < N(ses->ses_key); i++)
        ses->ses_key[i] = htole32(ses->ses_key[i]);
}

static void
safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
{
#ifdef HMAC_HACK
    MD5_CTX md5ctx;
    SHA1_CTX sha1ctx;
    int i;

    for (i = 0; i < klen; i++)
        key[i] ^= HMAC_IPAD_VAL;

    if (algo == CRYPTO_MD5_HMAC) {
        MD5Init(&md5ctx);
        MD5Update(&md5ctx, key, klen);
        MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
        bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
    } else {
        SHA1Init(&sha1ctx);
        SHA1Update(&sha1ctx, key, klen);
        SHA1Update(&sha1ctx, hmac_ipad_buffer,
            SHA1_HMAC_BLOCK_LEN - klen);
        bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
    }

    for (i = 0; i < klen; i++)
        key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

    if (algo == CRYPTO_MD5_HMAC) {
        MD5Init(&md5ctx);
        MD5Update(&md5ctx, key, klen);
        MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
        bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
    } else {
        SHA1Init(&sha1ctx);
        SHA1Update(&sha1ctx, key, klen);
        SHA1Update(&sha1ctx, hmac_opad_buffer,
            SHA1_HMAC_BLOCK_LEN - klen);
        bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
    }

    for (i = 0; i < klen; i++)
        key[i] ^= HMAC_OPAD_VAL;

#if 0
    /*
     * this code prevents SHA working on a BE host,
     * so it is obviously wrong. I think the byte
     * swap setup we do with the chip fixes this for us
     */

    /* PE is little-endian, ensure proper byte order */
    for (i = 0; i < N(ses->ses_hminner); i++) {
        ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
        ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
    }
#endif
#else /* HMAC_HACK */
    printk("safe: md5/sha not implemented\n");
#endif /* HMAC_HACK */
}
#undef N
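/*
 * The three XOR passes above leave the caller's key unmodified overall:
 * key ^ ipad, then ^ (ipad ^ opad) giving key ^ opad, then ^ opad giving
 * the key back. In between, ses_hminner/ses_hmouter capture the digest
 * state after one block of (key XOR ipad) and of (key XOR opad)
 * respectively -- the standard HMAC precomputation, which the hardware
 * then continues per packet.
 */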

/*
 * Allocate a new 'session' and return an encoded session id. 'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
    struct safe_softc *sc = device_get_softc(dev);
    struct cryptoini *c, *encini = NULL, *macini = NULL;
    struct safe_session *ses = NULL;
    int sesn;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (sidp == NULL || cri == NULL || sc == NULL)
        return (EINVAL);

    for (c = cri; c != NULL; c = c->cri_next) {
        if (c->cri_alg == CRYPTO_MD5_HMAC ||
            c->cri_alg == CRYPTO_SHA1_HMAC ||
            c->cri_alg == CRYPTO_NULL_HMAC) {
            if (macini)
                return (EINVAL);
            macini = c;
        } else if (c->cri_alg == CRYPTO_DES_CBC ||
            c->cri_alg == CRYPTO_3DES_CBC ||
            c->cri_alg == CRYPTO_AES_CBC ||
            c->cri_alg == CRYPTO_NULL_CBC) {
            if (encini)
                return (EINVAL);
            encini = c;
        } else
            return (EINVAL);
    }
    if (encini == NULL && macini == NULL)
        return (EINVAL);
    if (encini) {           /* validate key length */
        switch (encini->cri_alg) {
        case CRYPTO_DES_CBC:
            if (encini->cri_klen != 64)
                return (EINVAL);
            break;
        case CRYPTO_3DES_CBC:
            if (encini->cri_klen != 192)
                return (EINVAL);
            break;
        case CRYPTO_AES_CBC:
            if (encini->cri_klen != 128 &&
                encini->cri_klen != 192 &&
                encini->cri_klen != 256)
                return (EINVAL);
            break;
        }
    }

    if (sc->sc_sessions == NULL) {
        ses = sc->sc_sessions = (struct safe_session *)
            kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
        if (ses == NULL)
            return (ENOMEM);
        memset(ses, 0, sizeof(struct safe_session));
        sesn = 0;
        sc->sc_nsessions = 1;
    } else {
        for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
            if (sc->sc_sessions[sesn].ses_used == 0) {
                ses = &sc->sc_sessions[sesn];
                break;
            }
        }

        if (ses == NULL) {
            sesn = sc->sc_nsessions;
            ses = (struct safe_session *)
                kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
            if (ses == NULL)
                return (ENOMEM);
            memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
            bcopy(sc->sc_sessions, ses, sesn *
                sizeof(struct safe_session));
            bzero(sc->sc_sessions, sesn *
                sizeof(struct safe_session));
            kfree(sc->sc_sessions);
            sc->sc_sessions = ses;
            ses = &sc->sc_sessions[sesn];
            sc->sc_nsessions++;
        }
    }

    bzero(ses, sizeof(struct safe_session));
    ses->ses_used = 1;

    if (encini) {
        /* get an IV */
        /* XXX may read fewer than requested */
        read_random(ses->ses_iv, sizeof(ses->ses_iv));

        ses->ses_klen = encini->cri_klen;
        if (encini->cri_key != NULL)
            safe_setup_enckey(ses, encini->cri_key);
    }

    if (macini) {
        ses->ses_mlen = macini->cri_mlen;
        if (ses->ses_mlen == 0) {
            if (macini->cri_alg == CRYPTO_MD5_HMAC)
                ses->ses_mlen = MD5_HASH_LEN;
            else
                ses->ses_mlen = SHA1_HASH_LEN;
        }

        if (macini->cri_key != NULL) {
            safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
                macini->cri_klen / 8);
        }
    }

    *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
    return (0);
}

/*
 * Deallocate a session.
 */
static int
safe_freesession(device_t dev, u_int64_t tid)
{
    struct safe_softc *sc = device_get_softc(dev);
    int session, ret;
    u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (sc == NULL)
        return (EINVAL);

    session = SAFE_SESSION(sid);
    if (session < sc->sc_nsessions) {
        bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
        ret = 0;
    } else
        ret = EINVAL;
    return (ret);
}

static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
    struct safe_softc *sc = device_get_softc(dev);
    int err = 0, i, nicealign, uniform;
    struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
    int bypass, oplen, ivsize;
    caddr_t iv;
    int16_t coffset;
    struct safe_session *ses;
    struct safe_ringentry *re;
    struct safe_sarec *sa;
    struct safe_pdesc *pd;
    u_int32_t cmd0, cmd1, staterec;
    unsigned long flags;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
        safestats.st_invalid++;
        return (EINVAL);
    }
    if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
        safestats.st_badsession++;
        return (EINVAL);
    }

    spin_lock_irqsave(&sc->sc_ringmtx, flags);
    if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
        safestats.st_ringfull++;
        sc->sc_needwakeup |= CRYPTO_SYMQ;
        spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
        return (ERESTART);
    }
    re = sc->sc_front;

    staterec = re->re_sa.sa_staterec;   /* save */
    /* NB: zero everything but the PE descriptor */
    bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
    re->re_sa.sa_staterec = staterec;   /* restore */

    re->re_crp = crp;
    re->re_sesn = SAFE_SESSION(crp->crp_sid);

    re->re_src.nsegs = 0;
    re->re_dst.nsegs = 0;

    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        re->re_src_skb = (struct sk_buff *)crp->crp_buf;
        re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        re->re_src_io = (struct uio *)crp->crp_buf;
        re->re_dst_io = (struct uio *)crp->crp_buf;
    } else {
        safestats.st_badflags++;
        err = EINVAL;
        goto errout;    /* XXX we don't handle contiguous blocks! */
    }

    sa = &re->re_sa;
    ses = &sc->sc_sessions[re->re_sesn];

    crd1 = crp->crp_desc;
    if (crd1 == NULL) {
        safestats.st_nodesc++;
        err = EINVAL;
        goto errout;
    }
    crd2 = crd1->crd_next;

    cmd0 = SAFE_SA_CMD0_BASIC;      /* basic group operation */
    cmd1 = 0;
    if (crd2 == NULL) {
        if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
            crd1->crd_alg == CRYPTO_SHA1_HMAC ||
            crd1->crd_alg == CRYPTO_NULL_HMAC) {
            maccrd = crd1;
            enccrd = NULL;
            cmd0 |= SAFE_SA_CMD0_OP_HASH;
        } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
            crd1->crd_alg == CRYPTO_3DES_CBC ||
            crd1->crd_alg == CRYPTO_AES_CBC ||
            crd1->crd_alg == CRYPTO_NULL_CBC) {
            maccrd = NULL;
            enccrd = crd1;
            cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
        } else {
            safestats.st_badalg++;
            err = EINVAL;
            goto errout;
        }
    } else {
        if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
            crd1->crd_alg == CRYPTO_SHA1_HMAC ||
            crd1->crd_alg == CRYPTO_NULL_HMAC) &&
            (crd2->crd_alg == CRYPTO_DES_CBC ||
            crd2->crd_alg == CRYPTO_3DES_CBC ||
            crd2->crd_alg == CRYPTO_AES_CBC ||
            crd2->crd_alg == CRYPTO_NULL_CBC) &&
            ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
            maccrd = crd1;
            enccrd = crd2;
        } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
            crd1->crd_alg == CRYPTO_3DES_CBC ||
            crd1->crd_alg == CRYPTO_AES_CBC ||
            crd1->crd_alg == CRYPTO_NULL_CBC) &&
            (crd2->crd_alg == CRYPTO_MD5_HMAC ||
            crd2->crd_alg == CRYPTO_SHA1_HMAC ||
            crd2->crd_alg == CRYPTO_NULL_HMAC) &&
            (crd1->crd_flags & CRD_F_ENCRYPT)) {
            enccrd = crd1;
            maccrd = crd2;
        } else {
            safestats.st_badalg++;
            err = EINVAL;
            goto errout;
        }
        cmd0 |= SAFE_SA_CMD0_OP_BOTH;
    }

    if (enccrd) {
        if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
            safe_setup_enckey(ses, enccrd->crd_key);

        if (enccrd->crd_alg == CRYPTO_DES_CBC) {
            cmd0 |= SAFE_SA_CMD0_DES;
            cmd1 |= SAFE_SA_CMD1_CBC;
            ivsize = 2*sizeof(u_int32_t);
        } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
            cmd0 |= SAFE_SA_CMD0_3DES;
            cmd1 |= SAFE_SA_CMD1_CBC;
            ivsize = 2*sizeof(u_int32_t);
        } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
            cmd0 |= SAFE_SA_CMD0_AES;
            cmd1 |= SAFE_SA_CMD1_CBC;
            if (ses->ses_klen == 128)
                cmd1 |= SAFE_SA_CMD1_AES128;
            else if (ses->ses_klen == 192)
                cmd1 |= SAFE_SA_CMD1_AES192;
            else
                cmd1 |= SAFE_SA_CMD1_AES256;
            ivsize = 4*sizeof(u_int32_t);
        } else {
            cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
            ivsize = 0;
        }

        /*
         * Setup encrypt/decrypt state. When using basic ops
         * we can't use an inline IV because hash/crypt offset
         * must be from the end of the IV to the start of the
         * crypt data and this leaves out the preceding header
         * from the hash calculation. Instead we place the IV
         * in the state record and set the hash/crypt offset to
         * copy both the header+IV.
         */
        if (enccrd->crd_flags & CRD_F_ENCRYPT) {
            cmd0 |= SAFE_SA_CMD0_OUTBOUND;

            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
                iv = enccrd->crd_iv;
            else
                iv = (caddr_t) ses->ses_iv;
            if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                    enccrd->crd_inject, ivsize, iv);
            }
            bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
            /* make iv LE */
            for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
                re->re_sastate.sa_saved_iv[i] =
                    cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
            cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
            re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
        } else {
            cmd0 |= SAFE_SA_CMD0_INBOUND;

            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
                bcopy(enccrd->crd_iv,
                    re->re_sastate.sa_saved_iv, ivsize);
            } else {
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                    enccrd->crd_inject, ivsize,
                    (caddr_t)re->re_sastate.sa_saved_iv);
            }
            /* make iv LE */
            for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
                re->re_sastate.sa_saved_iv[i] =
                    cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
            cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
        }
        /*
         * For basic encryption use the zero pad algorithm.
         * This pads results to an 8-byte boundary and
         * suppresses padding verification for inbound (i.e.
         * decrypt) operations.
         *
         * NB: Not sure if the 8-byte pad boundary is a problem.
         */
        cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

        /* XXX assert key bufs have the same size */
        bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
    }

    if (maccrd) {
        if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
            safe_setup_mackey(ses, maccrd->crd_alg,
                maccrd->crd_key, maccrd->crd_klen / 8);
        }

        if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
            cmd0 |= SAFE_SA_CMD0_MD5;
            cmd1 |= SAFE_SA_CMD1_HMAC;  /* NB: enable HMAC */
        } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
            cmd0 |= SAFE_SA_CMD0_SHA1;
            cmd1 |= SAFE_SA_CMD1_HMAC;  /* NB: enable HMAC */
        } else {
            cmd0 |= SAFE_SA_CMD0_HASH_NULL;
        }
        /*
         * Digest data is loaded from the SA and the hash
         * result is saved to the state block where we
         * retrieve it for return to the caller.
         */
        /* XXX assert digest bufs have the same size */
        bcopy(ses->ses_hminner, sa->sa_indigest,
            sizeof(sa->sa_indigest));
        bcopy(ses->ses_hmouter, sa->sa_outdigest,
            sizeof(sa->sa_outdigest));

        cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
        re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
    }

    if (enccrd && maccrd) {
        /*
         * The offset from hash data to the start of
         * crypt data is the difference in the skips.
         */
        bypass = maccrd->crd_skip;
        coffset = enccrd->crd_skip - maccrd->crd_skip;
        if (coffset < 0) {
            DPRINTF(("%s: hash does not precede crypt; "
                "mac skip %u enc skip %u\n",
                __func__, maccrd->crd_skip, enccrd->crd_skip));
            safestats.st_skipmismatch++;
            err = EINVAL;
            goto errout;
        }
        oplen = enccrd->crd_skip + enccrd->crd_len;
        if (maccrd->crd_skip + maccrd->crd_len != oplen) {
            DPRINTF(("%s: hash amount %u != crypt amount %u\n",
                __func__, maccrd->crd_skip + maccrd->crd_len,
                oplen));
            safestats.st_lenmismatch++;
            err = EINVAL;
            goto errout;
        }
#ifdef SAFE_DEBUG
        if (debug) {
            printf("mac: skip %d, len %d, inject %d\n",
                maccrd->crd_skip, maccrd->crd_len,
                maccrd->crd_inject);
            printf("enc: skip %d, len %d, inject %d\n",
                enccrd->crd_skip, enccrd->crd_len,
                enccrd->crd_inject);
            printf("bypass %d coffset %d oplen %d\n",
                bypass, coffset, oplen);
        }
#endif
        if (coffset & 3) {  /* offset must be 32-bit aligned */
            DPRINTF(("%s: coffset %u misaligned\n",
                __func__, coffset));
            safestats.st_coffmisaligned++;
            err = EINVAL;
            goto errout;
        }
        coffset >>= 2;
        if (coffset > 255) {    /* offset must be <256 dwords */
            DPRINTF(("%s: coffset %u too big\n",
                __func__, coffset));
            safestats.st_cofftoobig++;
            err = EINVAL;
            goto errout;
        }
        /*
         * Tell the hardware to copy the header to the output.
         * The header is defined as the data from the end of
         * the bypass to the start of data to be encrypted.
         * Typically this is the inline IV. Note that you need
         * to do this even if src+dst are the same; it appears
         * that w/o this bit the crypted data is written
         * immediately after the bypass data.
         */
        cmd1 |= SAFE_SA_CMD1_HDRCOPY;
        /*
         * Disable IP header mutable bit handling. This is
         * needed to get correct HMAC calculations.
         */
        cmd1 |= SAFE_SA_CMD1_MUTABLE;
    } else {
        if (enccrd) {
            bypass = enccrd->crd_skip;
            oplen = bypass + enccrd->crd_len;
        } else {
            bypass = maccrd->crd_skip;
            oplen = bypass + maccrd->crd_len;
        }
        coffset = 0;
    }
    /* XXX verify multiple of 4 when using s/g */
    if (bypass > 96) {      /* bypass offset must be <= 96 bytes */
        DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
        safestats.st_bypasstoobig++;
        err = EINVAL;
        goto errout;
    }

    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
            safestats.st_noload++;
            err = ENOMEM;
            goto errout;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
            safestats.st_noload++;
            err = ENOMEM;
            goto errout;
        }
    }
    nicealign = safe_dmamap_aligned(sc, &re->re_src);
    uniform = safe_dmamap_uniform(sc, &re->re_src);

    DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
        nicealign, uniform, re->re_src.nsegs));
    if (re->re_src.nsegs > 1) {
        re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
            ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
        for (i = 0; i < re->re_src_nsegs; i++) {
            /* NB: no need to check if there's space */
            pd = sc->sc_spfree;
            if (++(sc->sc_spfree) == sc->sc_springtop)
                sc->sc_spfree = sc->sc_spring;

            KASSERT((pd->pd_flags&3) == 0 ||
                (pd->pd_flags&3) == SAFE_PD_DONE,
                ("bogus source particle descriptor; flags %x",
                pd->pd_flags));
            pd->pd_addr = re->re_src_segs[i].ds_addr;
            pd->pd_size = re->re_src_segs[i].ds_len;
            pd->pd_flags = SAFE_PD_READY;
        }
        cmd0 |= SAFE_SA_CMD0_IGATHER;
    } else {
        /*
         * No need for gather, reference the operand directly.
         */
        re->re_desc.d_src = re->re_src_segs[0].ds_addr;
    }

    if (enccrd == NULL && maccrd != NULL) {
        /*
         * Hash op; no destination needed.
         */
    } else {
        if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
            if (!nicealign) {
                safestats.st_iovmisaligned++;
                err = EINVAL;
                goto errout;
            }
            if (uniform != 1) {
                device_printf(sc->sc_dev, "!uniform source\n");
                if (!uniform) {
                    /*
                     * There's no way to handle the DMA
                     * requirements with this uio. We
                     * could create a separate DMA area for
                     * the result and then copy it back,
                     * but for now we just bail and return
                     * an error. Note that uio requests
                     * > SAFE_MAX_DSIZE are handled because
                     * the DMA map and segment list for the
                     * destination will result in a
                     * destination particle list that does
                     * the necessary scatter DMA.
                     */
                    safestats.st_iovnotuniform++;
                    err = EINVAL;
                    goto errout;
                }
            } else
                re->re_dst = re->re_src;
        } else {
            safestats.st_badflags++;
            err = EINVAL;
            goto errout;
        }

        if (re->re_dst.nsegs > 1) {
            re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
                ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
            for (i = 0; i < re->re_dst_nsegs; i++) {
                pd = sc->sc_dpfree;
                KASSERT((pd->pd_flags&3) == 0 ||
                    (pd->pd_flags&3) == SAFE_PD_DONE,
                    ("bogus dest particle descriptor; flags %x",
                    pd->pd_flags));
                if (++(sc->sc_dpfree) == sc->sc_dpringtop)
                    sc->sc_dpfree = sc->sc_dpring;
                pd->pd_addr = re->re_dst_segs[i].ds_addr;
                pd->pd_flags = SAFE_PD_READY;
            }
            cmd0 |= SAFE_SA_CMD0_OSCATTER;
        } else {
            /*
             * No need for scatter, reference the operand directly.
             */
            re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
        }
    }

    /*
     * All done with setup; fill in the SA command words
     * and the packet engine descriptor. The operation
     * is now ready for submission to the hardware.
     */
    sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
    sa->sa_cmd1 = cmd1
        | (coffset << SAFE_SA_CMD1_OFFSET_S)
        | SAFE_SA_CMD1_SAREV1   /* Rev 1 SA data structure */
        | SAFE_SA_CMD1_SRPCI
        ;
    /*
     * NB: the order of writes is important here. In case the
     * chip is scanning the ring because of an outstanding request
     * it might nab this one too. In that case we need to make
     * sure the setup is complete before we write the length
     * field of the descriptor as it signals the descriptor is
     * ready for processing.
     */
    re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
    if (maccrd)
        re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
    wmb();
    re->re_desc.d_len = oplen
        | SAFE_PE_LEN_READY
        | (bypass << SAFE_PE_LEN_BYPASS_S)
        ;

    safestats.st_ipackets++;
    safestats.st_ibytes += oplen;

    if (++(sc->sc_front) == sc->sc_ringtop)
        sc->sc_front = sc->sc_ring;

    /* XXX honor batching */
    safe_feed(sc, re);
    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
    return (0);

errout:
    if (re->re_src.map != re->re_dst.map)
        pci_unmap_operand(sc, &re->re_dst);
    if (re->re_src.map)
        pci_unmap_operand(sc, &re->re_src);
    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
    if (err != ERESTART) {
        crp->crp_etype = err;
        crypto_done(crp);
    } else {
        sc->sc_needwakeup |= CRYPTO_SYMQ;
    }
    return (err);
}

static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
    struct cryptop *crp = (struct cryptop *)re->re_crp;
    struct cryptodesc *crd;

    DPRINTF(("%s()\n", __FUNCTION__));

    safestats.st_opackets++;
    safestats.st_obytes += re->re_dst.mapsize;

    if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
        device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
            re->re_desc.d_csr,
            re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
        safestats.st_peoperr++;
        crp->crp_etype = EIO;       /* something more meaningful? */
    }

    if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
        pci_unmap_operand(sc, &re->re_dst);
    pci_unmap_operand(sc, &re->re_src);

    /*
     * If result was written to a different mbuf chain, swap
     * it in as the return value and reclaim the original.
     */
    if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
        device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
        /* kfree_skb(skb) */
        /* crp->crp_buf = (caddr_t)re->re_dst_skb */
        return;
    }

    if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
        /* copy out IV for future use */
        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
            int i;
            int ivsize;

            if (crd->crd_alg == CRYPTO_DES_CBC ||
                crd->crd_alg == CRYPTO_3DES_CBC) {
                ivsize = 2*sizeof(u_int32_t);
            } else if (crd->crd_alg == CRYPTO_AES_CBC) {
                ivsize = 4*sizeof(u_int32_t);
            } else
                continue;
            crypto_copydata(crp->crp_flags, crp->crp_buf,
                crd->crd_skip + crd->crd_len - ivsize, ivsize,
                (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
            for (i = 0;
                 i < ivsize/sizeof(sc->sc_sessions[re->re_sesn].ses_iv[0]);
                 i++)
                sc->sc_sessions[re->re_sesn].ses_iv[i] =
                    cpu_to_le32(sc->sc_sessions[re->re_sesn].ses_iv[i]);
            break;
        }
    }

    if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
        /* copy out ICV result */
        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
            if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
                crd->crd_alg == CRYPTO_SHA1_HMAC ||
                crd->crd_alg == CRYPTO_NULL_HMAC))
                continue;
            if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
                /*
                 * SHA-1 ICV's are byte-swapped; fix 'em up
                 * before copying them to their destination.
                 */
                re->re_sastate.sa_saved_indigest[0] =
                    cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
                re->re_sastate.sa_saved_indigest[1] =
                    cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
                re->re_sastate.sa_saved_indigest[2] =
                    cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
            } else {
                re->re_sastate.sa_saved_indigest[0] =
                    cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
                re->re_sastate.sa_saved_indigest[1] =
                    cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
                re->re_sastate.sa_saved_indigest[2] =
                    cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
            }
            crypto_copyback(crp->crp_flags, crp->crp_buf,
                crd->crd_inject,
                sc->sc_sessions[re->re_sesn].ses_mlen,
                (caddr_t)re->re_sastate.sa_saved_indigest);
            break;
        }
    }
    crypto_done(crp);
}

#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
#define SAFE_RNG_MAXWAIT    1000

static void
safe_rng_init(struct safe_softc *sc)
{
    u_int32_t w, v;
    int i;

    DPRINTF(("%s()\n", __FUNCTION__));

    WRITE_REG(sc, SAFE_RNG_CTRL, 0);
    /* use default value according to the manual */
    WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);    /* magic from SafeNet */
    WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

    /*
     * There is a bug in rev 1.0 of the 1140 that when the RNG
     * is brought out of reset the ready status flag does not
     * work until the RNG has finished its internal initialization.
     *
     * So in order to determine the device is through its
     * initialization we must read the data register, using the
     * status reg in the read in case it is initialized. Then read
     * the data register until it changes from the first read.
     * Once it changes read the data register until it changes
     * again. At this time the RNG is considered initialized.
     * This could take between 750ms - 1000ms in time.
     */
    i = 0;
    w = READ_REG(sc, SAFE_RNG_OUT);
    do {
        v = READ_REG(sc, SAFE_RNG_OUT);
        if (v != w) {
            w = v;
            break;
        }
        DELAY(10);
    } while (++i < SAFE_RNG_MAXWAIT);

    /* wait until the data changes again */
    i = 0;
    do {
        v = READ_REG(sc, SAFE_RNG_OUT);
        if (v != w)
            break;
        DELAY(10);
    } while (++i < SAFE_RNG_MAXWAIT);
}

static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
    DPRINTF(("%s()\n", __FUNCTION__));

    WRITE_REG(sc, SAFE_RNG_CTRL,
        READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
    DPRINTF(("%s()\n", __FUNCTION__));

    WRITE_REG(sc, SAFE_RNG_CTRL,
        READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
    int i;

    i = 0;
    while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
        ;
    return READ_REG(sc, SAFE_RNG_OUT);
}

static int
safe_read_random(void *arg, u_int32_t *buf, int maxwords)
{
    struct safe_softc *sc = (struct safe_softc *) arg;
    int i, rc;

    DPRINTF(("%s()\n", __FUNCTION__));

    safestats.st_rng++;
    /*
     * Fetch the next block of data.
     */
    if (maxwords > safe_rngbufsize)
        maxwords = safe_rngbufsize;
    if (maxwords > SAFE_RNG_MAXBUFSIZ)
        maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
    /* read as much as we can */
    for (rc = 0; rc < maxwords; rc++) {
        if (READ_REG(sc, SAFE_RNG_STAT) != 0)
            break;
        buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
    }
    if (rc == 0)
        return 0;
    /*
     * Check the comparator alarm count and reset the h/w if
     * it exceeds our threshold. This guards against the
     * hardware oscillators resonating with external signals.
     */
    if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
        u_int32_t freq_inc, w;

        DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
            (unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
        safestats.st_rngalarm++;
        safe_rng_enable_short_cycle(sc);
        freq_inc = 18;
        for (i = 0; i < 64; i++) {
            w = READ_REG(sc, SAFE_RNG_CNFG);
            freq_inc = ((w + freq_inc) & 0x3fL);
            w = ((w & ~0x3fL) | freq_inc);
            WRITE_REG(sc, SAFE_RNG_CNFG, w);

            WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

            (void) safe_rng_read(sc);
            DELAY(25);

            if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
                safe_rng_disable_short_cycle(sc);
                goto retry;
            }
            freq_inc = 1;
        }
        safe_rng_disable_short_cycle(sc);
    } else
        WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

    return(rc);
}
#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
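/*
 * A note on the retuning loop in safe_read_random() above: when the
 * alarm counter trips, the driver short-cycles the RNG and walks the
 * low 6 bits of SAFE_RNG_CNFG (the oscillator frequency trim), reading
 * and discarding a word at each setting until a 25us window passes
 * with no new alarms, then resumes normal (long) cycling and retries
 * the read.
 */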

/*
 * Resets the board. Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
    u_int32_t v;
    /*
     * Reset the device. The manual says no delay
     * is needed between marking and clearing reset.
     */
    DPRINTF(("%s()\n", __FUNCTION__));

    v = READ_REG(sc, SAFE_PE_DMACFG) &~
        (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
         SAFE_PE_DMACFG_SGRESET);
    WRITE_REG(sc, SAFE_PE_DMACFG, v
        | SAFE_PE_DMACFG_PERESET
        | SAFE_PE_DMACFG_PDRRESET
        | SAFE_PE_DMACFG_SGRESET);
    WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
    u_int32_t v, dwords;

    DPRINTF(("%s()\n", __FUNCTION__));

    v = READ_REG(sc, SAFE_PE_DMACFG);
    v &=~ ( SAFE_PE_DMACFG_PEMODE
        | SAFE_PE_DMACFG_FSENA      /* failsafe enable */
        | SAFE_PE_DMACFG_GPRPCI     /* gather ring on PCI */
        | SAFE_PE_DMACFG_SPRPCI     /* scatter ring on PCI */
        | SAFE_PE_DMACFG_ESDESC     /* endian-swap descriptors */
        | SAFE_PE_DMACFG_ESPDESC    /* endian-swap part. desc's */
        | SAFE_PE_DMACFG_ESSA       /* endian-swap SA's */
        | SAFE_PE_DMACFG_ESPACKET   /* swap the packet data */
        );
    v |= SAFE_PE_DMACFG_FSENA       /* failsafe enable */
        | SAFE_PE_DMACFG_GPRPCI     /* gather ring on PCI */
        | SAFE_PE_DMACFG_SPRPCI     /* scatter ring on PCI */
        | SAFE_PE_DMACFG_ESDESC     /* endian-swap descriptors */
        | SAFE_PE_DMACFG_ESPDESC    /* endian-swap part. desc's */
        | SAFE_PE_DMACFG_ESSA       /* endian-swap SA's */
#if 0
        | SAFE_PE_DMACFG_ESPACKET   /* swap the packet data */
#endif
        ;
    WRITE_REG(sc, SAFE_PE_DMACFG, v);

#ifdef __BIG_ENDIAN
    /* tell the safenet that we are 4321 and not 1234 */
    WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
#endif

    if (sc->sc_chiprev == SAFE_REV(1,0)) {
        /*
         * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
         * "target mode transfers" done while the chip is DMA'ing
         * >1020 bytes cause the hardware to lockup. To avoid this
         * we reduce the max PCI transfer size and use small source
         * particle descriptors (<= 256 bytes).
         */
        WRITE_REG(sc, SAFE_DMA_CFG, 256);
        device_printf(sc->sc_dev,
            "Reduce max DMA size to %u words for rev %u.%u WAR\n",
            (unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
            (unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
            (unsigned) SAFE_REV_MIN(sc->sc_chiprev));
        sc->sc_max_dsize = 256;
    } else {
        sc->sc_max_dsize = SAFE_MAX_DSIZE;
    }

    /* NB: operands+results are overlaid */
    WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
    WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
    /*
     * Configure ring entry size and number of items in the ring.
     */
    KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
        ("PE ring entry not 32-bit aligned!"));
    dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
    WRITE_REG(sc, SAFE_PE_RINGCFG,
        (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
    WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */

    WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
    WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
    WRITE_REG(sc, SAFE_PE_PARTSIZE,
        (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
    /*
     * NB: destination particles are fixed size. We use
     *     an mbuf cluster and require all results go to
     *     clusters or smaller.
     */
    WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);

    /* it's now safe to enable PE mode, do it */
    WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

    /*
     * Configure hardware to use level-triggered interrupts and
     * to interrupt after each descriptor is processed.
     */
    WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
    WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
    WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
    WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
safe_cleanchip(struct safe_softc *sc)
{
    DPRINTF(("%s()\n", __FUNCTION__));

    if (sc->sc_nqchip != 0) {
        struct safe_ringentry *re = sc->sc_back;

        while (re != sc->sc_front) {
            if (re->re_desc.d_csr != 0)
                safe_free_entry(sc, re);
            if (++re == sc->sc_ringtop)
                re = sc->sc_ring;
        }
        sc->sc_back = re;
        sc->sc_nqchip = 0;
    }
}

/*
 * free a safe_q
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
    struct cryptop *crp;

    DPRINTF(("%s()\n", __FUNCTION__));

    /*
     * Free header MCR
     */
    if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
#ifdef NOTYET
        m_freem(re->re_dst_m);
#else
        printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
#endif

    crp = (struct cryptop *)re->re_crp;

    re->re_desc.d_csr = 0;

    crp->crp_etype = EFAULT;
    crypto_done(crp);
    return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
    DPRINTF(("%s()\n", __FUNCTION__));

    safe_reset_board(sc);
    safe_init_board(sc);
    safe_cleanchip(sc);
}

/*
 * Is the operand suitably aligned for direct DMA? Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
{
    int i;

    DPRINTF(("%s()\n", __FUNCTION__));

    for (i = 0; i < op->nsegs; i++) {
        if (op->segs[i].ds_addr & 3)
            return (0);
        if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
            return (0);
    }
    return (1);
}

/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation? The hardware requires that each ``particle''
 * but the last in an operation result have the same size. We
 * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
static int
safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
{
    int result = 1;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (op->nsegs > 0) {
        int i;

        for (i = 0; i < op->nsegs-1; i++) {
            if (op->segs[i].ds_len % sc->sc_max_dsize)
                return (0);
            if (op->segs[i].ds_len != sc->sc_max_dsize)
                result = 2;
        }
    }
    return (result);
}
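/*
 * For example, with sc_max_dsize == 256: segment lengths
 * [256, 256, 100] yield 1 (all but the last exactly max),
 * [512, 100] yield 2 (a multiple of max but not equal), and
 * [300, 100] yield 0 (not usable as a destination without a
 * bounce copy).
 */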

static int
safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
{
    struct safe_softc *sc = device_get_softc(dev);
    struct safe_pkq *q;
    unsigned long flags;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (sc == NULL) {
        krp->krp_status = EINVAL;
        goto err;
    }

    if (krp->krp_op != CRK_MOD_EXP) {
        krp->krp_status = EOPNOTSUPP;
        goto err;
    }

    q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
    if (q == NULL) {
        krp->krp_status = ENOMEM;
        goto err;
    }
    memset(q, 0, sizeof(*q));
    q->pkq_krp = krp;
    INIT_LIST_HEAD(&q->pkq_list);

    spin_lock_irqsave(&sc->sc_pkmtx, flags);
    list_add_tail(&q->pkq_list, &sc->sc_pkq);
    safe_kfeed(sc);
    spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
    return (0);

err:
    crypto_kdone(krp);
    return (0);
}

#define SAFE_CRK_PARAM_BASE 0
#define SAFE_CRK_PARAM_EXP  1
#define SAFE_CRK_PARAM_MOD  2

static int
safe_kstart(struct safe_softc *sc)
{
    struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
    int exp_bits, mod_bits, base_bits;
    u_int32_t op, a_off, b_off, c_off, d_off;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
        krp->krp_status = EINVAL;
        return (1);
    }

    base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
    if (base_bits > 2048)
        goto too_big;
    if (base_bits <= 0)     /* 5. base not zero */
        goto too_small;

    exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
    if (exp_bits > 2048)
        goto too_big;
    if (exp_bits <= 0)      /* 1. exponent word length > 0 */
        goto too_small;     /* 4. exponent not zero */

    mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
    if (mod_bits > 2048)
        goto too_big;
    if (mod_bits <= 32)     /* 2. modulus word length > 1 */
        goto too_small;     /* 8. MSW of modulus != zero */
    if (mod_bits < exp_bits)    /* 3. modulus len >= exponent len */
        goto too_small;
    if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
        goto bad_domain;    /* 6. modulus is odd */
    if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
        goto too_small;     /* make sure result will fit */

    /* 7. modulus > base */
    if (mod_bits < base_bits)
        goto too_small;
    if (mod_bits == base_bits) {
        u_int8_t *basep, *modp;
        int i;

        basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
            ((base_bits + 7) / 8) - 1;
        modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
            ((mod_bits + 7) / 8) - 1;

        for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
            if (*modp < *basep)
                goto too_small;
            if (*modp > *basep)
                break;
        }
    }

    /* And on the 9th step, he rested. */

    WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
    WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
    if (mod_bits > 1024) {
        op = SAFE_PK_FUNC_EXP4;
        a_off = 0x000;
        b_off = 0x100;
        c_off = 0x200;
        d_off = 0x300;
    } else {
        op = SAFE_PK_FUNC_EXP16;
        a_off = 0x000;
        b_off = 0x080;
        c_off = 0x100;
        d_off = 0x180;
    }
    sc->sc_pk_reslen = b_off - a_off;
    sc->sc_pk_resoff = d_off;

    /* A is exponent, B is modulus, C is base, D is result */
    safe_kload_reg(sc, a_off, b_off - a_off,
        &krp->krp_param[SAFE_CRK_PARAM_EXP]);
    WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
    safe_kload_reg(sc, b_off, b_off - a_off,
        &krp->krp_param[SAFE_CRK_PARAM_MOD]);
    WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
    safe_kload_reg(sc, c_off, b_off - a_off,
        &krp->krp_param[SAFE_CRK_PARAM_BASE]);
    WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
    WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);

    WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);

    return (0);

too_big:
    krp->krp_status = E2BIG;
    return (1);
too_small:
    krp->krp_status = ERANGE;
    return (1);
bad_domain:
    krp->krp_status = EDOM;
    return (1);
}
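/*
 * A note on the PK RAM layout chosen above: operands up to 1024 bits
 * use SAFE_PK_FUNC_EXP16 with four 128-byte (0x80) windows for the
 * A/B/C/D operands; larger operands (up to the 2048-bit limit checked
 * earlier) fall back to SAFE_PK_FUNC_EXP4 with 256-byte (0x100)
 * windows.
 */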

static int
safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
{
    u_int plen = (cr->crp_nbits + 7) / 8;
    int i, sig = plen * 8;
    u_int8_t c, *p = cr->crp_p;

    DPRINTF(("%s()\n", __FUNCTION__));

    for (i = plen - 1; i >= 0; i--) {
        c = p[i];
        if (c != 0) {
            while ((c & 0x80) == 0) {
                sig--;
                c <<= 1;
            }
            break;
        }
        sig -= 8;
    }
    return (sig);
}
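/*
 * Worked example: a crparam carrying the value 0x1234 as the
 * little-endian bytes {0x34, 0x12} starts with sig = 16; the scan
 * finds p[1] = 0x12 and shifts off three leading zero bits, returning
 * 13 -- the number of significant bits in 0x1234.
 */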

static void
safe_kfeed(struct safe_softc *sc)
{
    struct safe_pkq *q, *tmp;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
        return;
    if (sc->sc_pkq_cur != NULL)
        return;
    list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
        sc->sc_pkq_cur = q;
        list_del(&q->pkq_list);
        if (safe_kstart(sc) != 0) {
            crypto_kdone(q->pkq_krp);
            kfree(q);
            sc->sc_pkq_cur = NULL;
        } else {
            /* op started, start polling */
            mod_timer(&sc->sc_pkto, jiffies + 1);
            break;
        }
    }
}

static void
safe_kpoll(unsigned long arg)
{
    struct safe_softc *sc = NULL;
    struct safe_pkq *q;
    struct crparam *res;
    int i;
    u_int32_t buf[64];
    unsigned long flags;

    DPRINTF(("%s()\n", __FUNCTION__));

    if (arg >= SAFE_MAX_CHIPS)
        return;
    sc = safe_chip_idx[arg];
    if (!sc) {
        DPRINTF(("%s() - bad callback\n", __FUNCTION__));
        return;
    }

    spin_lock_irqsave(&sc->sc_pkmtx, flags);
    if (sc->sc_pkq_cur == NULL)
        goto out;
    if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
        /* still running, check back later */
        mod_timer(&sc->sc_pkto, jiffies + 1);
        goto out;
    }

    q = sc->sc_pkq_cur;
    res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
    bzero(buf, sizeof(buf));
    bzero(res->crp_p, (res->crp_nbits + 7) / 8);
    for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
        buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
            sc->sc_pk_resoff + (i << 2)));
    bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
    /*
     * reduce the bits that need copying if possible
     */
    res->crp_nbits = min(res->crp_nbits, sc->sc_pk_reslen * 8);
    res->crp_nbits = safe_ksigbits(sc, res);

    for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
        WRITE_REG(sc, i, 0);

    crypto_kdone(q->pkq_krp);
    kfree(q);
    sc->sc_pkq_cur = NULL;

    safe_kfeed(sc);
out:
    spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
}

static void
safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
    struct crparam *n)
{
    u_int32_t buf[64], i;

    DPRINTF(("%s()\n", __FUNCTION__));

    bzero(buf, sizeof(buf));
    bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);

    for (i = 0; i < len >> 2; i++)
        WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
            cpu_to_le32(buf[i]));
}
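/*
 * NB (an inference from safe_ksigbits() above, which scans crp_p with
 * the most significant byte at the highest index): crparam big numbers
 * appear to be little-endian byte arrays, so the words written here
 * are the operand's 32-bit limbs, least significant first.
 */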

#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
    printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
        , tag
        , READ_REG(sc, SAFE_DMA_ENDIAN)
        , READ_REG(sc, SAFE_DMA_SRCADDR)
        , READ_REG(sc, SAFE_DMA_DSTADDR)
        , READ_REG(sc, SAFE_DMA_STAT)
    );
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
    printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
        , tag
        , READ_REG(sc, SAFE_HI_CFG)
        , READ_REG(sc, SAFE_HI_MASK)
        , READ_REG(sc, SAFE_HI_DESC_CNT)
        , READ_REG(sc, SAFE_HU_STAT)
        , READ_REG(sc, SAFE_HM_STAT)
    );
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
    u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

    /* NB: assume caller has lock on ring */
    printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
        tag,
        estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
        (unsigned long)(sc->sc_back - sc->sc_ring),
        (unsigned long)(sc->sc_front - sc->sc_ring));
}

static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
    int ix, nsegs;

    ix = re - sc->sc_ring;
    printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
        , tag
        , re, ix
        , re->re_desc.d_csr
        , re->re_desc.d_src
        , re->re_desc.d_dst
        , re->re_desc.d_sa
        , re->re_desc.d_len
    );
    if (re->re_src.nsegs > 1) {
        ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
            sizeof(struct safe_pdesc);
        for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
            printf(" spd[%u] %p: %p size %u flags %x"
                , ix, &sc->sc_spring[ix]
                , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
                , sc->sc_spring[ix].pd_size
                , sc->sc_spring[ix].pd_flags
            );
            if (sc->sc_spring[ix].pd_size == 0)
                printf(" (zero!)");
            printf("\n");
            if (++ix == SAFE_TOTAL_SPART)
                ix = 0;
        }
    }
    if (re->re_dst.nsegs > 1) {
        ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
            sizeof(struct safe_pdesc);
        for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
            printf(" dpd[%u] %p: %p flags %x\n"
                , ix, &sc->sc_dpring[ix]
                , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
                , sc->sc_dpring[ix].pd_flags
            );
            if (++ix == SAFE_TOTAL_DPART)
                ix = 0;
        }
    }
    printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
        re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
    printf("sa: key %x %x %x %x %x %x %x %x\n"
        , re->re_sa.sa_key[0]
        , re->re_sa.sa_key[1]
        , re->re_sa.sa_key[2]
        , re->re_sa.sa_key[3]
        , re->re_sa.sa_key[4]
        , re->re_sa.sa_key[5]
        , re->re_sa.sa_key[6]
        , re->re_sa.sa_key[7]
    );
    printf("sa: indigest %x %x %x %x %x\n"
        , re->re_sa.sa_indigest[0]
        , re->re_sa.sa_indigest[1]
        , re->re_sa.sa_indigest[2]
        , re->re_sa.sa_indigest[3]
        , re->re_sa.sa_indigest[4]
    );
    printf("sa: outdigest %x %x %x %x %x\n"
        , re->re_sa.sa_outdigest[0]
        , re->re_sa.sa_outdigest[1]
        , re->re_sa.sa_outdigest[2]
        , re->re_sa.sa_outdigest[3]
        , re->re_sa.sa_outdigest[4]
    );
    printf("sr: iv %x %x %x %x\n"
        , re->re_sastate.sa_saved_iv[0]
        , re->re_sastate.sa_saved_iv[1]
        , re->re_sastate.sa_saved_iv[2]
        , re->re_sastate.sa_saved_iv[3]
    );
    printf("sr: hashbc %u indigest %x %x %x %x %x\n"
        , re->re_sastate.sa_saved_hashbc
        , re->re_sastate.sa_saved_indigest[0]
        , re->re_sastate.sa_saved_indigest[1]
        , re->re_sastate.sa_saved_indigest[2]
        , re->re_sastate.sa_saved_indigest[3]
        , re->re_sastate.sa_saved_indigest[4]
    );
}

static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
    unsigned long flags;

    spin_lock_irqsave(&sc->sc_ringmtx, flags);
    printf("\nSafeNet Ring State:\n");
    safe_dump_intrstate(sc, tag);
    safe_dump_dmastatus(sc, tag);
    safe_dump_ringstate(sc, tag);
    if (sc->sc_nqchip) {
        struct safe_ringentry *re = sc->sc_back;
        do {
            safe_dump_request(sc, tag, re);
            if (++re == sc->sc_ringtop)
                re = sc->sc_ring;
        } while (re != sc->sc_front);
    }
    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
}
#endif /* SAFE_DEBUG */
1984
1985
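/*
 * PCI probe: map the register window, hook the (shared) interrupt,
 * carve the descriptor and particle rings out of coherent DMA memory,
 * and register with OCF every algorithm the DEVINFO register reports.
 */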
static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct safe_softc *sc = NULL;
	u32 mem_start, mem_len, cmd;
	int i, rc, devinfo;
	dma_addr_t raddr;
	static int num_chips = 0;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (pci_enable_device(dev) < 0)
		return(-ENODEV);

	if (!dev->irq) {
		printk("safe: found device with no IRQ assigned. check BIOS settings!\n");
		pci_disable_device(dev);
		return(-ENODEV);
	}

	if (pci_set_mwi(dev)) {
		printk("safe: pci_set_mwi failed!\n");
		pci_disable_device(dev);
		return(-ENODEV);
	}

	sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return(-ENOMEM);
	memset(sc, 0, sizeof(*sc));

	softc_device_init(sc, "safe", num_chips, safe_methods);

	sc->sc_irq = -1;
	sc->sc_cid = -1;
	sc->sc_pcidev = dev;
	if (num_chips < SAFE_MAX_CHIPS) {
		safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
		num_chips++;
	}

	INIT_LIST_HEAD(&sc->sc_pkq);
	spin_lock_init(&sc->sc_pkmtx);

	pci_set_drvdata(sc->sc_pcidev, sc);

	/* we read its hardware registers as memory */
	mem_start = pci_resource_start(sc->sc_pcidev, 0);
	mem_len = pci_resource_len(sc->sc_pcidev, 0);

	sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
	if (!sc->sc_base_addr) {
		device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
			mem_start, mem_start + mem_len - 1);
		goto out;
	}

	/* limit DMA to the 32-bit address space the device supports */
	if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
		device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
		goto out;
	}
	if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
		device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
		goto out;
	}

	pci_set_master(sc->sc_pcidev);

	pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);

	if (!(cmd & PCI_COMMAND_MEMORY)) {
		device_printf(sc->sc_dev, "failed to enable memory mapping\n");
		goto out;
	}

	if (!(cmd & PCI_COMMAND_MASTER)) {
		device_printf(sc->sc_dev, "failed to enable bus mastering\n");
		goto out;
	}

	rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
	if (rc) {
		device_printf(sc->sc_dev, "failed to hook irq %d\n", dev->irq);
		goto out;
	}
	sc->sc_irq = dev->irq;

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
		(SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Allocate packet engine descriptors.
	 */
	sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
		SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
		&sc->sc_ringalloc.dma_paddr);
	if (!sc->sc_ringalloc.dma_vaddr) {
		device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
		goto out;
	}

	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
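	/*
	 * Pre-link each ring entry: point its descriptor at the physical
	 * address of its own SA record, and the SA at its state record,
	 * so these fields never need recomputing per request.
	 */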
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
			offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
			offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
	spin_lock_init(&sc->sc_ringmtx);

	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
		SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
		&sc->sc_spalloc.dma_paddr);
	if (!sc->sc_spalloc.dma_vaddr) {
		device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
		goto out;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
		SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
		&sc->sc_dpalloc.dma_paddr);
	if (!sc->sc_dpalloc.dma_vaddr) {
		device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
		goto out;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(sc->sc_dev, "could not get crypto driver id\n");
		goto out;
	}

	printf("%s:", device_get_nameunit(sc->sc_dev));

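	/*
	 * Walk the DEVINFO register and register each engine the part
	 * actually has with OCF.  On a fully-featured part the summary
	 * line printed below looks like:
	 *
	 *	safe0: rng key des/3des aes md5 sha1 null
	 */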
	devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		printf(" rng");
	}
	if (devinfo & SAFE_DEVINFO_PKEY) {
		printf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
#if 0
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
#endif
		init_timer(&sc->sc_pkto);
		sc->sc_pkto.function = safe_kpoll;
		sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
	}
	if (devinfo & SAFE_DEVINFO_DES) {
		printf(" des/3des");
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_AES) {
		printf(" aes");
		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_MD5) {
		printf(" md5");
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_SHA1) {
		printf(" sha1");
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	}
	printf(" null");
	crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
	/* XXX other supported algorithms */
	printf("\n");

	safe_reset_board(sc);		/* reset h/w */
	safe_init_board(sc);		/* init h/w */

#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
		safe_rng_init(sc);
		crypto_rregister(sc->sc_cid, safe_read_random, sc);
	}
#endif /* CONFIG_OCF_RANDOMHARVEST && !SAFE_NO_RNG */

	return (0);

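	/*
	 * Error exit: undo whatever was set up above.  NB: sc_irq and
	 * sc_cid start out as -1, so it is safe to land here early.
	 */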
out:
	if (sc->sc_cid >= 0)
		crypto_unregister_all(sc->sc_cid);
	if (sc->sc_irq != -1)
		free_irq(sc->sc_irq, sc);
	if (sc->sc_ringalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
			SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
			sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
	if (sc->sc_spalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
			SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
			sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
	if (sc->sc_dpalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
			SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
			sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
	kfree(sc);
	return(-ENODEV);
}

static void safe_remove(struct pci_dev *dev)
{
	struct safe_softc *sc = pci_get_drvdata(dev);

	DPRINTF(("%s()\n", __FUNCTION__));

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	if (sc->sc_flags & SAFE_FLAGS_KEY)	/* timer only armed with PK support */
		del_timer_sync(&sc->sc_pkto);

	crypto_unregister_all(sc->sc_cid);

	safe_cleanchip(sc);

	if (sc->sc_irq != -1)
		free_irq(sc->sc_irq, sc);
	if (sc->sc_ringalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
			SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
			sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
	if (sc->sc_spalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
			SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
			sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
	if (sc->sc_dpalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
			SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
			sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
	sc->sc_irq = -1;
	sc->sc_ringalloc.dma_vaddr = NULL;
	sc->sc_spalloc.dma_vaddr = NULL;
	sc->sc_dpalloc.dma_vaddr = NULL;
}

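/*
 * PCI glue: match the SafeNet SafeXcel vendor/device ID (any subsystem)
 * and hand matching devices to safe_probe()/safe_remove().
 */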
static struct pci_device_id safe_pci_tbl[] = {
	{ PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ },
};
MODULE_DEVICE_TABLE(pci, safe_pci_tbl);

static struct pci_driver safe_driver = {
	.name	  = "safe",
	.id_table = safe_pci_tbl,
	.probe	  = safe_probe,
	.remove	  = safe_remove,
	/* add PM stuff here one day */
};

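/*
 * Module entry point.  NB: pci_register_driver_compat() is, as its name
 * suggests, an OCF compatibility shim; it appears to normalize the
 * pci_register_driver() return value across 2.6 kernel versions that
 * used different return conventions (see the OCF compat headers).
 */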
static int __init safe_init (void)
{
	struct safe_softc *sc = NULL;
	int rc;

	DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));

	rc = pci_register_driver(&safe_driver);
	pci_register_driver_compat(&safe_driver, rc);

	return rc;
}

static void __exit safe_exit (void)
{
	pci_unregister_driver(&safe_driver);
}

module_init(safe_init);
module_exit(safe_exit);

MODULE_LICENSE("BSD");
MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
MODULE_DESCRIPTION("OCF driver for SafeNet PCI crypto devices");