/*-
 * Linux port done by David McCullough <david_mccullough@mcafee.com>
 * Copyright (C) 2004-2010 David McCullough
 * The license and original author are listed below.
 *
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

__FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/version.h>
#include <linux/skbuff.h>
#include <asm/io.h>
/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */

#include <cryptodev.h>
#include <uio.h>
#include <safe/safereg.h>
#include <safe/safevar.h>
#define	DPRINTF(a)	do { \
				if (debug) { \
					printk("%s: ", sc ? \
						device_get_nameunit(sc->sc_dev) : "safe"); \
					printk a; \
				} \
			} while (0)
/*
 * until we find a cleaner way, include the BSD md5/sha1 code
 * here
 */
#define HMAC_HACK 1
#ifdef HMAC_HACK
#define LITTLE_ENDIAN 1234
#define BIG_ENDIAN 4321
#ifdef __LITTLE_ENDIAN
#define BYTE_ORDER LITTLE_ENDIAN
#endif
#ifdef __BIG_ENDIAN
#define BYTE_ORDER BIG_ENDIAN
#endif
#include <safe/md5.h>
#include <safe/md5.c>
#include <safe/sha1.h>
#include <safe/sha1.c>
u_int8_t hmac_ipad_buffer[64] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};

u_int8_t hmac_opad_buffer[64] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};
#endif /* HMAC_HACK */
/* add proc entry for this */
struct safe_stats safestats;

#define debug safe_debug
int safe_debug = 0;
module_param(safe_debug, int, 0644);
MODULE_PARM_DESC(safe_debug, "Enable debug");
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
static	void safe_rng_init(struct safe_softc *);
int safe_rngbufsize = 8;		/* 32 bytes each read */
module_param(safe_rngbufsize, int, 0644);
MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
int safe_rngmaxalarm = 8;		/* max alarms before reset */
module_param(safe_rngmaxalarm, int, 0644);
MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */

static	void safe_totalreset(struct safe_softc *sc);
static	int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
static	int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
static	int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
static	int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
static	int safe_kstart(struct safe_softc *sc);
static	int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
static	void safe_kfeed(struct safe_softc *sc);
static	void safe_kpoll(unsigned long arg);
static	void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
				u_int32_t len, struct crparam *n);

static	int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
static	int safe_freesession(device_t, u_int64_t);
static	int safe_process(device_t, struct cryptop *, int);
static device_method_t safe_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_freesession, safe_freesession),
	DEVMETHOD(cryptodev_process,	safe_process),
	DEVMETHOD(cryptodev_kprocess,	safe_kprocess),
};
#define	READ_REG(sc,r)		readl((sc)->sc_base_addr + (r))
#define	WRITE_REG(sc,r,val)	writel((val), (sc)->sc_base_addr + (r))

#define SAFE_MAX_CHIPS 8
static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
/*
 * split our buffers up into safe DMAable byte fragments to avoid lockup
 * bug in 1141 HW on rev 1.0.
 */
static int
pci_map_linear(
	struct safe_softc *sc,
	struct safe_operand *buf,
	void *addr,
	int len)
{
	dma_addr_t tmp;
	int chunk, tlen = len;

	tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);

	buf->mapsize += len;
	while (len > 0) {
		chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
		buf->segs[buf->nsegs].ds_addr = tmp;
		buf->segs[buf->nsegs].ds_len  = chunk;
		buf->segs[buf->nsegs].ds_tlen = tlen;
		buf->nsegs++;
		tmp  += chunk;
		len  -= chunk;
		tlen  = 0;
	}
	return 0;
}
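
/*
 * NB: a worked example of the chunking above (assuming the rev 1.0
 * workaround value sc_max_dsize = 256): a hypothetical 600-byte buffer
 * yields three segments of 256, 256 and 88 bytes.  Only the first
 * segment records ds_tlen = 600; pci_unmap_operand() keys off ds_tlen,
 * so the single pci_map_single() mapping is unmapped exactly once.
 */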
/*
 * map in a given uio buffer (great on some arches :-)
 */
static int
pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
{
	struct iovec *iov = uio->uio_iov;
	int n;

	DPRINTF(("%s()\n", __FUNCTION__));

	buf->mapsize = 0;
	buf->nsegs = 0;

	for (n = 0; n < uio->uio_iovcnt; n++) {
		pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
		iov++;
	}

	/* identify this buffer by the first segment */
	buf->map = (void *) buf->segs[0].ds_addr;
	return(0);
}
/*
 * map in a given sk_buff
 */
static int
pci_map_skb(struct safe_softc *sc, struct safe_operand *buf, struct sk_buff *skb)
{
	int i;

	DPRINTF(("%s()\n", __FUNCTION__));

	buf->mapsize = 0;
	buf->nsegs = 0;

	pci_map_linear(sc, buf, skb->data, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		pci_map_linear(sc, buf,
				page_address(skb_shinfo(skb)->frags[i].page) +
					skb_shinfo(skb)->frags[i].page_offset,
				skb_shinfo(skb)->frags[i].size);
	}

	/* identify this buffer by the first segment */
	buf->map = (void *) buf->segs[0].ds_addr;
	return(0);
}
#if 0 /* not needed at this time */
static void
pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
{
	int i;

	DPRINTF(("%s()\n", __FUNCTION__));
	for (i = 0; i < buf->nsegs; i++)
		pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
}
#endif
static void
pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
{
	int i;

	DPRINTF(("%s()\n", __FUNCTION__));
	for (i = 0; i < buf->nsegs; i++) {
		if (buf->segs[i].ds_tlen) {
			DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
			pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
					buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
			DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
		}
		buf->segs[i].ds_addr = 0;
		buf->segs[i].ds_len = 0;
		buf->segs[i].ds_tlen = 0;
	}
}
/*
 * SafeXcel Interrupt routine
 */
static irqreturn_t
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
safe_intr(int irq, void *arg)
#else
safe_intr(int irq, void *arg, struct pt_regs *regs)
#endif
{
	struct safe_softc *sc = arg;
	int stat;
	unsigned long flags;

	stat = READ_REG(sc, SAFE_HM_STAT);

	DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));

	if (stat == 0)		/* shared irq, not for us */
		return IRQ_NONE;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		spin_lock_irqsave(&sc->sc_ringmtx, flags);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;

#ifdef SAFE_DEBUG
			if (debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
					DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
					break;
				}
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
					DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
					break;
				}
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
			(int)READ_REG(sc, SAFE_PE_DMASTAT));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
			sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}

	return IRQ_HANDLED;
}
/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	DPRINTF(("%s()\n", __FUNCTION__));
#ifdef SAFE_DEBUG
	if (debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}
#define	N(a)	(sizeof(a) / sizeof (a[0]))
static void
safe_setup_enckey(struct safe_session *ses, caddr_t key)
{
	int i;

	bcopy(key, ses->ses_key, ses->ses_klen / 8);

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);
}
static void
safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
{
#ifdef HMAC_HACK
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i;

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_ipad_buffer,
			SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
	}

	for (i = 0; i < klen; i++)
		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_opad_buffer,
			SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
	}

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_OPAD_VAL;

#if 0
	/*
	 * this code prevents SHA working on a BE host,
	 * so it is obviously wrong.  I think the byte
	 * swap setup we do with the chip fixes this for us
	 */

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
	}
#endif
#else /* HMAC_HACK */
	printk("safe: md5/sha not implemented\n");
#endif /* HMAC_HACK */
}
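
/*
 * NB: the two passes above are the standard HMAC precomputation: hash
 * the 64-byte block (key ^ ipad) to derive the inner digest state and
 * (key ^ opad) to derive the outer state.  Only these midstates are
 * kept (ses_hminner/ses_hmouter); the chip resumes from them for each
 * request, so the key itself is never rehashed per packet.
 */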
/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct safe_softc *sc = device_get_softc(dev);
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct safe_session *ses = NULL;
	int sesn;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC ||
		    c->cri_alg == CRYPTO_NULL_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC ||
		    c->cri_alg == CRYPTO_NULL_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);
	if (encini) {			/* validate key length */
		switch (encini->cri_alg) {
		case CRYPTO_DES_CBC:
			if (encini->cri_klen != 64)
				return (EINVAL);
			break;
		case CRYPTO_3DES_CBC:
			if (encini->cri_klen != 192)
				return (EINVAL);
			break;
		case CRYPTO_AES_CBC:
			if (encini->cri_klen != 128 &&
			    encini->cri_klen != 192 &&
			    encini->cri_klen != 256)
				return (EINVAL);
			break;
		}
	}

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct safe_session *)
			kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
		if (ses == NULL)
			return (ENOMEM);
		memset(ses, 0, sizeof(struct safe_session));
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct safe_session *)
				kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
			if (ses == NULL)
				return (ENOMEM);
			memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
			bcopy(sc->sc_sessions, ses, sesn *
				sizeof(struct safe_session));
			bzero(sc->sc_sessions, sesn *
				sizeof(struct safe_session));
			kfree(sc->sc_sessions);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct safe_session));
	ses->ses_used = 1;

	if (encini) {
		/* XXX may read fewer than requested */
		read_random(ses->ses_iv, sizeof(ses->ses_iv));

		ses->ses_klen = encini->cri_klen;
		if (encini->cri_key != NULL)
			safe_setup_enckey(ses, encini->cri_key);
	}

	if (macini) {
		ses->ses_mlen = macini->cri_mlen;
		if (ses->ses_mlen == 0) {
			if (macini->cri_alg == CRYPTO_MD5_HMAC)
				ses->ses_mlen = MD5_HASH_LEN;
			else
				ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (macini->cri_key != NULL) {
			safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
				macini->cri_klen / 8);
		}
	}

	*sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
	return (0);
}
/*
 * Deallocate a session.
 */
static int
safe_freesession(device_t dev, u_int64_t tid)
{
	struct safe_softc *sc = device_get_softc(dev);
	int session, ret;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (sc == NULL)
		return (EINVAL);

	session = SAFE_SESSION(sid);
	if (session < sc->sc_nsessions) {
		bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
		ret = 0;
	} else
		ret = EINVAL;
	return (ret);
}
static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safe_softc *sc = device_get_softc(dev);
	int err = 0, i, nicealign, uniform;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int bypass, oplen, ivsize;
	caddr_t iv;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;
	unsigned long flags;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		safestats.st_invalid++;
		return (EINVAL);
	}
	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		safestats.st_badsession++;
		return (EINVAL);
	}

	spin_lock_irqsave(&sc->sc_ringmtx, flags);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;
	re->re_sesn = SAFE_SESSION(crp->crp_sid);

	re->re_src.nsegs = 0;
	re->re_dst.nsegs = 0;

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		re->re_src_skb = (struct sk_buff *)crp->crp_buf;
		re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		re->re_src_io = (struct uio *)crp->crp_buf;
		re->re_dst_io = (struct uio *)crp->crp_buf;
	} else {
		safestats.st_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	sa = &re->re_sa;
	ses = &sc->sc_sessions[re->re_sesn];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		safestats.st_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
	cmd1 = 0;
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
			cmd0 |= SAFE_SA_CMD0_OP_HASH;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) {
			maccrd = NULL;
			enccrd = crd1;
			cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
			crd2->crd_alg == CRYPTO_3DES_CBC ||
			crd2->crd_alg == CRYPTO_AES_CBC ||
			crd2->crd_alg == CRYPTO_NULL_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
			crd2->crd_alg == CRYPTO_SHA1_HMAC ||
			crd2->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
	}

	if (enccrd) {
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			safe_setup_enckey(ses, enccrd->crd_key);

		if (enccrd->crd_alg == CRYPTO_DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_3DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
			ivsize = 4*sizeof(u_int32_t);
		} else {
			cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
			ivsize = 0;
		}
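
		/*
		 * NB: the ivsize arithmetic above follows the cipher block
		 * sizes: DES/3DES use an 8-byte IV (two 32-bit words), AES
		 * a 16-byte IV (four words); the null cipher carries no IV.
		 */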
		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
			else
				iv = (caddr_t) ses->ses_iv;
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
					enccrd->crd_inject, ivsize, iv);
			}
			bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
			/* make iv LE */
			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
				re->re_sastate.sa_saved_iv[i] =
					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
			re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				bcopy(enccrd->crd_iv,
					re->re_sastate.sa_saved_iv, ivsize);
			} else {
				crypto_copydata(crp->crp_flags, crp->crp_buf,
					enccrd->crd_inject, ivsize,
					(caddr_t)re->re_sastate.sa_saved_iv);
			}
			/* make iv LE */
			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
				re->re_sastate.sa_saved_iv[i] =
					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (maccrd) {
		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			safe_setup_mackey(ses, maccrd->crd_alg,
				maccrd->crd_key, maccrd->crd_klen / 8);
		}

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd0 |= SAFE_SA_CMD0_MD5;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else {
			cmd0 |= SAFE_SA_CMD0_HASH_NULL;
		}
		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
			sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
			sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}

	if (enccrd && maccrd) {
		/*
		 * The offset from hash data to the start of
		 * crypt data is the difference in the skips.
		 */
		bypass = maccrd->crd_skip;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		if (coffset < 0) {
			DPRINTF(("%s: hash does not precede crypt; "
				"mac skip %u enc skip %u\n",
				__func__, maccrd->crd_skip, enccrd->crd_skip));
			safestats.st_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		oplen = enccrd->crd_skip + enccrd->crd_len;
		if (maccrd->crd_skip + maccrd->crd_len != oplen) {
			DPRINTF(("%s: hash amount %u != crypt amount %u\n",
				__func__, maccrd->crd_skip + maccrd->crd_len,
				oplen));
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
#ifdef SAFE_DEBUG
		if (debug) {
			printf("mac: skip %d, len %d, inject %d\n",
				maccrd->crd_skip, maccrd->crd_len,
				maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
				enccrd->crd_skip, enccrd->crd_len,
				enccrd->crd_inject);
			printf("bypass %d coffset %d oplen %d\n",
				bypass, coffset, oplen);
		}
#endif
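
		/*
		 * NB: a worked example of the offsets above for a typical
		 * ESP-style authenticate+decrypt request: with mac skip 0,
		 * enc skip 16 and enc len 100, bypass = 0, coffset = 16 and
		 * oplen = 116; the sanity check then requires the hash to
		 * cover the same 116 bytes (mac skip + mac len = 116).
		 */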
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
				__func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
				__func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		if (enccrd) {
			bypass = enccrd->crd_skip;
			oplen = bypass + enccrd->crd_len;
		} else {
			bypass = maccrd->crd_skip;
			oplen = bypass + maccrd->crd_len;
		}
		coffset = 0;
	}
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = safe_dmamap_aligned(sc, &re->re_src);
	uniform = safe_dmamap_uniform(sc, &re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
		nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
				(pd->pd_flags&3) == SAFE_PD_DONE,
				("bogus source particle descriptor; flags %x",
				pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}

	if (enccrd == NULL && maccrd != NULL) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			if (uniform != 1) {
				device_printf(sc->sc_dev, "!uniform source\n");
				if (!uniform) {
					/*
					 * There's no way to handle the DMA
					 * requirements with this uio.  We
					 * could create a separate DMA area for
					 * the result and then copy it back,
					 * but for now we just bail and return
					 * an error.  Note that uio requests
					 * > SAFE_MAX_DSIZE are handled because
					 * the DMA map and segment list for the
					 * destination will result in a
					 * destination particle list that does
					 * the necessary scatter DMA.
					 */
					safestats.st_iovnotuniform++;
					err = EINVAL;
					goto errout;
				}
			}
			re->re_dst = re->re_src;
		} else {
			safestats.st_badflags++;
			err = EINVAL;
			goto errout;
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
				((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
					(pd->pd_flags&3) == SAFE_PD_DONE,
					("bogus dest particle descriptor; flags %x",
					pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}

	/*
	 * All done with setup; fillin the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
		    | SAFE_SA_CMD1_SRPCI
		    ;
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (maccrd)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
			  | SAFE_PE_LEN_READY
			  | (bypass << SAFE_PE_LEN_BYPASS_S)
			  ;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
	return (0);

errout:
	if (re->re_src.map != re->re_dst.map)
		pci_unmap_operand(sc, &re->re_dst);
	if (re->re_src.map)
		pci_unmap_operand(sc, &re->re_src);
	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}
static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct cryptodesc *crd;

	DPRINTF(("%s()\n", __FUNCTION__));

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
			re->re_desc.d_csr,
			re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;		/* something more meaningful? */
	}

	if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
		pci_unmap_operand(sc, &re->re_dst);
	pci_unmap_operand(sc, &re->re_src);

	/*
	 * If result was written to a different mbuf chain, swap
	 * it in as the return value and reclaim the original.
	 */
	if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
		device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
		/* kfree_skb(skb) */
		/* crp->crp_buf = (caddr_t)re->re_dst_skb */
		crp->crp_etype = EINVAL;
	}

	if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
		/* copy out IV for future use */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int i;
			int ivsize;

			if (crd->crd_alg == CRYPTO_DES_CBC ||
			    crd->crd_alg == CRYPTO_3DES_CBC) {
				ivsize = 2*sizeof(u_int32_t);
			} else if (crd->crd_alg == CRYPTO_AES_CBC) {
				ivsize = 4*sizeof(u_int32_t);
			} else
				continue;
			crypto_copydata(crp->crp_flags, crp->crp_buf,
				crd->crd_skip + crd->crd_len - ivsize, ivsize,
				(caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
			for (i = 0;
					i < ivsize/sizeof(sc->sc_sessions[re->re_sesn].ses_iv[0]);
					i++)
				sc->sc_sessions[re->re_sesn].ses_iv[i] =
					cpu_to_le32(sc->sc_sessions[re->re_sesn].ses_iv[i]);
			break;
		}
	}

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		/* copy out ICV result */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC ||
			    crd->crd_alg == CRYPTO_NULL_HMAC))
				continue;
			if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
				/*
				 * SHA-1 ICV's are byte-swapped; fix 'em up
				 * before copying them to their destination.
				 */
				re->re_sastate.sa_saved_indigest[0] =
					cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
				re->re_sastate.sa_saved_indigest[1] =
					cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
				re->re_sastate.sa_saved_indigest[2] =
					cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
			} else {
				re->re_sastate.sa_saved_indigest[0] =
					cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
				re->re_sastate.sa_saved_indigest[1] =
					cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
				re->re_sastate.sa_saved_indigest[2] =
					cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
			}
			crypto_copyback(crp->crp_flags, crp->crp_buf,
				crd->crd_inject,
				sc->sc_sessions[re->re_sesn].ses_mlen,
				(caddr_t)re->re_sastate.sa_saved_indigest);
			break;
		}
	}
	crypto_done(crp);
}
#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	DPRINTF(("%s()\n", __FUNCTION__));

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140 that when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750ms - 1000ms in time.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait Until data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
	} while (++i < SAFE_RNG_MAXWAIT);
}

static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	DPRINTF(("%s()\n", __FUNCTION__));

	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	DPRINTF(("%s()\n", __FUNCTION__));

	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}

static int
safe_read_random(void *arg, u_int32_t *buf, int maxwords)
{
	struct safe_softc *sc = (struct safe_softc *) arg;
	int i, rc;

	DPRINTF(("%s()\n", __FUNCTION__));

	/*
	 * Fetch the next block of data.
	 */
	if (maxwords > safe_rngbufsize)
		maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	/* read as much as we can */
	for (rc = 0; rc < maxwords; rc++) {
		if (READ_REG(sc, SAFE_RNG_STAT) != 0)
			break;
		buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
	}
	if (rc == 0)
		return 0;
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;
		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
			(unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);

			/*
			 * If we don't have a problem, exit the loop.
			 */
			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	return(rc);
}
#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;

	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	DPRINTF(("%s()\n", __FUNCTION__));

	v = READ_REG(sc, SAFE_PE_DMACFG) &~
		(SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
		 SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
				    | SAFE_PE_DMACFG_PERESET
				    | SAFE_PE_DMACFG_PDRRESET
				    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}
/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	DPRINTF(("%s()\n", __FUNCTION__));

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ (	SAFE_PE_DMACFG_PEMODE
		  | SAFE_PE_DMACFG_FSENA	/* failsafe enable */
		  | SAFE_PE_DMACFG_GPRPCI	/* gather ring on PCI */
		  | SAFE_PE_DMACFG_SPRPCI	/* scatter ring on PCI */
		  | SAFE_PE_DMACFG_ESDESC	/* endian-swap descriptors */
		  | SAFE_PE_DMACFG_ESPDESC	/* endian-swap part. desc's */
		  | SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
		  | SAFE_PE_DMACFG_ESPACKET	/* swap the packet data */
		  );
	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
#if 0
	  |  SAFE_PE_DMACFG_ESPACKET		/* swap the packet data */
#endif
	  ;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);

#ifdef __BIG_ENDIAN
	/* tell the safenet that we are 4321 and not 1234 */
	WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
#endif

	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
			"Reduce max DMA size to %u words for rev %u.%u WAR\n",
			(unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
			(unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
			(unsigned) SAFE_REV_MIN(sc->sc_chiprev));
		sc->sc_max_dsize = 256;
	} else {
		sc->sc_max_dsize = SAFE_MAX_DSIZE;
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
		("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
		(dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
		(SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}
/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp()
 * and, therefore, that the interrupt handler cannot run.
 */
static void
safe_cleanchip(struct safe_softc *sc)
{
	DPRINTF(("%s()\n", __FUNCTION__));

	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}
)
1480 * It is assumed that the caller is within splimp().
1483 safe_free_entry(struct safe_softc
*sc
, struct safe_ringentry
*re
)
1485 struct cryptop
*crp
;
1487 DPRINTF(("%s()\n", __FUNCTION__
));
1492 if ((re
->re_dst_skb
!= NULL
) && (re
->re_src_skb
!= re
->re_dst_skb
))
1494 m_freem(re
->re_dst_m
);
1496 printk("%s,%d: SKB not supported\n", __FILE__
, __LINE__
);
1499 crp
= (struct cryptop
*)re
->re_crp
;
1501 re
->re_desc
.d_csr
= 0;
1503 crp
->crp_etype
= EFAULT
;
1509 * Routine to reset the chip and clean up.
1510 * It is assumed that the caller is in splimp()
1513 safe_totalreset(struct safe_softc
*sc
)
1515 DPRINTF(("%s()\n", __FUNCTION__
));
1517 safe_reset_board(sc
);
1518 safe_init_board(sc
);
1523 * Is the operand suitable aligned for direct DMA. Each
1524 * segment must be aligned on a 32-bit boundary and all
1525 * but the last segment must be a multiple of 4 bytes.
1528 safe_dmamap_aligned(struct safe_softc
*sc
, const struct safe_operand
*op
)
1532 DPRINTF(("%s()\n", __FUNCTION__
));
1534 for (i
= 0; i
< op
->nsegs
; i
++) {
1535 if (op
->segs
[i
].ds_addr
& 3)
1537 if (i
!= (op
->nsegs
- 1) && (op
->segs
[i
].ds_len
& 3))
1544 * Is the operand suitable for direct DMA as the destination
1545 * of an operation. The hardware requires that each ``particle''
1546 * but the last in an operation result have the same size. We
1547 * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
1548 * 0 if some segment is not a multiple of of this size, 1 if all
1549 * segments are exactly this size, or 2 if segments are at worst
1550 * a multple of this size.
1553 safe_dmamap_uniform(struct safe_softc
*sc
, const struct safe_operand
*op
)
1557 DPRINTF(("%s()\n", __FUNCTION__
));
1559 if (op
->nsegs
> 0) {
1562 for (i
= 0; i
< op
->nsegs
-1; i
++) {
1563 if (op
->segs
[i
].ds_len
% sc
->sc_max_dsize
)
1565 if (op
->segs
[i
].ds_len
!= sc
->sc_max_dsize
)
1573 safe_kprocess(device_t dev
, struct cryptkop
*krp
, int hint
)
1575 struct safe_softc
*sc
= device_get_softc(dev
);
1577 unsigned long flags
;
1579 DPRINTF(("%s()\n", __FUNCTION__
));
1582 krp
->krp_status
= EINVAL
;
1586 if (krp
->krp_op
!= CRK_MOD_EXP
) {
1587 krp
->krp_status
= EOPNOTSUPP
;
1591 q
= (struct safe_pkq
*) kmalloc(sizeof(*q
), GFP_KERNEL
);
1593 krp
->krp_status
= ENOMEM
;
1596 memset(q
, 0, sizeof(*q
));
1598 INIT_LIST_HEAD(&q
->pkq_list
);
1600 spin_lock_irqsave(&sc
->sc_pkmtx
, flags
);
1601 list_add_tail(&q
->pkq_list
, &sc
->sc_pkq
);
1603 spin_unlock_irqrestore(&sc
->sc_pkmtx
, flags
);
#define	SAFE_CRK_PARAM_BASE	0
#define	SAFE_CRK_PARAM_EXP	1
#define	SAFE_CRK_PARAM_MOD	2

static int
safe_kstart(struct safe_softc *sc)
{
	struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
	int exp_bits, mod_bits, base_bits;
	u_int32_t op, a_off, b_off, c_off, d_off;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
		krp->krp_status = EINVAL;
		return (1);
	}

	base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
	if (base_bits > 2048)
		goto too_big;
	if (base_bits <= 0)		/* 5. base not zero */
		goto too_small;

	exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
	if (exp_bits > 2048)
		goto too_big;
	if (exp_bits <= 0)		/* 1. exponent word length > 0 */
		goto too_small;		/* 4. exponent not zero */

	mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
	if (mod_bits > 2048)
		goto too_big;
	if (mod_bits <= 32)		/* 2. modulus word length > 1 */
		goto too_small;		/* 8. MSW of modulus != zero */
	if (mod_bits < exp_bits)	/* 3. modulus len >= exponent len */
		goto too_small;
	if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
		goto bad_domain;	/* 6. modulus is odd */
	if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
		goto too_small;		/* make sure result will fit */

	/* 7. modulus > base */
	if (mod_bits < base_bits)
		goto too_small;
	if (mod_bits == base_bits) {
		u_int8_t *basep, *modp;
		int i;

		basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
			((base_bits + 7) / 8) - 1;
		modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
			((mod_bits + 7) / 8) - 1;

		for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
			if (*modp < *basep)
				goto too_small;
			if (*modp > *basep)
				break;
		}
	}

	/* And on the 9th step, he rested. */

	WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
	WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
	if (mod_bits > 1024) {
		op = SAFE_PK_FUNC_EXP4;
		a_off = 0x000;
		b_off = 0x100;
		c_off = 0x200;
		d_off = 0x300;
	} else {
		op = SAFE_PK_FUNC_EXP16;
		a_off = 0x000;
		b_off = 0x080;
		c_off = 0x100;
		d_off = 0x180;
	}
	sc->sc_pk_reslen = b_off - a_off;
	sc->sc_pk_resoff = d_off;

	/* A is exponent, B is modulus, C is base, D is result */
	safe_kload_reg(sc, a_off, b_off - a_off,
		&krp->krp_param[SAFE_CRK_PARAM_EXP]);
	WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
	safe_kload_reg(sc, b_off, b_off - a_off,
		&krp->krp_param[SAFE_CRK_PARAM_MOD]);
	WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
	safe_kload_reg(sc, c_off, b_off - a_off,
		&krp->krp_param[SAFE_CRK_PARAM_BASE]);
	WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
	WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);

	WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);

	return (0);

too_big:
	krp->krp_status = E2BIG;
	return (1);
too_small:
	krp->krp_status = ERANGE;
	return (1);
bad_domain:
	krp->krp_status = EDOM;
	return (1);
}
static int
safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
{
	u_int plen = (cr->crp_nbits + 7) / 8;
	int i, sig = plen * 8;
	u_int8_t c, *p = cr->crp_p;

	DPRINTF(("%s()\n", __FUNCTION__));

	for (i = plen - 1; i >= 0; i--) {
		c = p[i];
		if (c != 0) {
			while ((c & 0x80) == 0) {
				sig--;
				c <<= 1;
			}
			break;
		}
		sig -= 8;
	}
	return (sig);
}
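
/*
 * NB: e.g. a 16-bit parameter whose bytes are p[1] = 0x01, p[0] = 0xff
 * starts with sig = 16; the top byte is non-zero, so the inner loop
 * shifts seven times before finding the leading 1 bit, returning 9.
 */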
static void
safe_kfeed(struct safe_softc *sc)
{
	struct safe_pkq *q, *tmp;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
		return;
	if (sc->sc_pkq_cur != NULL)
		return;
	list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
		sc->sc_pkq_cur = q;
		list_del(&q->pkq_list);
		if (safe_kstart(sc) != 0) {
			crypto_kdone(q->pkq_krp);
			kfree(q);
			sc->sc_pkq_cur = NULL;
		} else {
			/* op started, start polling */
			mod_timer(&sc->sc_pkto, jiffies + 1);
			break;
		}
	}
}
static void
safe_kpoll(unsigned long arg)
{
	struct safe_softc *sc = NULL;
	struct safe_pkq *q;
	struct crparam *res;
	u_int32_t buf[64];
	int i;
	unsigned long flags;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (arg >= SAFE_MAX_CHIPS)
		return;
	sc = safe_chip_idx[arg];
	if (!sc) {
		DPRINTF(("%s() - bad callback\n", __FUNCTION__));
		return;
	}

	spin_lock_irqsave(&sc->sc_pkmtx, flags);
	if (sc->sc_pkq_cur == NULL)
		goto out;
	if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
		/* still running, check back later */
		mod_timer(&sc->sc_pkto, jiffies + 1);
		goto out;
	}

	q = sc->sc_pkq_cur;
	res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
	bzero(buf, sizeof(buf));
	bzero(res->crp_p, (res->crp_nbits + 7) / 8);
	for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
		buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
			sc->sc_pk_resoff + (i << 2)));
	bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
	/*
	 * reduce the bits that need copying if possible
	 */
	res->crp_nbits = min(res->crp_nbits, sc->sc_pk_reslen * 8);
	res->crp_nbits = safe_ksigbits(sc, res);

	for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
		WRITE_REG(sc, i, 0);

	crypto_kdone(q->pkq_krp);
	kfree(q);
	sc->sc_pkq_cur = NULL;

	safe_kfeed(sc);
out:
	spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
}
static void
safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
		struct crparam *n)
{
	u_int32_t buf[64], i;

	DPRINTF(("%s()\n", __FUNCTION__));

	bzero(buf, sizeof(buf));
	bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);

	for (i = 0; i < len >> 2; i++)
		WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
			cpu_to_le32(buf[i]));
}
#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_DMA_ENDIAN)
		, READ_REG(sc, SAFE_DMA_SRCADDR)
		, READ_REG(sc, SAFE_DMA_DSTADDR)
		, READ_REG(sc, SAFE_DMA_STAT)
	);
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_HI_CFG)
		, READ_REG(sc, SAFE_HI_MASK)
		, READ_REG(sc, SAFE_HI_DESC_CNT)
		, READ_REG(sc, SAFE_HU_STAT)
		, READ_REG(sc, SAFE_HM_STAT)
	);
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
		tag,
		estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
		(unsigned long)(sc->sc_back - sc->sc_ring),
		(unsigned long)(sc->sc_front - sc->sc_ring));
}

static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
	int ix, nsegs;

	ix = re - sc->sc_ring;
	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
		, tag
		, re, ix
		, re->re_desc.d_csr
		, re->re_desc.d_src
		, re->re_desc.d_dst
		, re->re_desc.d_sa
		, re->re_desc.d_len
	);
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			printf(" spd[%u] %p: %p size %u flags %x"
				, ix, &sc->sc_spring[ix]
				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
				, sc->sc_spring[ix].pd_size
				, sc->sc_spring[ix].pd_flags
			);
			if (sc->sc_spring[ix].pd_size == 0)
				printf(" (zero!)");
			printf("\n");
			if (++ix == SAFE_TOTAL_SPART)
				ix = 0;
		}
	}
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			printf(" dpd[%u] %p: %p flags %x\n"
				, ix, &sc->sc_dpring[ix]
				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
				, sc->sc_dpring[ix].pd_flags
			);
			if (++ix == SAFE_TOTAL_DPART)
				ix = 0;
		}
	}
	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
		re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	printf("sa: key %x %x %x %x %x %x %x %x\n"
		, re->re_sa.sa_key[0]
		, re->re_sa.sa_key[1]
		, re->re_sa.sa_key[2]
		, re->re_sa.sa_key[3]
		, re->re_sa.sa_key[4]
		, re->re_sa.sa_key[5]
		, re->re_sa.sa_key[6]
		, re->re_sa.sa_key[7]
	);
	printf("sa: indigest %x %x %x %x %x\n"
		, re->re_sa.sa_indigest[0]
		, re->re_sa.sa_indigest[1]
		, re->re_sa.sa_indigest[2]
		, re->re_sa.sa_indigest[3]
		, re->re_sa.sa_indigest[4]
	);
	printf("sa: outdigest %x %x %x %x %x\n"
		, re->re_sa.sa_outdigest[0]
		, re->re_sa.sa_outdigest[1]
		, re->re_sa.sa_outdigest[2]
		, re->re_sa.sa_outdigest[3]
		, re->re_sa.sa_outdigest[4]
	);
	printf("sr: iv %x %x %x %x\n"
		, re->re_sastate.sa_saved_iv[0]
		, re->re_sastate.sa_saved_iv[1]
		, re->re_sastate.sa_saved_iv[2]
		, re->re_sastate.sa_saved_iv[3]
	);
	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
		, re->re_sastate.sa_saved_hashbc
		, re->re_sastate.sa_saved_indigest[0]
		, re->re_sastate.sa_saved_indigest[1]
		, re->re_sastate.sa_saved_indigest[2]
		, re->re_sastate.sa_saved_indigest[3]
		, re->re_sastate.sa_saved_indigest[4]
	);
}

static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
	unsigned long flags;

	spin_lock_irqsave(&sc->sc_ringmtx, flags);
	printf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
		do {
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		} while (re != sc->sc_front);
	}
	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
}
#endif /* SAFE_DEBUG */
static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct safe_softc *sc = NULL;
	u32 mem_start, mem_len, cmd;
	int i, rc;
	u_int32_t devinfo;
	dma_addr_t raddr;
	static int num_chips = 0;

	DPRINTF(("%s()\n", __FUNCTION__));

	if (pci_enable_device(dev) < 0)
		return(-ENODEV);

	if (!dev->irq) {
		printk("safe: found device with no IRQ assigned. check BIOS settings!");
		pci_disable_device(dev);
		return(-ENODEV);
	}

	if (pci_set_mwi(dev)) {
		printk("safe: pci_set_mwi failed!");
		return(-ENODEV);
	}

	sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return(-ENOMEM);
	memset(sc, 0, sizeof(*sc));

	softc_device_init(sc, "safe", num_chips, safe_methods);

	sc->sc_irq = -1;
	sc->sc_cid = -1;
	sc->sc_pcidev = dev;
	if (num_chips < SAFE_MAX_CHIPS) {
		safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
		num_chips++;
	}

	INIT_LIST_HEAD(&sc->sc_pkq);
	spin_lock_init(&sc->sc_pkmtx);

	pci_set_drvdata(sc->sc_pcidev, sc);

	/* we read its hardware registers as memory */
	mem_start = pci_resource_start(sc->sc_pcidev, 0);
	mem_len   = pci_resource_len(sc->sc_pcidev, 0);

	sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
	if (!sc->sc_base_addr) {
		device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
				mem_start, mem_start + mem_len - 1);
		goto out;
	}

	/* fix up the bus size */
	if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
		device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
		goto out;
	}
	if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
		device_printf(sc->sc_dev,
				"No usable consistent DMA configuration, aborting.\n");
		goto out;
	}

	pci_set_master(sc->sc_pcidev);

	pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);

	if (!(cmd & PCI_COMMAND_MEMORY)) {
		device_printf(sc->sc_dev, "failed to enable memory mapping\n");
		goto out;
	}

	if (!(cmd & PCI_COMMAND_MASTER)) {
		device_printf(sc->sc_dev, "failed to enable bus mastering\n");
		goto out;
	}

	rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
	if (rc) {
		device_printf(sc->sc_dev, "failed to hook irq %d\n", sc->sc_irq);
		goto out;
	}
	sc->sc_irq = dev->irq;

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
			(SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Allocate packet engine descriptors.
	 */
	sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
			SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
			&sc->sc_ringalloc.dma_paddr);
	if (!sc->sc_ringalloc.dma_vaddr) {
		device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
		goto out;
	}

	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
			offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
			offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
	spin_lock_init(&sc->sc_ringmtx);

	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
			SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
			&sc->sc_spalloc.dma_paddr);
	if (!sc->sc_spalloc.dma_vaddr) {
		device_printf(sc->sc_dev,
				"cannot allocate source particle descriptor ring\n");
		goto out;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
			SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
			&sc->sc_dpalloc.dma_paddr);
	if (!sc->sc_dpalloc.dma_vaddr) {
		device_printf(sc->sc_dev,
				"cannot allocate destination particle descriptor ring\n");
		goto out;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(sc->sc_dev, "could not get crypto driver id\n");
		goto out;
	}

	printf("%s:", device_get_nameunit(sc->sc_dev));

	devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		printf(" rng");
	}
	if (devinfo & SAFE_DEVINFO_PKEY) {
		printf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
#if 0
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
#endif
		init_timer(&sc->sc_pkto);
		sc->sc_pkto.function = safe_kpoll;
		sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
	}
	if (devinfo & SAFE_DEVINFO_DES) {
		printf(" des/3des");
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_AES) {
		printf(" aes");
		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_MD5) {
		printf(" md5");
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_SHA1) {
		printf(" sha1");
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	}
	printf("\n");
	crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
	/* XXX other supported algorithms */

	safe_reset_board(sc);		/* reset h/w */
	safe_init_board(sc);		/* init h/w */

#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
		safe_rng_init(sc);
		crypto_rregister(sc->sc_cid, safe_read_random, sc);
	}
#endif /* SAFE_NO_RNG */

	return (0);

out:
	if (sc->sc_cid >= 0)
		crypto_unregister_all(sc->sc_cid);
	if (sc->sc_irq != -1)
		free_irq(sc->sc_irq, sc);
	if (sc->sc_ringalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
				SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
				sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
	if (sc->sc_spalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
				SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
				sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
	if (sc->sc_dpalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
				sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
	kfree(sc);
	return(-ENOMEM);
}
static void safe_remove(struct pci_dev *dev)
{
	struct safe_softc *sc = pci_get_drvdata(dev);

	DPRINTF(("%s()\n", __FUNCTION__));

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	del_timer_sync(&sc->sc_pkto);

	crypto_unregister_all(sc->sc_cid);

	safe_cleanchip(sc);

	if (sc->sc_irq != -1)
		free_irq(sc->sc_irq, sc);
	if (sc->sc_ringalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
				SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
				sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
	if (sc->sc_spalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
				SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
				sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
	if (sc->sc_dpalloc.dma_vaddr)
		pci_free_consistent(sc->sc_pcidev,
				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
				sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
	sc->sc_irq = -1;
	sc->sc_ringalloc.dma_vaddr = NULL;
	sc->sc_spalloc.dma_vaddr = NULL;
	sc->sc_dpalloc.dma_vaddr = NULL;
}
static struct pci_device_id safe_pci_tbl[] = {
	{ PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ },
};
MODULE_DEVICE_TABLE(pci, safe_pci_tbl);

static struct pci_driver safe_driver = {
	.name		= "safe",
	.id_table	= safe_pci_tbl,
	.probe		= safe_probe,
	.remove		= safe_remove,
	/* add PM stuff here one day */
};
static int __init safe_init (void)
{
	struct safe_softc *sc = NULL;
	int rc;

	DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));

	rc = pci_register_driver(&safe_driver);
	pci_register_driver_compat(&safe_driver, rc);

	return rc;
}

static void __exit safe_exit (void)
{
	pci_unregister_driver(&safe_driver);
}

module_init(safe_init);
module_exit(safe_exit);

MODULE_LICENSE("BSD");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");