2 * Linux port done by David McCullough <david_mccullough@mcafee.com>
3 * Copyright (C) 2004-2010 David McCullough
4 * The license and original author are listed below.
6 * Copyright (c) 2003 Sam Leffler, Errno Consulting
7 * Copyright (c) 2003 Global Technology Associates, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 __FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
34 #ifndef AUTOCONF_INCLUDED
35 #include <linux/config.h>
37 #include <linux/module.h>
38 #include <linux/kernel.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/slab.h>
42 #include <linux/wait.h>
43 #include <linux/sched.h>
44 #include <linux/pci.h>
45 #include <linux/delay.h>
46 #include <linux/interrupt.h>
47 #include <linux/spinlock.h>
48 #include <linux/random.h>
49 #include <linux/version.h>
50 #include <linux/skbuff.h>
54 * SafeNet SafeXcel-1141 hardware crypto accelerator
57 #include <cryptodev.h>
59 #include <safe/safereg.h>
60 #include <safe/safevar.h>
63 #define DPRINTF(a) do { \
66 device_get_nameunit(sc->sc_dev) : "safe"); \
75 * until we find a cleaner way, include the BSD md5/sha1 code
80 #define LITTLE_ENDIAN 1234
81 #define BIG_ENDIAN 4321
82 #ifdef __LITTLE_ENDIAN
83 #define BYTE_ORDER LITTLE_ENDIAN
86 #define BYTE_ORDER BIG_ENDIAN
90 #include <safe/sha1.h>
91 #include <safe/sha1.c>
/*
 * HMAC inner-pad constant (RFC 2104): 64 bytes of 0x36, XORed with the
 * key before the inner hash.  Sized to the MD5/SHA1 HMAC block length
 * used by this driver.  The extracted source lost the closing brace;
 * restored here as a well-formed definition.
 */
u_int8_t hmac_ipad_buffer[64] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};
/*
 * HMAC outer-pad constant (RFC 2104): 64 bytes of 0x5C, XORed with the
 * key before the outer hash.  Companion to hmac_ipad_buffer.  The
 * extracted source lost the closing brace; restored here as a
 * well-formed definition.
 */
u_int8_t hmac_opad_buffer[64] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};
114 #endif /* HMAC_HACK */
116 /* add proc entry for this */
117 struct safe_stats safestats
;
119 #define debug safe_debug
121 module_param(safe_debug
, int, 0644);
122 MODULE_PARM_DESC(safe_debug
, "Enable debug");
124 static void safe_callback(struct safe_softc
*, struct safe_ringentry
*);
125 static void safe_feed(struct safe_softc
*, struct safe_ringentry
*);
126 #if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
127 static void safe_rng_init(struct safe_softc
*);
128 int safe_rngbufsize
= 8; /* 32 bytes each read */
129 module_param(safe_rngbufsize
, int, 0644);
130 MODULE_PARM_DESC(safe_rngbufsize
, "RNG polling buffer size (32-bit words)");
131 int safe_rngmaxalarm
= 8; /* max alarms before reset */
132 module_param(safe_rngmaxalarm
, int, 0644);
133 MODULE_PARM_DESC(safe_rngmaxalarm
, "RNG max alarms before reset");
134 #endif /* SAFE_NO_RNG */
136 static void safe_totalreset(struct safe_softc
*sc
);
137 static int safe_dmamap_aligned(struct safe_softc
*sc
, const struct safe_operand
*op
);
138 static int safe_dmamap_uniform(struct safe_softc
*sc
, const struct safe_operand
*op
);
139 static int safe_free_entry(struct safe_softc
*sc
, struct safe_ringentry
*re
);
140 static int safe_kprocess(device_t dev
, struct cryptkop
*krp
, int hint
);
141 static int safe_kstart(struct safe_softc
*sc
);
142 static int safe_ksigbits(struct safe_softc
*sc
, struct crparam
*cr
);
143 static void safe_kfeed(struct safe_softc
*sc
);
144 static void safe_kpoll(unsigned long arg
);
145 static void safe_kload_reg(struct safe_softc
*sc
, u_int32_t off
,
146 u_int32_t len
, struct crparam
*n
);
148 static int safe_newsession(device_t
, u_int32_t
*, struct cryptoini
*);
149 static int safe_freesession(device_t
, u_int64_t
);
150 static int safe_process(device_t
, struct cryptop
*, int);
152 static device_method_t safe_methods
= {
153 /* crypto device methods */
154 DEVMETHOD(cryptodev_newsession
, safe_newsession
),
155 DEVMETHOD(cryptodev_freesession
,safe_freesession
),
156 DEVMETHOD(cryptodev_process
, safe_process
),
157 DEVMETHOD(cryptodev_kprocess
, safe_kprocess
),
160 #define READ_REG(sc,r) readl((sc)->sc_base_addr + (r))
161 #define WRITE_REG(sc,r,val) writel((val), (sc)->sc_base_addr + (r))
163 #define SAFE_MAX_CHIPS 8
164 static struct safe_softc
*safe_chip_idx
[SAFE_MAX_CHIPS
];
167 * split our buffers up into safe DMAable byte fragments to avoid lockup
168 * bug in 1141 HW on rev 1.0.
173 struct safe_softc
*sc
,
174 struct safe_operand
*buf
,
179 int chunk
, tlen
= len
;
181 tmp
= pci_map_single(sc
->sc_pcidev
, addr
, len
, PCI_DMA_BIDIRECTIONAL
);
185 chunk
= (len
> sc
->sc_max_dsize
) ? sc
->sc_max_dsize
: len
;
186 buf
->segs
[buf
->nsegs
].ds_addr
= tmp
;
187 buf
->segs
[buf
->nsegs
].ds_len
= chunk
;
188 buf
->segs
[buf
->nsegs
].ds_tlen
= tlen
;
198 * map in a given uio buffer (great on some arches :-)
202 pci_map_uio(struct safe_softc
*sc
, struct safe_operand
*buf
, struct uio
*uio
)
204 struct iovec
*iov
= uio
->uio_iov
;
207 DPRINTF(("%s()\n", __FUNCTION__
));
212 for (n
= 0; n
< uio
->uio_iovcnt
; n
++) {
213 pci_map_linear(sc
, buf
, iov
->iov_base
, iov
->iov_len
);
217 /* identify this buffer by the first segment */
218 buf
->map
= (void *) buf
->segs
[0].ds_addr
;
223 * map in a given sk_buff
227 pci_map_skb(struct safe_softc
*sc
,struct safe_operand
*buf
,struct sk_buff
*skb
)
231 DPRINTF(("%s()\n", __FUNCTION__
));
236 pci_map_linear(sc
, buf
, skb
->data
, skb_headlen(skb
));
238 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
239 pci_map_linear(sc
, buf
,
240 page_address(skb_shinfo(skb
)->frags
[i
].page
) +
241 skb_shinfo(skb
)->frags
[i
].page_offset
,
242 skb_shinfo(skb
)->frags
[i
].size
);
245 /* identify this buffer by the first segment */
246 buf
->map
= (void *) buf
->segs
[0].ds_addr
;
251 #if 0 /* not needed at this time */
253 pci_sync_operand(struct safe_softc
*sc
, struct safe_operand
*buf
)
257 DPRINTF(("%s()\n", __FUNCTION__
));
258 for (i
= 0; i
< buf
->nsegs
; i
++)
259 pci_dma_sync_single_for_cpu(sc
->sc_pcidev
, buf
->segs
[i
].ds_addr
,
260 buf
->segs
[i
].ds_len
, PCI_DMA_BIDIRECTIONAL
);
265 pci_unmap_operand(struct safe_softc
*sc
, struct safe_operand
*buf
)
268 DPRINTF(("%s()\n", __FUNCTION__
));
269 for (i
= 0; i
< buf
->nsegs
; i
++) {
270 if (buf
->segs
[i
].ds_tlen
) {
271 DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__
, i
, buf
->segs
[i
].ds_addr
, buf
->segs
[i
].ds_tlen
));
272 pci_unmap_single(sc
->sc_pcidev
, buf
->segs
[i
].ds_addr
,
273 buf
->segs
[i
].ds_tlen
, PCI_DMA_BIDIRECTIONAL
);
274 DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__
, i
, buf
->segs
[i
].ds_addr
, buf
->segs
[i
].ds_tlen
));
276 buf
->segs
[i
].ds_addr
= 0;
277 buf
->segs
[i
].ds_len
= 0;
278 buf
->segs
[i
].ds_tlen
= 0;
287 * SafeXcel Interrupt routine
290 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
291 safe_intr(int irq
, void *arg
)
293 safe_intr(int irq
, void *arg
, struct pt_regs
*regs
)
296 struct safe_softc
*sc
= arg
;
300 stat
= READ_REG(sc
, SAFE_HM_STAT
);
302 DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__
, stat
));
304 if (stat
== 0) /* shared irq, not for us */
307 WRITE_REG(sc
, SAFE_HI_CLR
, stat
); /* IACK */
309 if ((stat
& SAFE_INT_PE_DDONE
)) {
311 * Descriptor(s) done; scan the ring and
312 * process completed operations.
314 spin_lock_irqsave(&sc
->sc_ringmtx
, flags
);
315 while (sc
->sc_back
!= sc
->sc_front
) {
316 struct safe_ringentry
*re
= sc
->sc_back
;
320 safe_dump_ringstate(sc
, __func__
);
321 safe_dump_request(sc
, __func__
, re
);
325 * safe_process marks ring entries that were allocated
326 * but not used with a csr of zero. This insures the
327 * ring front pointer never needs to be set backwards
328 * in the event that an entry is allocated but not used
329 * because of a setup error.
331 DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__
, re
->re_desc
.d_csr
));
332 if (re
->re_desc
.d_csr
!= 0) {
333 if (!SAFE_PE_CSR_IS_DONE(re
->re_desc
.d_csr
)) {
334 DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__
));
337 if (!SAFE_PE_LEN_IS_DONE(re
->re_desc
.d_len
)) {
338 DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__
));
342 safe_callback(sc
, re
);
344 if (++(sc
->sc_back
) == sc
->sc_ringtop
)
345 sc
->sc_back
= sc
->sc_ring
;
347 spin_unlock_irqrestore(&sc
->sc_ringmtx
, flags
);
351 * Check to see if we got any DMA Error
353 if (stat
& SAFE_INT_PE_ERROR
) {
354 printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc
->sc_dev
),
355 (int)READ_REG(sc
, SAFE_PE_DMASTAT
));
356 safestats
.st_dmaerr
++;
363 if (sc
->sc_needwakeup
) { /* XXX check high watermark */
364 int wakeup
= sc
->sc_needwakeup
& (CRYPTO_SYMQ
|CRYPTO_ASYMQ
);
365 DPRINTF(("%s: wakeup crypto %x\n", __func__
,
367 sc
->sc_needwakeup
&= ~wakeup
;
368 crypto_unblock(sc
->sc_cid
, wakeup
);
375 * safe_feed() - post a request to chip
378 safe_feed(struct safe_softc
*sc
, struct safe_ringentry
*re
)
380 DPRINTF(("%s()\n", __FUNCTION__
));
383 safe_dump_ringstate(sc
, __func__
);
384 safe_dump_request(sc
, __func__
, re
);
388 if (sc
->sc_nqchip
> safestats
.st_maxqchip
)
389 safestats
.st_maxqchip
= sc
->sc_nqchip
;
390 /* poke h/w to check descriptor ring, any value can be written */
391 WRITE_REG(sc
, SAFE_HI_RD_DESCR
, 0);
394 #define N(a) (sizeof(a) / sizeof (a[0]))
396 safe_setup_enckey(struct safe_session
*ses
, caddr_t key
)
400 bcopy(key
, ses
->ses_key
, ses
->ses_klen
/ 8);
402 /* PE is little-endian, insure proper byte order */
403 for (i
= 0; i
< N(ses
->ses_key
); i
++)
404 ses
->ses_key
[i
] = htole32(ses
->ses_key
[i
]);
408 safe_setup_mackey(struct safe_session
*ses
, int algo
, caddr_t key
, int klen
)
416 for (i
= 0; i
< klen
; i
++)
417 key
[i
] ^= HMAC_IPAD_VAL
;
419 if (algo
== CRYPTO_MD5_HMAC
) {
421 MD5Update(&md5ctx
, key
, klen
);
422 MD5Update(&md5ctx
, hmac_ipad_buffer
, MD5_HMAC_BLOCK_LEN
- klen
);
423 bcopy(md5ctx
.md5_st8
, ses
->ses_hminner
, sizeof(md5ctx
.md5_st8
));
426 SHA1Update(&sha1ctx
, key
, klen
);
427 SHA1Update(&sha1ctx
, hmac_ipad_buffer
,
428 SHA1_HMAC_BLOCK_LEN
- klen
);
429 bcopy(sha1ctx
.h
.b32
, ses
->ses_hminner
, sizeof(sha1ctx
.h
.b32
));
432 for (i
= 0; i
< klen
; i
++)
433 key
[i
] ^= (HMAC_IPAD_VAL
^ HMAC_OPAD_VAL
);
435 if (algo
== CRYPTO_MD5_HMAC
) {
437 MD5Update(&md5ctx
, key
, klen
);
438 MD5Update(&md5ctx
, hmac_opad_buffer
, MD5_HMAC_BLOCK_LEN
- klen
);
439 bcopy(md5ctx
.md5_st8
, ses
->ses_hmouter
, sizeof(md5ctx
.md5_st8
));
442 SHA1Update(&sha1ctx
, key
, klen
);
443 SHA1Update(&sha1ctx
, hmac_opad_buffer
,
444 SHA1_HMAC_BLOCK_LEN
- klen
);
445 bcopy(sha1ctx
.h
.b32
, ses
->ses_hmouter
, sizeof(sha1ctx
.h
.b32
));
448 for (i
= 0; i
< klen
; i
++)
449 key
[i
] ^= HMAC_OPAD_VAL
;
453 * this code prevents SHA working on a BE host,
454 * so it is obviously wrong. I think the byte
455 * swap setup we do with the chip fixes this for us
458 /* PE is little-endian, insure proper byte order */
459 for (i
= 0; i
< N(ses
->ses_hminner
); i
++) {
460 ses
->ses_hminner
[i
] = htole32(ses
->ses_hminner
[i
]);
461 ses
->ses_hmouter
[i
] = htole32(ses
->ses_hmouter
[i
]);
464 #else /* HMAC_HACK */
465 printk("safe: md5/sha not implemented\n");
466 #endif /* HMAC_HACK */
471 * Allocate a new 'session' and return an encoded session id. 'sidp'
472 * contains our registration id, and should contain an encoded session
473 * id on successful allocation.
476 safe_newsession(device_t dev
, u_int32_t
*sidp
, struct cryptoini
*cri
)
478 struct safe_softc
*sc
= device_get_softc(dev
);
479 struct cryptoini
*c
, *encini
= NULL
, *macini
= NULL
;
480 struct safe_session
*ses
= NULL
;
483 DPRINTF(("%s()\n", __FUNCTION__
));
485 if (sidp
== NULL
|| cri
== NULL
|| sc
== NULL
)
488 for (c
= cri
; c
!= NULL
; c
= c
->cri_next
) {
489 if (c
->cri_alg
== CRYPTO_MD5_HMAC
||
490 c
->cri_alg
== CRYPTO_SHA1_HMAC
||
491 c
->cri_alg
== CRYPTO_NULL_HMAC
) {
495 } else if (c
->cri_alg
== CRYPTO_DES_CBC
||
496 c
->cri_alg
== CRYPTO_3DES_CBC
||
497 c
->cri_alg
== CRYPTO_AES_CBC
||
498 c
->cri_alg
== CRYPTO_NULL_CBC
) {
505 if (encini
== NULL
&& macini
== NULL
)
507 if (encini
) { /* validate key length */
508 switch (encini
->cri_alg
) {
510 if (encini
->cri_klen
!= 64)
513 case CRYPTO_3DES_CBC
:
514 if (encini
->cri_klen
!= 192)
518 if (encini
->cri_klen
!= 128 &&
519 encini
->cri_klen
!= 192 &&
520 encini
->cri_klen
!= 256)
526 if (sc
->sc_sessions
== NULL
) {
527 ses
= sc
->sc_sessions
= (struct safe_session
*)
528 kmalloc(sizeof(struct safe_session
), SLAB_ATOMIC
);
531 memset(ses
, 0, sizeof(struct safe_session
));
533 sc
->sc_nsessions
= 1;
535 for (sesn
= 0; sesn
< sc
->sc_nsessions
; sesn
++) {
536 if (sc
->sc_sessions
[sesn
].ses_used
== 0) {
537 ses
= &sc
->sc_sessions
[sesn
];
543 sesn
= sc
->sc_nsessions
;
544 ses
= (struct safe_session
*)
545 kmalloc((sesn
+ 1) * sizeof(struct safe_session
), SLAB_ATOMIC
);
548 memset(ses
, 0, (sesn
+ 1) * sizeof(struct safe_session
));
549 bcopy(sc
->sc_sessions
, ses
, sesn
*
550 sizeof(struct safe_session
));
551 bzero(sc
->sc_sessions
, sesn
*
552 sizeof(struct safe_session
));
553 kfree(sc
->sc_sessions
);
554 sc
->sc_sessions
= ses
;
555 ses
= &sc
->sc_sessions
[sesn
];
560 bzero(ses
, sizeof(struct safe_session
));
565 /* XXX may read fewer than requested */
566 read_random(ses
->ses_iv
, sizeof(ses
->ses_iv
));
568 ses
->ses_klen
= encini
->cri_klen
;
569 if (encini
->cri_key
!= NULL
)
570 safe_setup_enckey(ses
, encini
->cri_key
);
574 ses
->ses_mlen
= macini
->cri_mlen
;
575 if (ses
->ses_mlen
== 0) {
576 if (macini
->cri_alg
== CRYPTO_MD5_HMAC
)
577 ses
->ses_mlen
= MD5_HASH_LEN
;
579 ses
->ses_mlen
= SHA1_HASH_LEN
;
582 if (macini
->cri_key
!= NULL
) {
583 safe_setup_mackey(ses
, macini
->cri_alg
, macini
->cri_key
,
584 macini
->cri_klen
/ 8);
588 *sidp
= SAFE_SID(device_get_unit(sc
->sc_dev
), sesn
);
593 * Deallocate a session.
596 safe_freesession(device_t dev
, u_int64_t tid
)
598 struct safe_softc
*sc
= device_get_softc(dev
);
600 u_int32_t sid
= ((u_int32_t
) tid
) & 0xffffffff;
602 DPRINTF(("%s()\n", __FUNCTION__
));
607 session
= SAFE_SESSION(sid
);
608 if (session
< sc
->sc_nsessions
) {
609 bzero(&sc
->sc_sessions
[session
], sizeof(sc
->sc_sessions
[session
]));
618 safe_process(device_t dev
, struct cryptop
*crp
, int hint
)
620 struct safe_softc
*sc
= device_get_softc(dev
);
621 int err
= 0, i
, nicealign
, uniform
;
622 struct cryptodesc
*crd1
, *crd2
, *maccrd
, *enccrd
;
623 int bypass
, oplen
, ivsize
;
626 struct safe_session
*ses
;
627 struct safe_ringentry
*re
;
628 struct safe_sarec
*sa
;
629 struct safe_pdesc
*pd
;
630 u_int32_t cmd0
, cmd1
, staterec
;
633 DPRINTF(("%s()\n", __FUNCTION__
));
635 if (crp
== NULL
|| crp
->crp_callback
== NULL
|| sc
== NULL
) {
636 safestats
.st_invalid
++;
639 if (SAFE_SESSION(crp
->crp_sid
) >= sc
->sc_nsessions
) {
640 safestats
.st_badsession
++;
644 spin_lock_irqsave(&sc
->sc_ringmtx
, flags
);
645 if (sc
->sc_front
== sc
->sc_back
&& sc
->sc_nqchip
!= 0) {
646 safestats
.st_ringfull
++;
647 sc
->sc_needwakeup
|= CRYPTO_SYMQ
;
648 spin_unlock_irqrestore(&sc
->sc_ringmtx
, flags
);
653 staterec
= re
->re_sa
.sa_staterec
; /* save */
654 /* NB: zero everything but the PE descriptor */
655 bzero(&re
->re_sa
, sizeof(struct safe_ringentry
) - sizeof(re
->re_desc
));
656 re
->re_sa
.sa_staterec
= staterec
; /* restore */
659 re
->re_sesn
= SAFE_SESSION(crp
->crp_sid
);
661 re
->re_src
.nsegs
= 0;
662 re
->re_dst
.nsegs
= 0;
664 if (crp
->crp_flags
& CRYPTO_F_SKBUF
) {
665 re
->re_src_skb
= (struct sk_buff
*)crp
->crp_buf
;
666 re
->re_dst_skb
= (struct sk_buff
*)crp
->crp_buf
;
667 } else if (crp
->crp_flags
& CRYPTO_F_IOV
) {
668 re
->re_src_io
= (struct uio
*)crp
->crp_buf
;
669 re
->re_dst_io
= (struct uio
*)crp
->crp_buf
;
671 safestats
.st_badflags
++;
673 goto errout
; /* XXX we don't handle contiguous blocks! */
677 ses
= &sc
->sc_sessions
[re
->re_sesn
];
679 crd1
= crp
->crp_desc
;
681 safestats
.st_nodesc
++;
685 crd2
= crd1
->crd_next
;
687 cmd0
= SAFE_SA_CMD0_BASIC
; /* basic group operation */
690 if (crd1
->crd_alg
== CRYPTO_MD5_HMAC
||
691 crd1
->crd_alg
== CRYPTO_SHA1_HMAC
||
692 crd1
->crd_alg
== CRYPTO_NULL_HMAC
) {
695 cmd0
|= SAFE_SA_CMD0_OP_HASH
;
696 } else if (crd1
->crd_alg
== CRYPTO_DES_CBC
||
697 crd1
->crd_alg
== CRYPTO_3DES_CBC
||
698 crd1
->crd_alg
== CRYPTO_AES_CBC
||
699 crd1
->crd_alg
== CRYPTO_NULL_CBC
) {
702 cmd0
|= SAFE_SA_CMD0_OP_CRYPT
;
704 safestats
.st_badalg
++;
709 if ((crd1
->crd_alg
== CRYPTO_MD5_HMAC
||
710 crd1
->crd_alg
== CRYPTO_SHA1_HMAC
||
711 crd1
->crd_alg
== CRYPTO_NULL_HMAC
) &&
712 (crd2
->crd_alg
== CRYPTO_DES_CBC
||
713 crd2
->crd_alg
== CRYPTO_3DES_CBC
||
714 crd2
->crd_alg
== CRYPTO_AES_CBC
||
715 crd2
->crd_alg
== CRYPTO_NULL_CBC
) &&
716 ((crd2
->crd_flags
& CRD_F_ENCRYPT
) == 0)) {
719 } else if ((crd1
->crd_alg
== CRYPTO_DES_CBC
||
720 crd1
->crd_alg
== CRYPTO_3DES_CBC
||
721 crd1
->crd_alg
== CRYPTO_AES_CBC
||
722 crd1
->crd_alg
== CRYPTO_NULL_CBC
) &&
723 (crd2
->crd_alg
== CRYPTO_MD5_HMAC
||
724 crd2
->crd_alg
== CRYPTO_SHA1_HMAC
||
725 crd2
->crd_alg
== CRYPTO_NULL_HMAC
) &&
726 (crd1
->crd_flags
& CRD_F_ENCRYPT
)) {
730 safestats
.st_badalg
++;
734 cmd0
|= SAFE_SA_CMD0_OP_BOTH
;
738 if (enccrd
->crd_flags
& CRD_F_KEY_EXPLICIT
)
739 safe_setup_enckey(ses
, enccrd
->crd_key
);
741 if (enccrd
->crd_alg
== CRYPTO_DES_CBC
) {
742 cmd0
|= SAFE_SA_CMD0_DES
;
743 cmd1
|= SAFE_SA_CMD1_CBC
;
744 ivsize
= 2*sizeof(u_int32_t
);
745 } else if (enccrd
->crd_alg
== CRYPTO_3DES_CBC
) {
746 cmd0
|= SAFE_SA_CMD0_3DES
;
747 cmd1
|= SAFE_SA_CMD1_CBC
;
748 ivsize
= 2*sizeof(u_int32_t
);
749 } else if (enccrd
->crd_alg
== CRYPTO_AES_CBC
) {
750 cmd0
|= SAFE_SA_CMD0_AES
;
751 cmd1
|= SAFE_SA_CMD1_CBC
;
752 if (ses
->ses_klen
== 128)
753 cmd1
|= SAFE_SA_CMD1_AES128
;
754 else if (ses
->ses_klen
== 192)
755 cmd1
|= SAFE_SA_CMD1_AES192
;
757 cmd1
|= SAFE_SA_CMD1_AES256
;
758 ivsize
= 4*sizeof(u_int32_t
);
760 cmd0
|= SAFE_SA_CMD0_CRYPT_NULL
;
765 * Setup encrypt/decrypt state. When using basic ops
766 * we can't use an inline IV because hash/crypt offset
767 * must be from the end of the IV to the start of the
768 * crypt data and this leaves out the preceding header
769 * from the hash calculation. Instead we place the IV
770 * in the state record and set the hash/crypt offset to
771 * copy both the header+IV.
773 if (enccrd
->crd_flags
& CRD_F_ENCRYPT
) {
774 cmd0
|= SAFE_SA_CMD0_OUTBOUND
;
776 if (enccrd
->crd_flags
& CRD_F_IV_EXPLICIT
)
779 iv
= (caddr_t
) ses
->ses_iv
;
780 if ((enccrd
->crd_flags
& CRD_F_IV_PRESENT
) == 0) {
781 crypto_copyback(crp
->crp_flags
, crp
->crp_buf
,
782 enccrd
->crd_inject
, ivsize
, iv
);
784 bcopy(iv
, re
->re_sastate
.sa_saved_iv
, ivsize
);
786 for (i
= 0; i
< ivsize
/sizeof(re
->re_sastate
.sa_saved_iv
[0]); i
++)
787 re
->re_sastate
.sa_saved_iv
[i
] =
788 cpu_to_le32(re
->re_sastate
.sa_saved_iv
[i
]);
789 cmd0
|= SAFE_SA_CMD0_IVLD_STATE
| SAFE_SA_CMD0_SAVEIV
;
790 re
->re_flags
|= SAFE_QFLAGS_COPYOUTIV
;
792 cmd0
|= SAFE_SA_CMD0_INBOUND
;
794 if (enccrd
->crd_flags
& CRD_F_IV_EXPLICIT
) {
795 bcopy(enccrd
->crd_iv
,
796 re
->re_sastate
.sa_saved_iv
, ivsize
);
798 crypto_copydata(crp
->crp_flags
, crp
->crp_buf
,
799 enccrd
->crd_inject
, ivsize
,
800 (caddr_t
)re
->re_sastate
.sa_saved_iv
);
803 for (i
= 0; i
< ivsize
/sizeof(re
->re_sastate
.sa_saved_iv
[0]); i
++)
804 re
->re_sastate
.sa_saved_iv
[i
] =
805 cpu_to_le32(re
->re_sastate
.sa_saved_iv
[i
]);
806 cmd0
|= SAFE_SA_CMD0_IVLD_STATE
;
809 * For basic encryption use the zero pad algorithm.
810 * This pads results to an 8-byte boundary and
811 * suppresses padding verification for inbound (i.e.
812 * decrypt) operations.
814 * NB: Not sure if the 8-byte pad boundary is a problem.
816 cmd0
|= SAFE_SA_CMD0_PAD_ZERO
;
818 /* XXX assert key bufs have the same size */
819 bcopy(ses
->ses_key
, sa
->sa_key
, sizeof(sa
->sa_key
));
823 if (maccrd
->crd_flags
& CRD_F_KEY_EXPLICIT
) {
824 safe_setup_mackey(ses
, maccrd
->crd_alg
,
825 maccrd
->crd_key
, maccrd
->crd_klen
/ 8);
828 if (maccrd
->crd_alg
== CRYPTO_MD5_HMAC
) {
829 cmd0
|= SAFE_SA_CMD0_MD5
;
830 cmd1
|= SAFE_SA_CMD1_HMAC
; /* NB: enable HMAC */
831 } else if (maccrd
->crd_alg
== CRYPTO_SHA1_HMAC
) {
832 cmd0
|= SAFE_SA_CMD0_SHA1
;
833 cmd1
|= SAFE_SA_CMD1_HMAC
; /* NB: enable HMAC */
835 cmd0
|= SAFE_SA_CMD0_HASH_NULL
;
838 * Digest data is loaded from the SA and the hash
839 * result is saved to the state block where we
840 * retrieve it for return to the caller.
842 /* XXX assert digest bufs have the same size */
843 bcopy(ses
->ses_hminner
, sa
->sa_indigest
,
844 sizeof(sa
->sa_indigest
));
845 bcopy(ses
->ses_hmouter
, sa
->sa_outdigest
,
846 sizeof(sa
->sa_outdigest
));
848 cmd0
|= SAFE_SA_CMD0_HSLD_SA
| SAFE_SA_CMD0_SAVEHASH
;
849 re
->re_flags
|= SAFE_QFLAGS_COPYOUTICV
;
852 if (enccrd
&& maccrd
) {
854 * The offset from hash data to the start of
855 * crypt data is the difference in the skips.
857 bypass
= maccrd
->crd_skip
;
858 coffset
= enccrd
->crd_skip
- maccrd
->crd_skip
;
860 DPRINTF(("%s: hash does not precede crypt; "
861 "mac skip %u enc skip %u\n",
862 __func__
, maccrd
->crd_skip
, enccrd
->crd_skip
));
863 safestats
.st_skipmismatch
++;
867 oplen
= enccrd
->crd_skip
+ enccrd
->crd_len
;
868 if (maccrd
->crd_skip
+ maccrd
->crd_len
!= oplen
) {
869 DPRINTF(("%s: hash amount %u != crypt amount %u\n",
870 __func__
, maccrd
->crd_skip
+ maccrd
->crd_len
,
872 safestats
.st_lenmismatch
++;
878 printf("mac: skip %d, len %d, inject %d\n",
879 maccrd
->crd_skip
, maccrd
->crd_len
,
881 printf("enc: skip %d, len %d, inject %d\n",
882 enccrd
->crd_skip
, enccrd
->crd_len
,
884 printf("bypass %d coffset %d oplen %d\n",
885 bypass
, coffset
, oplen
);
888 if (coffset
& 3) { /* offset must be 32-bit aligned */
889 DPRINTF(("%s: coffset %u misaligned\n",
891 safestats
.st_coffmisaligned
++;
896 if (coffset
> 255) { /* offset must be <256 dwords */
897 DPRINTF(("%s: coffset %u too big\n",
899 safestats
.st_cofftoobig
++;
904 * Tell the hardware to copy the header to the output.
905 * The header is defined as the data from the end of
906 * the bypass to the start of data to be encrypted.
907 * Typically this is the inline IV. Note that you need
908 * to do this even if src+dst are the same; it appears
909 * that w/o this bit the crypted data is written
910 * immediately after the bypass data.
912 cmd1
|= SAFE_SA_CMD1_HDRCOPY
;
914 * Disable IP header mutable bit handling. This is
915 * needed to get correct HMAC calculations.
917 cmd1
|= SAFE_SA_CMD1_MUTABLE
;
920 bypass
= enccrd
->crd_skip
;
921 oplen
= bypass
+ enccrd
->crd_len
;
923 bypass
= maccrd
->crd_skip
;
924 oplen
= bypass
+ maccrd
->crd_len
;
928 /* XXX verify multiple of 4 when using s/g */
929 if (bypass
> 96) { /* bypass offset must be <= 96 bytes */
930 DPRINTF(("%s: bypass %u too big\n", __func__
, bypass
));
931 safestats
.st_bypasstoobig
++;
936 if (crp
->crp_flags
& CRYPTO_F_SKBUF
) {
937 if (pci_map_skb(sc
, &re
->re_src
, re
->re_src_skb
)) {
938 safestats
.st_noload
++;
942 } else if (crp
->crp_flags
& CRYPTO_F_IOV
) {
943 if (pci_map_uio(sc
, &re
->re_src
, re
->re_src_io
)) {
944 safestats
.st_noload
++;
949 nicealign
= safe_dmamap_aligned(sc
, &re
->re_src
);
950 uniform
= safe_dmamap_uniform(sc
, &re
->re_src
);
952 DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
953 nicealign
, uniform
, re
->re_src
.nsegs
));
954 if (re
->re_src
.nsegs
> 1) {
955 re
->re_desc
.d_src
= sc
->sc_spalloc
.dma_paddr
+
956 ((caddr_t
) sc
->sc_spfree
- (caddr_t
) sc
->sc_spring
);
957 for (i
= 0; i
< re
->re_src_nsegs
; i
++) {
958 /* NB: no need to check if there's space */
960 if (++(sc
->sc_spfree
) == sc
->sc_springtop
)
961 sc
->sc_spfree
= sc
->sc_spring
;
963 KASSERT((pd
->pd_flags
&3) == 0 ||
964 (pd
->pd_flags
&3) == SAFE_PD_DONE
,
965 ("bogus source particle descriptor; flags %x",
967 pd
->pd_addr
= re
->re_src_segs
[i
].ds_addr
;
968 pd
->pd_size
= re
->re_src_segs
[i
].ds_len
;
969 pd
->pd_flags
= SAFE_PD_READY
;
971 cmd0
|= SAFE_SA_CMD0_IGATHER
;
974 * No need for gather, reference the operand directly.
976 re
->re_desc
.d_src
= re
->re_src_segs
[0].ds_addr
;
979 if (enccrd
== NULL
&& maccrd
!= NULL
) {
981 * Hash op; no destination needed.
984 if (crp
->crp_flags
& (CRYPTO_F_IOV
|CRYPTO_F_SKBUF
)) {
986 safestats
.st_iovmisaligned
++;
991 device_printf(sc
->sc_dev
, "!uniform source\n");
994 * There's no way to handle the DMA
995 * requirements with this uio. We
996 * could create a separate DMA area for
997 * the result and then copy it back,
998 * but for now we just bail and return
999 * an error. Note that uio requests
1000 * > SAFE_MAX_DSIZE are handled because
1001 * the DMA map and segment list for the
1002 * destination wil result in a
1003 * destination particle list that does
1004 * the necessary scatter DMA.
1006 safestats
.st_iovnotuniform
++;
1011 re
->re_dst
= re
->re_src
;
1013 safestats
.st_badflags
++;
1018 if (re
->re_dst
.nsegs
> 1) {
1019 re
->re_desc
.d_dst
= sc
->sc_dpalloc
.dma_paddr
+
1020 ((caddr_t
) sc
->sc_dpfree
- (caddr_t
) sc
->sc_dpring
);
1021 for (i
= 0; i
< re
->re_dst_nsegs
; i
++) {
1023 KASSERT((pd
->pd_flags
&3) == 0 ||
1024 (pd
->pd_flags
&3) == SAFE_PD_DONE
,
1025 ("bogus dest particle descriptor; flags %x",
1027 if (++(sc
->sc_dpfree
) == sc
->sc_dpringtop
)
1028 sc
->sc_dpfree
= sc
->sc_dpring
;
1029 pd
->pd_addr
= re
->re_dst_segs
[i
].ds_addr
;
1030 pd
->pd_flags
= SAFE_PD_READY
;
1032 cmd0
|= SAFE_SA_CMD0_OSCATTER
;
1035 * No need for scatter, reference the operand directly.
1037 re
->re_desc
.d_dst
= re
->re_dst_segs
[0].ds_addr
;
1042 * All done with setup; fillin the SA command words
1043 * and the packet engine descriptor. The operation
1044 * is now ready for submission to the hardware.
1046 sa
->sa_cmd0
= cmd0
| SAFE_SA_CMD0_IPCI
| SAFE_SA_CMD0_OPCI
;
1048 | (coffset
<< SAFE_SA_CMD1_OFFSET_S
)
1049 | SAFE_SA_CMD1_SAREV1
/* Rev 1 SA data structure */
1050 | SAFE_SA_CMD1_SRPCI
1053 * NB: the order of writes is important here. In case the
1054 * chip is scanning the ring because of an outstanding request
1055 * it might nab this one too. In that case we need to make
1056 * sure the setup is complete before we write the length
1057 * field of the descriptor as it signals the descriptor is
1058 * ready for processing.
1060 re
->re_desc
.d_csr
= SAFE_PE_CSR_READY
| SAFE_PE_CSR_SAPCI
;
1062 re
->re_desc
.d_csr
|= SAFE_PE_CSR_LOADSA
| SAFE_PE_CSR_HASHFINAL
;
1064 re
->re_desc
.d_len
= oplen
1066 | (bypass
<< SAFE_PE_LEN_BYPASS_S
)
1069 safestats
.st_ipackets
++;
1070 safestats
.st_ibytes
+= oplen
;
1072 if (++(sc
->sc_front
) == sc
->sc_ringtop
)
1073 sc
->sc_front
= sc
->sc_ring
;
1075 /* XXX honor batching */
1077 spin_unlock_irqrestore(&sc
->sc_ringmtx
, flags
);
1081 if (re
->re_src
.map
!= re
->re_dst
.map
)
1082 pci_unmap_operand(sc
, &re
->re_dst
);
1084 pci_unmap_operand(sc
, &re
->re_src
);
1085 spin_unlock_irqrestore(&sc
->sc_ringmtx
, flags
);
1086 if (err
!= ERESTART
) {
1087 crp
->crp_etype
= err
;
1090 sc
->sc_needwakeup
|= CRYPTO_SYMQ
;
1096 safe_callback(struct safe_softc
*sc
, struct safe_ringentry
*re
)
1098 struct cryptop
*crp
= (struct cryptop
*)re
->re_crp
;
1099 struct cryptodesc
*crd
;
1101 DPRINTF(("%s()\n", __FUNCTION__
));
1103 safestats
.st_opackets
++;
1104 safestats
.st_obytes
+= re
->re_dst
.mapsize
;
1106 if (re
->re_desc
.d_csr
& SAFE_PE_CSR_STATUS
) {
1107 device_printf(sc
->sc_dev
, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
1109 re
->re_sa
.sa_cmd0
, re
->re_sa
.sa_cmd1
);
1110 safestats
.st_peoperr
++;
1111 crp
->crp_etype
= EIO
; /* something more meaningful? */
1114 if (re
->re_dst
.map
!= NULL
&& re
->re_dst
.map
!= re
->re_src
.map
)
1115 pci_unmap_operand(sc
, &re
->re_dst
);
1116 pci_unmap_operand(sc
, &re
->re_src
);
1119 * If result was written to a differet mbuf chain, swap
1120 * it in as the return value and reclaim the original.
1122 if ((crp
->crp_flags
& CRYPTO_F_SKBUF
) && re
->re_src_skb
!= re
->re_dst_skb
) {
1123 device_printf(sc
->sc_dev
, "no CRYPTO_F_SKBUF swapping support\n");
1124 /* kfree_skb(skb) */
1125 /* crp->crp_buf = (caddr_t)re->re_dst_skb */
1129 if (re
->re_flags
& SAFE_QFLAGS_COPYOUTIV
) {
1130 /* copy out IV for future use */
1131 for (crd
= crp
->crp_desc
; crd
; crd
= crd
->crd_next
) {
1135 if (crd
->crd_alg
== CRYPTO_DES_CBC
||
1136 crd
->crd_alg
== CRYPTO_3DES_CBC
) {
1137 ivsize
= 2*sizeof(u_int32_t
);
1138 } else if (crd
->crd_alg
== CRYPTO_AES_CBC
) {
1139 ivsize
= 4*sizeof(u_int32_t
);
1142 crypto_copydata(crp
->crp_flags
, crp
->crp_buf
,
1143 crd
->crd_skip
+ crd
->crd_len
- ivsize
, ivsize
,
1144 (caddr_t
)sc
->sc_sessions
[re
->re_sesn
].ses_iv
);
1146 i
< ivsize
/sizeof(sc
->sc_sessions
[re
->re_sesn
].ses_iv
[0]);
1148 sc
->sc_sessions
[re
->re_sesn
].ses_iv
[i
] =
1149 cpu_to_le32(sc
->sc_sessions
[re
->re_sesn
].ses_iv
[i
]);
1154 if (re
->re_flags
& SAFE_QFLAGS_COPYOUTICV
) {
1155 /* copy out ICV result */
1156 for (crd
= crp
->crp_desc
; crd
; crd
= crd
->crd_next
) {
1157 if (!(crd
->crd_alg
== CRYPTO_MD5_HMAC
||
1158 crd
->crd_alg
== CRYPTO_SHA1_HMAC
||
1159 crd
->crd_alg
== CRYPTO_NULL_HMAC
))
1161 if (crd
->crd_alg
== CRYPTO_SHA1_HMAC
) {
1163 * SHA-1 ICV's are byte-swapped; fix 'em up
1164 * before copy them to their destination.
1166 re
->re_sastate
.sa_saved_indigest
[0] =
1167 cpu_to_be32(re
->re_sastate
.sa_saved_indigest
[0]);
1168 re
->re_sastate
.sa_saved_indigest
[1] =
1169 cpu_to_be32(re
->re_sastate
.sa_saved_indigest
[1]);
1170 re
->re_sastate
.sa_saved_indigest
[2] =
1171 cpu_to_be32(re
->re_sastate
.sa_saved_indigest
[2]);
1173 re
->re_sastate
.sa_saved_indigest
[0] =
1174 cpu_to_le32(re
->re_sastate
.sa_saved_indigest
[0]);
1175 re
->re_sastate
.sa_saved_indigest
[1] =
1176 cpu_to_le32(re
->re_sastate
.sa_saved_indigest
[1]);
1177 re
->re_sastate
.sa_saved_indigest
[2] =
1178 cpu_to_le32(re
->re_sastate
.sa_saved_indigest
[2]);
1180 crypto_copyback(crp
->crp_flags
, crp
->crp_buf
,
1182 sc
->sc_sessions
[re
->re_sesn
].ses_mlen
,
1183 (caddr_t
)re
->re_sastate
.sa_saved_indigest
);
1191 #if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
1192 #define SAFE_RNG_MAXWAIT 1000
1195 safe_rng_init(struct safe_softc
*sc
)
1200 DPRINTF(("%s()\n", __FUNCTION__
));
1202 WRITE_REG(sc
, SAFE_RNG_CTRL
, 0);
1203 /* use default value according to the manual */
1204 WRITE_REG(sc
, SAFE_RNG_CNFG
, 0x834); /* magic from SafeNet */
1205 WRITE_REG(sc
, SAFE_RNG_ALM_CNT
, 0);
1208 * There is a bug in rev 1.0 of the 1140 that when the RNG
1209 * is brought out of reset the ready status flag does not
1210 * work until the RNG has finished its internal initialization.
1212 * So in order to determine the device is through its
1213 * initialization we must read the data register, using the
1214 * status reg in the read in case it is initialized. Then read
1215 * the data register until it changes from the first read.
1216 * Once it changes read the data register until it changes
1217 * again. At this time the RNG is considered initialized.
1218 * This could take between 750ms - 1000ms in time.
1221 w
= READ_REG(sc
, SAFE_RNG_OUT
);
1223 v
= READ_REG(sc
, SAFE_RNG_OUT
);
1229 } while (++i
< SAFE_RNG_MAXWAIT
);
1231 /* Wait Until data changes again */
1234 v
= READ_REG(sc
, SAFE_RNG_OUT
);
1238 } while (++i
< SAFE_RNG_MAXWAIT
);
1241 static __inline
void
1242 safe_rng_disable_short_cycle(struct safe_softc
*sc
)
1244 DPRINTF(("%s()\n", __FUNCTION__
));
1246 WRITE_REG(sc
, SAFE_RNG_CTRL
,
1247 READ_REG(sc
, SAFE_RNG_CTRL
) &~ SAFE_RNG_CTRL_SHORTEN
);
1250 static __inline
void
1251 safe_rng_enable_short_cycle(struct safe_softc
*sc
)
1253 DPRINTF(("%s()\n", __FUNCTION__
));
1255 WRITE_REG(sc
, SAFE_RNG_CTRL
,
1256 READ_REG(sc
, SAFE_RNG_CTRL
) | SAFE_RNG_CTRL_SHORTEN
);
1259 static __inline u_int32_t
1260 safe_rng_read(struct safe_softc
*sc
)
1265 while (READ_REG(sc
, SAFE_RNG_STAT
) != 0 && ++i
< SAFE_RNG_MAXWAIT
)
1267 return READ_REG(sc
, SAFE_RNG_OUT
);
1271 safe_read_random(void *arg
, u_int32_t
*buf
, int maxwords
)
1273 struct safe_softc
*sc
= (struct safe_softc
*) arg
;
1276 DPRINTF(("%s()\n", __FUNCTION__
));
1280 * Fetch the next block of data.
1282 if (maxwords
> safe_rngbufsize
)
1283 maxwords
= safe_rngbufsize
;
1284 if (maxwords
> SAFE_RNG_MAXBUFSIZ
)
1285 maxwords
= SAFE_RNG_MAXBUFSIZ
;
1287 /* read as much as we can */
1288 for (rc
= 0; rc
< maxwords
; rc
++) {
1289 if (READ_REG(sc
, SAFE_RNG_STAT
) != 0)
1291 buf
[rc
] = READ_REG(sc
, SAFE_RNG_OUT
);
1296 * Check the comparator alarm count and reset the h/w if
1297 * it exceeds our threshold. This guards against the
1298 * hardware oscillators resonating with external signals.
1300 if (READ_REG(sc
, SAFE_RNG_ALM_CNT
) > safe_rngmaxalarm
) {
1301 u_int32_t freq_inc
, w
;
1303 DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__
,
1304 (unsigned)READ_REG(sc
, SAFE_RNG_ALM_CNT
), safe_rngmaxalarm
));
1305 safestats
.st_rngalarm
++;
1306 safe_rng_enable_short_cycle(sc
);
1308 for (i
= 0; i
< 64; i
++) {
1309 w
= READ_REG(sc
, SAFE_RNG_CNFG
);
1310 freq_inc
= ((w
+ freq_inc
) & 0x3fL
);
1311 w
= ((w
& ~0x3fL
) | freq_inc
);
1312 WRITE_REG(sc
, SAFE_RNG_CNFG
, w
);
1314 WRITE_REG(sc
, SAFE_RNG_ALM_CNT
, 0);
1316 (void) safe_rng_read(sc
);
1319 if (READ_REG(sc
, SAFE_RNG_ALM_CNT
) == 0) {
1320 safe_rng_disable_short_cycle(sc
);
1325 safe_rng_disable_short_cycle(sc
);
1327 WRITE_REG(sc
, SAFE_RNG_ALM_CNT
, 0);
1331 #endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
1335 * Resets the board. Values in the regesters are left as is
1336 * from the reset (i.e. initial values are assigned elsewhere).
1339 safe_reset_board(struct safe_softc
*sc
)
1343 * Reset the device. The manual says no delay
1344 * is needed between marking and clearing reset.
1346 DPRINTF(("%s()\n", __FUNCTION__
));
1348 v
= READ_REG(sc
, SAFE_PE_DMACFG
) &~
1349 (SAFE_PE_DMACFG_PERESET
| SAFE_PE_DMACFG_PDRRESET
|
1350 SAFE_PE_DMACFG_SGRESET
);
1351 WRITE_REG(sc
, SAFE_PE_DMACFG
, v
1352 | SAFE_PE_DMACFG_PERESET
1353 | SAFE_PE_DMACFG_PDRRESET
1354 | SAFE_PE_DMACFG_SGRESET
);
1355 WRITE_REG(sc
, SAFE_PE_DMACFG
, v
);
1359 * Initialize registers we need to touch only once.
1362 safe_init_board(struct safe_softc
*sc
)
1364 u_int32_t v
, dwords
;
1366 DPRINTF(("%s()\n", __FUNCTION__
));
1368 v
= READ_REG(sc
, SAFE_PE_DMACFG
);
1369 v
&=~ ( SAFE_PE_DMACFG_PEMODE
1370 | SAFE_PE_DMACFG_FSENA
/* failsafe enable */
1371 | SAFE_PE_DMACFG_GPRPCI
/* gather ring on PCI */
1372 | SAFE_PE_DMACFG_SPRPCI
/* scatter ring on PCI */
1373 | SAFE_PE_DMACFG_ESDESC
/* endian-swap descriptors */
1374 | SAFE_PE_DMACFG_ESPDESC
/* endian-swap part. desc's */
1375 | SAFE_PE_DMACFG_ESSA
/* endian-swap SA's */
1376 | SAFE_PE_DMACFG_ESPACKET
/* swap the packet data */
1378 v
|= SAFE_PE_DMACFG_FSENA
/* failsafe enable */
1379 | SAFE_PE_DMACFG_GPRPCI
/* gather ring on PCI */
1380 | SAFE_PE_DMACFG_SPRPCI
/* scatter ring on PCI */
1381 | SAFE_PE_DMACFG_ESDESC
/* endian-swap descriptors */
1382 | SAFE_PE_DMACFG_ESPDESC
/* endian-swap part. desc's */
1383 | SAFE_PE_DMACFG_ESSA
/* endian-swap SA's */
1385 | SAFE_PE_DMACFG_ESPACKET
/* swap the packet data */
1388 WRITE_REG(sc
, SAFE_PE_DMACFG
, v
);
1391 /* tell the safenet that we are 4321 and not 1234 */
1392 WRITE_REG(sc
, SAFE_ENDIAN
, 0xe4e41b1b);
1395 if (sc
->sc_chiprev
== SAFE_REV(1,0)) {
1397 * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
1398 * "target mode transfers" done while the chip is DMA'ing
1399 * >1020 bytes cause the hardware to lockup. To avoid this
1400 * we reduce the max PCI transfer size and use small source
1401 * particle descriptors (<= 256 bytes).
1403 WRITE_REG(sc
, SAFE_DMA_CFG
, 256);
1404 device_printf(sc
->sc_dev
,
1405 "Reduce max DMA size to %u words for rev %u.%u WAR\n",
1406 (unsigned) ((READ_REG(sc
, SAFE_DMA_CFG
)>>2) & 0xff),
1407 (unsigned) SAFE_REV_MAJ(sc
->sc_chiprev
),
1408 (unsigned) SAFE_REV_MIN(sc
->sc_chiprev
));
1409 sc
->sc_max_dsize
= 256;
1411 sc
->sc_max_dsize
= SAFE_MAX_DSIZE
;
1414 /* NB: operands+results are overlaid */
1415 WRITE_REG(sc
, SAFE_PE_PDRBASE
, sc
->sc_ringalloc
.dma_paddr
);
1416 WRITE_REG(sc
, SAFE_PE_RDRBASE
, sc
->sc_ringalloc
.dma_paddr
);
1418 * Configure ring entry size and number of items in the ring.
1420 KASSERT((sizeof(struct safe_ringentry
) % sizeof(u_int32_t
)) == 0,
1421 ("PE ring entry not 32-bit aligned!"));
1422 dwords
= sizeof(struct safe_ringentry
) / sizeof(u_int32_t
);
1423 WRITE_REG(sc
, SAFE_PE_RINGCFG
,
1424 (dwords
<< SAFE_PE_RINGCFG_OFFSET_S
) | SAFE_MAX_NQUEUE
);
1425 WRITE_REG(sc
, SAFE_PE_RINGPOLL
, 0); /* disable polling */
1427 WRITE_REG(sc
, SAFE_PE_GRNGBASE
, sc
->sc_spalloc
.dma_paddr
);
1428 WRITE_REG(sc
, SAFE_PE_SRNGBASE
, sc
->sc_dpalloc
.dma_paddr
);
1429 WRITE_REG(sc
, SAFE_PE_PARTSIZE
,
1430 (SAFE_TOTAL_DPART
<<16) | SAFE_TOTAL_SPART
);
1432 * NB: destination particles are fixed size. We use
1433 * an mbuf cluster and require all results go to
1434 * clusters or smaller.
1436 WRITE_REG(sc
, SAFE_PE_PARTCFG
, sc
->sc_max_dsize
);
1438 /* it's now safe to enable PE mode, do it */
1439 WRITE_REG(sc
, SAFE_PE_DMACFG
, v
| SAFE_PE_DMACFG_PEMODE
);
1442 * Configure hardware to use level-triggered interrupts and
1443 * to interrupt after each descriptor is processed.
1445 WRITE_REG(sc
, SAFE_HI_CFG
, SAFE_HI_CFG_LEVEL
);
1446 WRITE_REG(sc
, SAFE_HI_CLR
, 0xffffffff);
1447 WRITE_REG(sc
, SAFE_HI_DESC_CNT
, 1);
1448 WRITE_REG(sc
, SAFE_HI_MASK
, SAFE_INT_PE_DDONE
| SAFE_INT_PE_ERROR
);
1453 * Clean up after a chip crash.
1454 * It is assumed that the caller in splimp()
1457 safe_cleanchip(struct safe_softc
*sc
)
1459 DPRINTF(("%s()\n", __FUNCTION__
));
1461 if (sc
->sc_nqchip
!= 0) {
1462 struct safe_ringentry
*re
= sc
->sc_back
;
1464 while (re
!= sc
->sc_front
) {
1465 if (re
->re_desc
.d_csr
!= 0)
1466 safe_free_entry(sc
, re
);
1467 if (++re
== sc
->sc_ringtop
)
1477 * It is assumed that the caller is within splimp().
1480 safe_free_entry(struct safe_softc
*sc
, struct safe_ringentry
*re
)
1482 struct cryptop
*crp
;
1484 DPRINTF(("%s()\n", __FUNCTION__
));
1489 if ((re
->re_dst_skb
!= NULL
) && (re
->re_src_skb
!= re
->re_dst_skb
))
1491 m_freem(re
->re_dst_m
);
1493 printk("%s,%d: SKB not supported\n", __FILE__
, __LINE__
);
1496 crp
= (struct cryptop
*)re
->re_crp
;
1498 re
->re_desc
.d_csr
= 0;
1500 crp
->crp_etype
= EFAULT
;
1506 * Routine to reset the chip and clean up.
1507 * It is assumed that the caller is in splimp()
/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	DPRINTF(("%s()\n", __FUNCTION__));

	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}
1520 * Is the operand suitable aligned for direct DMA. Each
1521 * segment must be aligned on a 32-bit boundary and all
1522 * but the last segment must be a multiple of 4 bytes.
1525 safe_dmamap_aligned(struct safe_softc
*sc
, const struct safe_operand
*op
)
1529 DPRINTF(("%s()\n", __FUNCTION__
));
1531 for (i
= 0; i
< op
->nsegs
; i
++) {
1532 if (op
->segs
[i
].ds_addr
& 3)
1534 if (i
!= (op
->nsegs
- 1) && (op
->segs
[i
].ds_len
& 3))
1541 * Is the operand suitable for direct DMA as the destination
1542 * of an operation. The hardware requires that each ``particle''
1543 * but the last in an operation result have the same size. We
1544 * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
1545 * 0 if some segment is not a multiple of of this size, 1 if all
1546 * segments are exactly this size, or 2 if segments are at worst
1547 * a multple of this size.
1550 safe_dmamap_uniform(struct safe_softc
*sc
, const struct safe_operand
*op
)
1554 DPRINTF(("%s()\n", __FUNCTION__
));
1556 if (op
->nsegs
> 0) {
1559 for (i
= 0; i
< op
->nsegs
-1; i
++) {
1560 if (op
->segs
[i
].ds_len
% sc
->sc_max_dsize
)
1562 if (op
->segs
[i
].ds_len
!= sc
->sc_max_dsize
)
1570 safe_kprocess(device_t dev
, struct cryptkop
*krp
, int hint
)
1572 struct safe_softc
*sc
= device_get_softc(dev
);
1574 unsigned long flags
;
1576 DPRINTF(("%s()\n", __FUNCTION__
));
1579 krp
->krp_status
= EINVAL
;
1583 if (krp
->krp_op
!= CRK_MOD_EXP
) {
1584 krp
->krp_status
= EOPNOTSUPP
;
1588 q
= (struct safe_pkq
*) kmalloc(sizeof(*q
), GFP_KERNEL
);
1590 krp
->krp_status
= ENOMEM
;
1593 memset(q
, 0, sizeof(*q
));
1595 INIT_LIST_HEAD(&q
->pkq_list
);
1597 spin_lock_irqsave(&sc
->sc_pkmtx
, flags
);
1598 list_add_tail(&q
->pkq_list
, &sc
->sc_pkq
);
1600 spin_unlock_irqrestore(&sc
->sc_pkmtx
, flags
);
1608 #define SAFE_CRK_PARAM_BASE 0
1609 #define SAFE_CRK_PARAM_EXP 1
1610 #define SAFE_CRK_PARAM_MOD 2
1613 safe_kstart(struct safe_softc
*sc
)
1615 struct cryptkop
*krp
= sc
->sc_pkq_cur
->pkq_krp
;
1616 int exp_bits
, mod_bits
, base_bits
;
1617 u_int32_t op
, a_off
, b_off
, c_off
, d_off
;
1619 DPRINTF(("%s()\n", __FUNCTION__
));
1621 if (krp
->krp_iparams
< 3 || krp
->krp_oparams
!= 1) {
1622 krp
->krp_status
= EINVAL
;
1626 base_bits
= safe_ksigbits(sc
, &krp
->krp_param
[SAFE_CRK_PARAM_BASE
]);
1627 if (base_bits
> 2048)
1629 if (base_bits
<= 0) /* 5. base not zero */
1632 exp_bits
= safe_ksigbits(sc
, &krp
->krp_param
[SAFE_CRK_PARAM_EXP
]);
1633 if (exp_bits
> 2048)
1635 if (exp_bits
<= 0) /* 1. exponent word length > 0 */
1636 goto too_small
; /* 4. exponent not zero */
1638 mod_bits
= safe_ksigbits(sc
, &krp
->krp_param
[SAFE_CRK_PARAM_MOD
]);
1639 if (mod_bits
> 2048)
1641 if (mod_bits
<= 32) /* 2. modulus word length > 1 */
1642 goto too_small
; /* 8. MSW of modulus != zero */
1643 if (mod_bits
< exp_bits
) /* 3 modulus len >= exponent len */
1645 if ((krp
->krp_param
[SAFE_CRK_PARAM_MOD
].crp_p
[0] & 1) == 0)
1646 goto bad_domain
; /* 6. modulus is odd */
1647 if (mod_bits
> krp
->krp_param
[krp
->krp_iparams
].crp_nbits
)
1648 goto too_small
; /* make sure result will fit */
1650 /* 7. modulus > base */
1651 if (mod_bits
< base_bits
)
1653 if (mod_bits
== base_bits
) {
1654 u_int8_t
*basep
, *modp
;
1657 basep
= krp
->krp_param
[SAFE_CRK_PARAM_BASE
].crp_p
+
1658 ((base_bits
+ 7) / 8) - 1;
1659 modp
= krp
->krp_param
[SAFE_CRK_PARAM_MOD
].crp_p
+
1660 ((mod_bits
+ 7) / 8) - 1;
1662 for (i
= 0; i
< (mod_bits
+ 7) / 8; i
++, basep
--, modp
--) {
1670 /* And on the 9th step, he rested. */
1672 WRITE_REG(sc
, SAFE_PK_A_LEN
, (exp_bits
+ 31) / 32);
1673 WRITE_REG(sc
, SAFE_PK_B_LEN
, (mod_bits
+ 31) / 32);
1674 if (mod_bits
> 1024) {
1675 op
= SAFE_PK_FUNC_EXP4
;
1681 op
= SAFE_PK_FUNC_EXP16
;
1687 sc
->sc_pk_reslen
= b_off
- a_off
;
1688 sc
->sc_pk_resoff
= d_off
;
1690 /* A is exponent, B is modulus, C is base, D is result */
1691 safe_kload_reg(sc
, a_off
, b_off
- a_off
,
1692 &krp
->krp_param
[SAFE_CRK_PARAM_EXP
]);
1693 WRITE_REG(sc
, SAFE_PK_A_ADDR
, a_off
>> 2);
1694 safe_kload_reg(sc
, b_off
, b_off
- a_off
,
1695 &krp
->krp_param
[SAFE_CRK_PARAM_MOD
]);
1696 WRITE_REG(sc
, SAFE_PK_B_ADDR
, b_off
>> 2);
1697 safe_kload_reg(sc
, c_off
, b_off
- a_off
,
1698 &krp
->krp_param
[SAFE_CRK_PARAM_BASE
]);
1699 WRITE_REG(sc
, SAFE_PK_C_ADDR
, c_off
>> 2);
1700 WRITE_REG(sc
, SAFE_PK_D_ADDR
, d_off
>> 2);
1702 WRITE_REG(sc
, SAFE_PK_FUNC
, op
| SAFE_PK_FUNC_RUN
);
1707 krp
->krp_status
= E2BIG
;
1710 krp
->krp_status
= ERANGE
;
1713 krp
->krp_status
= EDOM
;
1718 safe_ksigbits(struct safe_softc
*sc
, struct crparam
*cr
)
1720 u_int plen
= (cr
->crp_nbits
+ 7) / 8;
1721 int i
, sig
= plen
* 8;
1722 u_int8_t c
, *p
= cr
->crp_p
;
1724 DPRINTF(("%s()\n", __FUNCTION__
));
1726 for (i
= plen
- 1; i
>= 0; i
--) {
1729 while ((c
& 0x80) == 0) {
1741 safe_kfeed(struct safe_softc
*sc
)
1743 struct safe_pkq
*q
, *tmp
;
1745 DPRINTF(("%s()\n", __FUNCTION__
));
1747 if (list_empty(&sc
->sc_pkq
) && sc
->sc_pkq_cur
== NULL
)
1749 if (sc
->sc_pkq_cur
!= NULL
)
1751 list_for_each_entry_safe(q
, tmp
, &sc
->sc_pkq
, pkq_list
) {
1753 list_del(&q
->pkq_list
);
1754 if (safe_kstart(sc
) != 0) {
1755 crypto_kdone(q
->pkq_krp
);
1757 sc
->sc_pkq_cur
= NULL
;
1759 /* op started, start polling */
1760 mod_timer(&sc
->sc_pkto
, jiffies
+ 1);
1767 safe_kpoll(unsigned long arg
)
1769 struct safe_softc
*sc
= NULL
;
1771 struct crparam
*res
;
1774 unsigned long flags
;
1776 DPRINTF(("%s()\n", __FUNCTION__
));
1778 if (arg
>= SAFE_MAX_CHIPS
)
1780 sc
= safe_chip_idx
[arg
];
1782 DPRINTF(("%s() - bad callback\n", __FUNCTION__
));
1786 spin_lock_irqsave(&sc
->sc_pkmtx
, flags
);
1787 if (sc
->sc_pkq_cur
== NULL
)
1789 if (READ_REG(sc
, SAFE_PK_FUNC
) & SAFE_PK_FUNC_RUN
) {
1790 /* still running, check back later */
1791 mod_timer(&sc
->sc_pkto
, jiffies
+ 1);
1796 res
= &q
->pkq_krp
->krp_param
[q
->pkq_krp
->krp_iparams
];
1797 bzero(buf
, sizeof(buf
));
1798 bzero(res
->crp_p
, (res
->crp_nbits
+ 7) / 8);
1799 for (i
= 0; i
< sc
->sc_pk_reslen
>> 2; i
++)
1800 buf
[i
] = le32_to_cpu(READ_REG(sc
, SAFE_PK_RAM_START
+
1801 sc
->sc_pk_resoff
+ (i
<< 2)));
1802 bcopy(buf
, res
->crp_p
, (res
->crp_nbits
+ 7) / 8);
1804 * reduce the bits that need copying if possible
1806 res
->crp_nbits
= min(res
->crp_nbits
,sc
->sc_pk_reslen
* 8);
1807 res
->crp_nbits
= safe_ksigbits(sc
, res
);
1809 for (i
= SAFE_PK_RAM_START
; i
< SAFE_PK_RAM_END
; i
+= 4)
1810 WRITE_REG(sc
, i
, 0);
1812 crypto_kdone(q
->pkq_krp
);
1814 sc
->sc_pkq_cur
= NULL
;
1818 spin_unlock_irqrestore(&sc
->sc_pkmtx
, flags
);
1822 safe_kload_reg(struct safe_softc
*sc
, u_int32_t off
, u_int32_t len
,
1825 u_int32_t buf
[64], i
;
1827 DPRINTF(("%s()\n", __FUNCTION__
));
1829 bzero(buf
, sizeof(buf
));
1830 bcopy(n
->crp_p
, buf
, (n
->crp_nbits
+ 7) / 8);
1832 for (i
= 0; i
< len
>> 2; i
++)
1833 WRITE_REG(sc
, SAFE_PK_RAM_START
+ off
+ (i
<< 2),
1834 cpu_to_le32(buf
[i
]));
1839 safe_dump_dmastatus(struct safe_softc
*sc
, const char *tag
)
1841 printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
1843 , READ_REG(sc
, SAFE_DMA_ENDIAN
)
1844 , READ_REG(sc
, SAFE_DMA_SRCADDR
)
1845 , READ_REG(sc
, SAFE_DMA_DSTADDR
)
1846 , READ_REG(sc
, SAFE_DMA_STAT
)
1851 safe_dump_intrstate(struct safe_softc
*sc
, const char *tag
)
1853 printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
1855 , READ_REG(sc
, SAFE_HI_CFG
)
1856 , READ_REG(sc
, SAFE_HI_MASK
)
1857 , READ_REG(sc
, SAFE_HI_DESC_CNT
)
1858 , READ_REG(sc
, SAFE_HU_STAT
)
1859 , READ_REG(sc
, SAFE_HM_STAT
)
1864 safe_dump_ringstate(struct safe_softc
*sc
, const char *tag
)
1866 u_int32_t estat
= READ_REG(sc
, SAFE_PE_ERNGSTAT
);
1868 /* NB: assume caller has lock on ring */
1869 printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
1871 estat
, (estat
>> SAFE_PE_ERNGSTAT_NEXT_S
),
1872 (unsigned long)(sc
->sc_back
- sc
->sc_ring
),
1873 (unsigned long)(sc
->sc_front
- sc
->sc_ring
));
1877 safe_dump_request(struct safe_softc
*sc
, const char* tag
, struct safe_ringentry
*re
)
1881 ix
= re
- sc
->sc_ring
;
1882 printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
1891 if (re
->re_src
.nsegs
> 1) {
1892 ix
= (re
->re_desc
.d_src
- sc
->sc_spalloc
.dma_paddr
) /
1893 sizeof(struct safe_pdesc
);
1894 for (nsegs
= re
->re_src
.nsegs
; nsegs
; nsegs
--) {
1895 printf(" spd[%u] %p: %p size %u flags %x"
1896 , ix
, &sc
->sc_spring
[ix
]
1897 , (caddr_t
)(uintptr_t) sc
->sc_spring
[ix
].pd_addr
1898 , sc
->sc_spring
[ix
].pd_size
1899 , sc
->sc_spring
[ix
].pd_flags
1901 if (sc
->sc_spring
[ix
].pd_size
== 0)
1904 if (++ix
== SAFE_TOTAL_SPART
)
1908 if (re
->re_dst
.nsegs
> 1) {
1909 ix
= (re
->re_desc
.d_dst
- sc
->sc_dpalloc
.dma_paddr
) /
1910 sizeof(struct safe_pdesc
);
1911 for (nsegs
= re
->re_dst
.nsegs
; nsegs
; nsegs
--) {
1912 printf(" dpd[%u] %p: %p flags %x\n"
1913 , ix
, &sc
->sc_dpring
[ix
]
1914 , (caddr_t
)(uintptr_t) sc
->sc_dpring
[ix
].pd_addr
1915 , sc
->sc_dpring
[ix
].pd_flags
1917 if (++ix
== SAFE_TOTAL_DPART
)
1921 printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
1922 re
->re_sa
.sa_cmd0
, re
->re_sa
.sa_cmd1
, re
->re_sa
.sa_staterec
);
1923 printf("sa: key %x %x %x %x %x %x %x %x\n"
1924 , re
->re_sa
.sa_key
[0]
1925 , re
->re_sa
.sa_key
[1]
1926 , re
->re_sa
.sa_key
[2]
1927 , re
->re_sa
.sa_key
[3]
1928 , re
->re_sa
.sa_key
[4]
1929 , re
->re_sa
.sa_key
[5]
1930 , re
->re_sa
.sa_key
[6]
1931 , re
->re_sa
.sa_key
[7]
1933 printf("sa: indigest %x %x %x %x %x\n"
1934 , re
->re_sa
.sa_indigest
[0]
1935 , re
->re_sa
.sa_indigest
[1]
1936 , re
->re_sa
.sa_indigest
[2]
1937 , re
->re_sa
.sa_indigest
[3]
1938 , re
->re_sa
.sa_indigest
[4]
1940 printf("sa: outdigest %x %x %x %x %x\n"
1941 , re
->re_sa
.sa_outdigest
[0]
1942 , re
->re_sa
.sa_outdigest
[1]
1943 , re
->re_sa
.sa_outdigest
[2]
1944 , re
->re_sa
.sa_outdigest
[3]
1945 , re
->re_sa
.sa_outdigest
[4]
1947 printf("sr: iv %x %x %x %x\n"
1948 , re
->re_sastate
.sa_saved_iv
[0]
1949 , re
->re_sastate
.sa_saved_iv
[1]
1950 , re
->re_sastate
.sa_saved_iv
[2]
1951 , re
->re_sastate
.sa_saved_iv
[3]
1953 printf("sr: hashbc %u indigest %x %x %x %x %x\n"
1954 , re
->re_sastate
.sa_saved_hashbc
1955 , re
->re_sastate
.sa_saved_indigest
[0]
1956 , re
->re_sastate
.sa_saved_indigest
[1]
1957 , re
->re_sastate
.sa_saved_indigest
[2]
1958 , re
->re_sastate
.sa_saved_indigest
[3]
1959 , re
->re_sastate
.sa_saved_indigest
[4]
1964 safe_dump_ring(struct safe_softc
*sc
, const char *tag
)
1966 unsigned long flags
;
1968 spin_lock_irqsave(&sc
->sc_ringmtx
, flags
);
1969 printf("\nSafeNet Ring State:\n");
1970 safe_dump_intrstate(sc
, tag
);
1971 safe_dump_dmastatus(sc
, tag
);
1972 safe_dump_ringstate(sc
, tag
);
1973 if (sc
->sc_nqchip
) {
1974 struct safe_ringentry
*re
= sc
->sc_back
;
1976 safe_dump_request(sc
, tag
, re
);
1977 if (++re
== sc
->sc_ringtop
)
1979 } while (re
!= sc
->sc_front
);
1981 spin_unlock_irqrestore(&sc
->sc_ringmtx
, flags
);
1983 #endif /* SAFE_DEBUG */
1986 static int safe_probe(struct pci_dev
*dev
, const struct pci_device_id
*ent
)
1988 struct safe_softc
*sc
= NULL
;
1989 u32 mem_start
, mem_len
, cmd
;
1992 static int num_chips
= 0;
1994 DPRINTF(("%s()\n", __FUNCTION__
));
1996 if (pci_enable_device(dev
) < 0)
2000 printk("safe: found device with no IRQ assigned. check BIOS settings!");
2001 pci_disable_device(dev
);
2005 if (pci_set_mwi(dev
)) {
2006 printk("safe: pci_set_mwi failed!");
2010 sc
= (struct safe_softc
*) kmalloc(sizeof(*sc
), GFP_KERNEL
);
2013 memset(sc
, 0, sizeof(*sc
));
2015 softc_device_init(sc
, "safe", num_chips
, safe_methods
);
2019 sc
->sc_pcidev
= dev
;
2020 if (num_chips
< SAFE_MAX_CHIPS
) {
2021 safe_chip_idx
[device_get_unit(sc
->sc_dev
)] = sc
;
2025 INIT_LIST_HEAD(&sc
->sc_pkq
);
2026 spin_lock_init(&sc
->sc_pkmtx
);
2028 pci_set_drvdata(sc
->sc_pcidev
, sc
);
2030 /* we read its hardware registers as memory */
2031 mem_start
= pci_resource_start(sc
->sc_pcidev
, 0);
2032 mem_len
= pci_resource_len(sc
->sc_pcidev
, 0);
2034 sc
->sc_base_addr
= (ocf_iomem_t
) ioremap(mem_start
, mem_len
);
2035 if (!sc
->sc_base_addr
) {
2036 device_printf(sc
->sc_dev
, "failed to ioremap 0x%x-0x%x\n",
2037 mem_start
, mem_start
+ mem_len
- 1);
2041 /* fix up the bus size */
2042 if (pci_set_dma_mask(sc
->sc_pcidev
, DMA_32BIT_MASK
)) {
2043 device_printf(sc
->sc_dev
, "No usable DMA configuration, aborting.\n");
2046 if (pci_set_consistent_dma_mask(sc
->sc_pcidev
, DMA_32BIT_MASK
)) {
2047 device_printf(sc
->sc_dev
, "No usable consistent DMA configuration, aborting.\n");
2051 pci_set_master(sc
->sc_pcidev
);
2053 pci_read_config_dword(sc
->sc_pcidev
, PCI_COMMAND
, &cmd
);
2055 if (!(cmd
& PCI_COMMAND_MEMORY
)) {
2056 device_printf(sc
->sc_dev
, "failed to enable memory mapping\n");
2060 if (!(cmd
& PCI_COMMAND_MASTER
)) {
2061 device_printf(sc
->sc_dev
, "failed to enable bus mastering\n");
2065 rc
= request_irq(dev
->irq
, safe_intr
, IRQF_SHARED
, "safe", sc
);
2067 device_printf(sc
->sc_dev
, "failed to hook irq %d\n", sc
->sc_irq
);
2070 sc
->sc_irq
= dev
->irq
;
2072 sc
->sc_chiprev
= READ_REG(sc
, SAFE_DEVINFO
) &
2073 (SAFE_DEVINFO_REV_MAJ
| SAFE_DEVINFO_REV_MIN
);
2076 * Allocate packet engine descriptors.
2078 sc
->sc_ringalloc
.dma_vaddr
= pci_alloc_consistent(sc
->sc_pcidev
,
2079 SAFE_MAX_NQUEUE
* sizeof (struct safe_ringentry
),
2080 &sc
->sc_ringalloc
.dma_paddr
);
2081 if (!sc
->sc_ringalloc
.dma_vaddr
) {
2082 device_printf(sc
->sc_dev
, "cannot allocate PE descriptor ring\n");
2087 * Hookup the static portion of all our data structures.
2089 sc
->sc_ring
= (struct safe_ringentry
*) sc
->sc_ringalloc
.dma_vaddr
;
2090 sc
->sc_ringtop
= sc
->sc_ring
+ SAFE_MAX_NQUEUE
;
2091 sc
->sc_front
= sc
->sc_ring
;
2092 sc
->sc_back
= sc
->sc_ring
;
2093 raddr
= sc
->sc_ringalloc
.dma_paddr
;
2094 bzero(sc
->sc_ring
, SAFE_MAX_NQUEUE
* sizeof(struct safe_ringentry
));
2095 for (i
= 0; i
< SAFE_MAX_NQUEUE
; i
++) {
2096 struct safe_ringentry
*re
= &sc
->sc_ring
[i
];
2098 re
->re_desc
.d_sa
= raddr
+
2099 offsetof(struct safe_ringentry
, re_sa
);
2100 re
->re_sa
.sa_staterec
= raddr
+
2101 offsetof(struct safe_ringentry
, re_sastate
);
2103 raddr
+= sizeof (struct safe_ringentry
);
2105 spin_lock_init(&sc
->sc_ringmtx
);
2108 * Allocate scatter and gather particle descriptors.
2110 sc
->sc_spalloc
.dma_vaddr
= pci_alloc_consistent(sc
->sc_pcidev
,
2111 SAFE_TOTAL_SPART
* sizeof (struct safe_pdesc
),
2112 &sc
->sc_spalloc
.dma_paddr
);
2113 if (!sc
->sc_spalloc
.dma_vaddr
) {
2114 device_printf(sc
->sc_dev
, "cannot allocate source particle descriptor ring\n");
2117 sc
->sc_spring
= (struct safe_pdesc
*) sc
->sc_spalloc
.dma_vaddr
;
2118 sc
->sc_springtop
= sc
->sc_spring
+ SAFE_TOTAL_SPART
;
2119 sc
->sc_spfree
= sc
->sc_spring
;
2120 bzero(sc
->sc_spring
, SAFE_TOTAL_SPART
* sizeof(struct safe_pdesc
));
2122 sc
->sc_dpalloc
.dma_vaddr
= pci_alloc_consistent(sc
->sc_pcidev
,
2123 SAFE_TOTAL_DPART
* sizeof (struct safe_pdesc
),
2124 &sc
->sc_dpalloc
.dma_paddr
);
2125 if (!sc
->sc_dpalloc
.dma_vaddr
) {
2126 device_printf(sc
->sc_dev
, "cannot allocate destination particle descriptor ring\n");
2129 sc
->sc_dpring
= (struct safe_pdesc
*) sc
->sc_dpalloc
.dma_vaddr
;
2130 sc
->sc_dpringtop
= sc
->sc_dpring
+ SAFE_TOTAL_DPART
;
2131 sc
->sc_dpfree
= sc
->sc_dpring
;
2132 bzero(sc
->sc_dpring
, SAFE_TOTAL_DPART
* sizeof(struct safe_pdesc
));
2134 sc
->sc_cid
= crypto_get_driverid(softc_get_device(sc
), CRYPTOCAP_F_HARDWARE
);
2135 if (sc
->sc_cid
< 0) {
2136 device_printf(sc
->sc_dev
, "could not get crypto driver id\n");
2140 printf("%s:", device_get_nameunit(sc
->sc_dev
));
2142 devinfo
= READ_REG(sc
, SAFE_DEVINFO
);
2143 if (devinfo
& SAFE_DEVINFO_RNG
) {
2144 sc
->sc_flags
|= SAFE_FLAGS_RNG
;
2147 if (devinfo
& SAFE_DEVINFO_PKEY
) {
2149 sc
->sc_flags
|= SAFE_FLAGS_KEY
;
2150 crypto_kregister(sc
->sc_cid
, CRK_MOD_EXP
, 0);
2152 crypto_kregister(sc
->sc_cid
, CRK_MOD_EXP_CRT
, 0);
2154 init_timer(&sc
->sc_pkto
);
2155 sc
->sc_pkto
.function
= safe_kpoll
;
2156 sc
->sc_pkto
.data
= (unsigned long) device_get_unit(sc
->sc_dev
);
2158 if (devinfo
& SAFE_DEVINFO_DES
) {
2159 printf(" des/3des");
2160 crypto_register(sc
->sc_cid
, CRYPTO_3DES_CBC
, 0, 0);
2161 crypto_register(sc
->sc_cid
, CRYPTO_DES_CBC
, 0, 0);
2163 if (devinfo
& SAFE_DEVINFO_AES
) {
2165 crypto_register(sc
->sc_cid
, CRYPTO_AES_CBC
, 0, 0);
2167 if (devinfo
& SAFE_DEVINFO_MD5
) {
2169 crypto_register(sc
->sc_cid
, CRYPTO_MD5_HMAC
, 0, 0);
2171 if (devinfo
& SAFE_DEVINFO_SHA1
) {
2173 crypto_register(sc
->sc_cid
, CRYPTO_SHA1_HMAC
, 0, 0);
2176 crypto_register(sc
->sc_cid
, CRYPTO_NULL_CBC
, 0, 0);
2177 crypto_register(sc
->sc_cid
, CRYPTO_NULL_HMAC
, 0, 0);
2178 /* XXX other supported algorithms */
2181 safe_reset_board(sc
); /* reset h/w */
2182 safe_init_board(sc
); /* init h/w */
2184 #if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
2185 if (sc
->sc_flags
& SAFE_FLAGS_RNG
) {
2187 crypto_rregister(sc
->sc_cid
, safe_read_random
, sc
);
2189 #endif /* SAFE_NO_RNG */
2194 if (sc
->sc_cid
>= 0)
2195 crypto_unregister_all(sc
->sc_cid
);
2196 if (sc
->sc_irq
!= -1)
2197 free_irq(sc
->sc_irq
, sc
);
2198 if (sc
->sc_ringalloc
.dma_vaddr
)
2199 pci_free_consistent(sc
->sc_pcidev
,
2200 SAFE_MAX_NQUEUE
* sizeof (struct safe_ringentry
),
2201 sc
->sc_ringalloc
.dma_vaddr
, sc
->sc_ringalloc
.dma_paddr
);
2202 if (sc
->sc_spalloc
.dma_vaddr
)
2203 pci_free_consistent(sc
->sc_pcidev
,
2204 SAFE_TOTAL_DPART
* sizeof (struct safe_pdesc
),
2205 sc
->sc_spalloc
.dma_vaddr
, sc
->sc_spalloc
.dma_paddr
);
2206 if (sc
->sc_dpalloc
.dma_vaddr
)
2207 pci_free_consistent(sc
->sc_pcidev
,
2208 SAFE_TOTAL_DPART
* sizeof (struct safe_pdesc
),
2209 sc
->sc_dpalloc
.dma_vaddr
, sc
->sc_dpalloc
.dma_paddr
);
2214 static void safe_remove(struct pci_dev
*dev
)
2216 struct safe_softc
*sc
= pci_get_drvdata(dev
);
2218 DPRINTF(("%s()\n", __FUNCTION__
));
2220 /* XXX wait/abort active ops */
2222 WRITE_REG(sc
, SAFE_HI_MASK
, 0); /* disable interrupts */
2224 del_timer_sync(&sc
->sc_pkto
);
2226 crypto_unregister_all(sc
->sc_cid
);
2230 if (sc
->sc_irq
!= -1)
2231 free_irq(sc
->sc_irq
, sc
);
2232 if (sc
->sc_ringalloc
.dma_vaddr
)
2233 pci_free_consistent(sc
->sc_pcidev
,
2234 SAFE_MAX_NQUEUE
* sizeof (struct safe_ringentry
),
2235 sc
->sc_ringalloc
.dma_vaddr
, sc
->sc_ringalloc
.dma_paddr
);
2236 if (sc
->sc_spalloc
.dma_vaddr
)
2237 pci_free_consistent(sc
->sc_pcidev
,
2238 SAFE_TOTAL_DPART
* sizeof (struct safe_pdesc
),
2239 sc
->sc_spalloc
.dma_vaddr
, sc
->sc_spalloc
.dma_paddr
);
2240 if (sc
->sc_dpalloc
.dma_vaddr
)
2241 pci_free_consistent(sc
->sc_pcidev
,
2242 SAFE_TOTAL_DPART
* sizeof (struct safe_pdesc
),
2243 sc
->sc_dpalloc
.dma_vaddr
, sc
->sc_dpalloc
.dma_paddr
);
2245 sc
->sc_ringalloc
.dma_vaddr
= NULL
;
2246 sc
->sc_spalloc
.dma_vaddr
= NULL
;
2247 sc
->sc_dpalloc
.dma_vaddr
= NULL
;
2250 static struct pci_device_id safe_pci_tbl
[] = {
2251 { PCI_VENDOR_SAFENET
, PCI_PRODUCT_SAFEXCEL
,
2252 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, },
2255 MODULE_DEVICE_TABLE(pci
, safe_pci_tbl
);
2257 static struct pci_driver safe_driver
= {
2259 .id_table
= safe_pci_tbl
,
2260 .probe
= safe_probe
,
2261 .remove
= safe_remove
,
2262 /* add PM stuff here one day */
2265 static int __init
safe_init (void)
2267 struct safe_softc
*sc
= NULL
;
2270 DPRINTF(("%s(%p)\n", __FUNCTION__
, safe_init
));
2272 rc
= pci_register_driver(&safe_driver
);
2273 pci_register_driver_compat(&safe_driver
, rc
);
2278 static void __exit
safe_exit (void)
2280 pci_unregister_driver(&safe_driver
);
2283 module_init(safe_init
);
2284 module_exit(safe_exit
);
2286 MODULE_LICENSE("BSD");
2287 MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
2288 MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");