1 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
4 * Invertex AEON / Hifn 7751 driver
5 * Copyright (c) 1999 Invertex Inc. All rights reserved.
6 * Copyright (c) 1999 Theo de Raadt
7 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
8 * http://www.netsec.net
9 * Copyright (c) 2003 Hifn Inc.
11 * This driver is based on a previous driver by Invertex, for which they
12 * requested: Please send any comments, feedback, bug-fixes, or feature
13 * requests to software@invertex.com.
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. The name of the author may not be used to endorse or promote products
25 * derived from this software without specific prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 * Effort sponsored in part by the Defense Advanced Research Projects
39 * Agency (DARPA) and Air Force Research Laboratory, Air Force
40 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43 __FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
47 * Driver for various Hifn encryption processors.
49 #ifndef AUTOCONF_INCLUDED
50 #include <linux/config.h>
52 #include <linux/module.h>
53 #include <linux/init.h>
54 #include <linux/list.h>
55 #include <linux/slab.h>
56 #include <linux/wait.h>
57 #include <linux/sched.h>
58 #include <linux/pci.h>
59 #include <linux/delay.h>
60 #include <linux/interrupt.h>
61 #include <linux/spinlock.h>
62 #include <linux/random.h>
63 #include <linux/version.h>
64 #include <linux/skbuff.h>
67 #include <cryptodev.h>
69 #include <hifn/hifn7751reg.h>
70 #include <hifn/hifn7751var.h>
73 #define DPRINTF(a...) if (hifn_debug) { \
75 device_get_nameunit(sc->sc_dev) : "hifn"); \
83 pci_get_revid(struct pci_dev
*dev
)
86 pci_read_config_byte(dev
, PCI_REVISION_ID
, &rid
);
90 static struct hifn_stats hifnstats
;
92 #define debug hifn_debug
94 module_param(hifn_debug
, int, 0644);
95 MODULE_PARM_DESC(hifn_debug
, "Enable debug");
97 int hifn_maxbatch
= 1;
98 module_param(hifn_maxbatch
, int, 0644);
99 MODULE_PARM_DESC(hifn_maxbatch
, "max ops to batch w/o interrupt");
102 char *hifn_pllconfig
= NULL
;
103 MODULE_PARM(hifn_pllconfig
, "s");
105 char hifn_pllconfig
[32]; /* This setting is RO after loading */
106 module_param_string(hifn_pllconfig
, hifn_pllconfig
, 32, 0444);
108 MODULE_PARM_DESC(hifn_pllconfig
, "PLL config, ie., pci66, ext33, ...");
110 #ifdef HIFN_VULCANDEV
111 #include <sys/conf.h>
114 static struct cdevsw vulcanpk_cdevsw
; /* forward declaration */
118 * Prototypes and count for the pci_device structure
120 static int hifn_probe(struct pci_dev
*dev
, const struct pci_device_id
*ent
);
121 static void hifn_remove(struct pci_dev
*dev
);
123 static int hifn_newsession(device_t
, u_int32_t
*, struct cryptoini
*);
124 static int hifn_freesession(device_t
, u_int64_t
);
125 static int hifn_process(device_t
, struct cryptop
*, int);
127 static device_method_t hifn_methods
= {
128 /* crypto device methods */
129 DEVMETHOD(cryptodev_newsession
, hifn_newsession
),
130 DEVMETHOD(cryptodev_freesession
,hifn_freesession
),
131 DEVMETHOD(cryptodev_process
, hifn_process
),
134 static void hifn_reset_board(struct hifn_softc
*, int);
135 static void hifn_reset_puc(struct hifn_softc
*);
136 static void hifn_puc_wait(struct hifn_softc
*);
137 static int hifn_enable_crypto(struct hifn_softc
*);
138 static void hifn_set_retry(struct hifn_softc
*sc
);
139 static void hifn_init_dma(struct hifn_softc
*);
140 static void hifn_init_pci_registers(struct hifn_softc
*);
141 static int hifn_sramsize(struct hifn_softc
*);
142 static int hifn_dramsize(struct hifn_softc
*);
143 static int hifn_ramtype(struct hifn_softc
*);
144 static void hifn_sessions(struct hifn_softc
*);
145 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
146 static irqreturn_t
hifn_intr(int irq
, void *arg
);
148 static irqreturn_t
hifn_intr(int irq
, void *arg
, struct pt_regs
*regs
);
150 static u_int
hifn_write_command(struct hifn_command
*, u_int8_t
*);
151 static u_int32_t
hifn_next_signature(u_int32_t a
, u_int cnt
);
152 static void hifn_callback(struct hifn_softc
*, struct hifn_command
*, u_int8_t
*);
153 static int hifn_crypto(struct hifn_softc
*, struct hifn_command
*, struct cryptop
*, int);
154 static int hifn_readramaddr(struct hifn_softc
*, int, u_int8_t
*);
155 static int hifn_writeramaddr(struct hifn_softc
*, int, u_int8_t
*);
156 static int hifn_dmamap_load_src(struct hifn_softc
*, struct hifn_command
*);
157 static int hifn_dmamap_load_dst(struct hifn_softc
*, struct hifn_command
*);
158 static int hifn_init_pubrng(struct hifn_softc
*);
159 static void hifn_tick(unsigned long arg
);
160 static void hifn_abort(struct hifn_softc
*);
161 static void hifn_alloc_slot(struct hifn_softc
*, int *, int *, int *, int *);
163 static void hifn_write_reg_0(struct hifn_softc
*, bus_size_t
, u_int32_t
);
164 static void hifn_write_reg_1(struct hifn_softc
*, bus_size_t
, u_int32_t
);
166 #ifdef CONFIG_OCF_RANDOMHARVEST
167 static int hifn_read_random(void *arg
, u_int32_t
*buf
, int len
);
170 #define HIFN_MAX_CHIPS 8
171 static struct hifn_softc
*hifn_chip_idx
[HIFN_MAX_CHIPS
];
173 static __inline u_int32_t
174 READ_REG_0(struct hifn_softc
*sc
, bus_size_t reg
)
176 u_int32_t v
= readl(sc
->sc_bar0
+ reg
);
177 sc
->sc_bar0_lastreg
= (bus_size_t
) -1;
180 #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
182 static __inline u_int32_t
183 READ_REG_1(struct hifn_softc
*sc
, bus_size_t reg
)
185 u_int32_t v
= readl(sc
->sc_bar1
+ reg
);
186 sc
->sc_bar1_lastreg
= (bus_size_t
) -1;
189 #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
192 * map in a given buffer (great on some arches :-)
196 pci_map_uio(struct hifn_softc
*sc
, struct hifn_operand
*buf
, struct uio
*uio
)
198 struct iovec
*iov
= uio
->uio_iov
;
200 DPRINTF("%s()\n", __FUNCTION__
);
203 for (buf
->nsegs
= 0; buf
->nsegs
< uio
->uio_iovcnt
; ) {
204 buf
->segs
[buf
->nsegs
].ds_addr
= pci_map_single(sc
->sc_pcidev
,
205 iov
->iov_base
, iov
->iov_len
,
206 PCI_DMA_BIDIRECTIONAL
);
207 buf
->segs
[buf
->nsegs
].ds_len
= iov
->iov_len
;
208 buf
->mapsize
+= iov
->iov_len
;
212 /* identify this buffer by the first segment */
213 buf
->map
= (void *) buf
->segs
[0].ds_addr
;
218 * map in a given sk_buff
222 pci_map_skb(struct hifn_softc
*sc
,struct hifn_operand
*buf
,struct sk_buff
*skb
)
226 DPRINTF("%s()\n", __FUNCTION__
);
230 buf
->segs
[0].ds_addr
= pci_map_single(sc
->sc_pcidev
,
231 skb
->data
, skb_headlen(skb
), PCI_DMA_BIDIRECTIONAL
);
232 buf
->segs
[0].ds_len
= skb_headlen(skb
);
233 buf
->mapsize
+= buf
->segs
[0].ds_len
;
237 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; ) {
238 buf
->segs
[buf
->nsegs
].ds_len
= skb_shinfo(skb
)->frags
[i
].size
;
239 buf
->segs
[buf
->nsegs
].ds_addr
= pci_map_single(sc
->sc_pcidev
,
240 page_address(skb_shinfo(skb
)->frags
[i
].page
) +
241 skb_shinfo(skb
)->frags
[i
].page_offset
,
242 buf
->segs
[buf
->nsegs
].ds_len
, PCI_DMA_BIDIRECTIONAL
);
243 buf
->mapsize
+= buf
->segs
[buf
->nsegs
].ds_len
;
247 /* identify this buffer by the first segment */
248 buf
->map
= (void *) buf
->segs
[0].ds_addr
;
253 * map in a given contiguous buffer
257 pci_map_buf(struct hifn_softc
*sc
,struct hifn_operand
*buf
, void *b
, int len
)
259 DPRINTF("%s()\n", __FUNCTION__
);
262 buf
->segs
[0].ds_addr
= pci_map_single(sc
->sc_pcidev
,
263 b
, len
, PCI_DMA_BIDIRECTIONAL
);
264 buf
->segs
[0].ds_len
= len
;
265 buf
->mapsize
+= buf
->segs
[0].ds_len
;
268 /* identify this buffer by the first segment */
269 buf
->map
= (void *) buf
->segs
[0].ds_addr
;
273 #if 0 /* not needed at this time */
275 pci_sync_iov(struct hifn_softc
*sc
, struct hifn_operand
*buf
)
279 DPRINTF("%s()\n", __FUNCTION__
);
280 for (i
= 0; i
< buf
->nsegs
; i
++)
281 pci_dma_sync_single_for_cpu(sc
->sc_pcidev
, buf
->segs
[i
].ds_addr
,
282 buf
->segs
[i
].ds_len
, PCI_DMA_BIDIRECTIONAL
);
287 pci_unmap_buf(struct hifn_softc
*sc
, struct hifn_operand
*buf
)
290 DPRINTF("%s()\n", __FUNCTION__
);
291 for (i
= 0; i
< buf
->nsegs
; i
++) {
292 pci_unmap_single(sc
->sc_pcidev
, buf
->segs
[i
].ds_addr
,
293 buf
->segs
[i
].ds_len
, PCI_DMA_BIDIRECTIONAL
);
294 buf
->segs
[i
].ds_addr
= 0;
295 buf
->segs
[i
].ds_len
= 0;
303 hifn_partname(struct hifn_softc
*sc
)
305 /* XXX sprintf numbers when not decoded */
306 switch (pci_get_vendor(sc
->sc_pcidev
)) {
307 case PCI_VENDOR_HIFN
:
308 switch (pci_get_device(sc
->sc_pcidev
)) {
309 case PCI_PRODUCT_HIFN_6500
: return "Hifn 6500";
310 case PCI_PRODUCT_HIFN_7751
: return "Hifn 7751";
311 case PCI_PRODUCT_HIFN_7811
: return "Hifn 7811";
312 case PCI_PRODUCT_HIFN_7951
: return "Hifn 7951";
313 case PCI_PRODUCT_HIFN_7955
: return "Hifn 7955";
314 case PCI_PRODUCT_HIFN_7956
: return "Hifn 7956";
316 return "Hifn unknown-part";
317 case PCI_VENDOR_INVERTEX
:
318 switch (pci_get_device(sc
->sc_pcidev
)) {
319 case PCI_PRODUCT_INVERTEX_AEON
: return "Invertex AEON";
321 return "Invertex unknown-part";
322 case PCI_VENDOR_NETSEC
:
323 switch (pci_get_device(sc
->sc_pcidev
)) {
324 case PCI_PRODUCT_NETSEC_7751
: return "NetSec 7751";
326 return "NetSec unknown-part";
328 return "Unknown-vendor unknown-part";
332 checkmaxmin(struct pci_dev
*dev
, const char *what
, u_int v
, u_int min
, u_int max
)
334 struct hifn_softc
*sc
= pci_get_drvdata(dev
);
336 device_printf(sc
->sc_dev
, "Warning, %s %u out of range, "
337 "using max %u\n", what
, v
, max
);
339 } else if (v
< min
) {
340 device_printf(sc
->sc_dev
, "Warning, %s %u out of range, "
341 "using min %u\n", what
, v
, min
);
348 * Select PLL configuration for 795x parts. This is complicated in
349 * that we cannot determine the optimal parameters without user input.
350 * The reference clock is derived from an external clock through a
351 * multiplier. The external clock is either the host bus (i.e. PCI)
352 * or an external clock generator. When using the PCI bus we assume
353 * the clock is either 33 or 66 MHz; for an external source we cannot
356 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
357 * for an external source, followed by the frequency. We calculate
358 * the appropriate multiplier and PLL register contents accordingly.
359 * When no configuration is given we default to "pci66" since that
360 * always will allow the card to work. If a card is using the PCI
361 * bus clock and in a 33MHz slot then it will be operating at half
362 * speed until the correct information is provided.
364 * We use a default setting of "ext66" because according to Mike Ham
365 * of HiFn, almost every board in existence has an external crystal
366 * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
367 * because PCI33 can have clocks from 0 to 33Mhz, and some have
368 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
371 hifn_getpllconfig(struct pci_dev
*dev
, u_int
*pll
)
373 const char *pllspec
= hifn_pllconfig
;
374 u_int freq
, mul
, fl
, fh
;
382 if (strncmp(pllspec
, "ext", 3) == 0) {
384 pllconfig
|= HIFN_PLL_REF_SEL
;
385 switch (pci_get_device(dev
)) {
386 case PCI_PRODUCT_HIFN_7955
:
387 case PCI_PRODUCT_HIFN_7956
:
391 case PCI_PRODUCT_HIFN_7954
:
396 } else if (strncmp(pllspec
, "pci", 3) == 0)
398 freq
= strtoul(pllspec
, &nxt
, 10);
402 freq
= checkmaxmin(dev
, "frequency", freq
, fl
, fh
);
404 * Calculate multiplier. We target a Fck of 266 MHz,
405 * allowing only even values, possibly rounded down.
406 * Multipliers > 8 must set the charge pump current.
408 mul
= checkmaxmin(dev
, "PLL divisor", (266 / freq
) &~ 1, 2, 12);
409 pllconfig
|= (mul
/ 2 - 1) << HIFN_PLL_ND_SHIFT
;
411 pllconfig
|= HIFN_PLL_IS
;
416 * Attach an interface that successfully probed.
419 hifn_probe(struct pci_dev
*dev
, const struct pci_device_id
*ent
)
421 struct hifn_softc
*sc
= NULL
;
425 unsigned long mem_start
, mem_len
;
426 static int num_chips
= 0;
428 DPRINTF("%s()\n", __FUNCTION__
);
430 if (pci_enable_device(dev
) < 0)
433 if (pci_set_mwi(dev
))
437 printk("hifn: found device with no IRQ assigned. check BIOS settings!");
438 pci_disable_device(dev
);
442 sc
= (struct hifn_softc
*) kmalloc(sizeof(*sc
), GFP_KERNEL
);
445 memset(sc
, 0, sizeof(*sc
));
447 softc_device_init(sc
, "hifn", num_chips
, hifn_methods
);
452 sc
->sc_num
= num_chips
++;
453 if (sc
->sc_num
< HIFN_MAX_CHIPS
)
454 hifn_chip_idx
[sc
->sc_num
] = sc
;
456 pci_set_drvdata(sc
->sc_pcidev
, sc
);
458 spin_lock_init(&sc
->sc_mtx
);
460 /* XXX handle power management */
463 * The 7951 and 795x have a random number generator and
464 * public key support; note this.
466 if (pci_get_vendor(dev
) == PCI_VENDOR_HIFN
&&
467 (pci_get_device(dev
) == PCI_PRODUCT_HIFN_7951
||
468 pci_get_device(dev
) == PCI_PRODUCT_HIFN_7955
||
469 pci_get_device(dev
) == PCI_PRODUCT_HIFN_7956
))
470 sc
->sc_flags
= HIFN_HAS_RNG
| HIFN_HAS_PUBLIC
;
472 * The 7811 has a random number generator and
473 * we also note it's identity 'cuz of some quirks.
475 if (pci_get_vendor(dev
) == PCI_VENDOR_HIFN
&&
476 pci_get_device(dev
) == PCI_PRODUCT_HIFN_7811
)
477 sc
->sc_flags
|= HIFN_IS_7811
| HIFN_HAS_RNG
;
480 * The 795x parts support AES.
482 if (pci_get_vendor(dev
) == PCI_VENDOR_HIFN
&&
483 (pci_get_device(dev
) == PCI_PRODUCT_HIFN_7955
||
484 pci_get_device(dev
) == PCI_PRODUCT_HIFN_7956
)) {
485 sc
->sc_flags
|= HIFN_IS_7956
| HIFN_HAS_AES
;
487 * Select PLL configuration. This depends on the
488 * bus and board design and must be manually configured
489 * if the default setting is unacceptable.
491 hifn_getpllconfig(dev
, &sc
->sc_pllconfig
);
495 * Setup PCI resources. Note that we record the bus
496 * tag and handle for each register mapping, this is
497 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
498 * and WRITE_REG_1 macros throughout the driver.
500 mem_start
= pci_resource_start(sc
->sc_pcidev
, 0);
501 mem_len
= pci_resource_len(sc
->sc_pcidev
, 0);
502 sc
->sc_bar0
= (ocf_iomem_t
) ioremap(mem_start
, mem_len
);
504 device_printf(sc
->sc_dev
, "cannot map bar%d register space\n", 0);
507 sc
->sc_bar0_lastreg
= (bus_size_t
) -1;
509 mem_start
= pci_resource_start(sc
->sc_pcidev
, 1);
510 mem_len
= pci_resource_len(sc
->sc_pcidev
, 1);
511 sc
->sc_bar1
= (ocf_iomem_t
) ioremap(mem_start
, mem_len
);
513 device_printf(sc
->sc_dev
, "cannot map bar%d register space\n", 1);
516 sc
->sc_bar1_lastreg
= (bus_size_t
) -1;
518 /* fix up the bus size */
519 if (pci_set_dma_mask(dev
, DMA_32BIT_MASK
)) {
520 device_printf(sc
->sc_dev
, "No usable DMA configuration, aborting.\n");
523 if (pci_set_consistent_dma_mask(dev
, DMA_32BIT_MASK
)) {
524 device_printf(sc
->sc_dev
,
525 "No usable consistent DMA configuration, aborting.\n");
532 * Setup the area where the Hifn DMA's descriptors
533 * and associated data structures.
535 sc
->sc_dma
= (struct hifn_dma
*) pci_alloc_consistent(dev
,
537 &sc
->sc_dma_physaddr
);
539 device_printf(sc
->sc_dev
, "cannot alloc sc_dma\n");
542 bzero(sc
->sc_dma
, sizeof(*sc
->sc_dma
));
545 * Reset the board and do the ``secret handshake''
546 * to enable the crypto support. Then complete the
547 * initialization procedure by setting up the interrupt
548 * and hooking in to the system crypto support so we'll
549 * get used for system services like the crypto device,
550 * IPsec, RNG device, etc.
552 hifn_reset_board(sc
, 0);
554 if (hifn_enable_crypto(sc
) != 0) {
555 device_printf(sc
->sc_dev
, "crypto enabling failed\n");
561 hifn_init_pci_registers(sc
);
563 pci_set_master(sc
->sc_pcidev
);
565 /* XXX can't dynamically determine ram type for 795x; force dram */
566 if (sc
->sc_flags
& HIFN_IS_7956
)
567 sc
->sc_drammodel
= 1;
568 else if (hifn_ramtype(sc
))
571 if (sc
->sc_drammodel
== 0)
577 * Workaround for NetSec 7751 rev A: half ram size because two
578 * of the address lines were left floating
580 if (pci_get_vendor(dev
) == PCI_VENDOR_NETSEC
&&
581 pci_get_device(dev
) == PCI_PRODUCT_NETSEC_7751
&&
582 pci_get_revid(dev
) == 0x61) /*XXX???*/
583 sc
->sc_ramsize
>>= 1;
586 * Arrange the interrupt line.
588 rc
= request_irq(dev
->irq
, hifn_intr
, IRQF_SHARED
, "hifn", sc
);
590 device_printf(sc
->sc_dev
, "could not map interrupt: %d\n", rc
);
593 sc
->sc_irq
= dev
->irq
;
598 * NB: Keep only the low 16 bits; this masks the chip id
601 rev
= READ_REG_1(sc
, HIFN_1_REVID
) & 0xffff;
603 rseg
= sc
->sc_ramsize
/ 1024;
605 if (sc
->sc_ramsize
>= (1024 * 1024)) {
609 device_printf(sc
->sc_dev
, "%s, rev %u, %d%cB %cram",
610 hifn_partname(sc
), rev
,
611 rseg
, rbase
, sc
->sc_drammodel
? 'd' : 's');
612 if (sc
->sc_flags
& HIFN_IS_7956
)
613 printf(", pll=0x%x<%s clk, %ux mult>",
615 sc
->sc_pllconfig
& HIFN_PLL_REF_SEL
? "ext" : "pci",
616 2 + 2*((sc
->sc_pllconfig
& HIFN_PLL_ND
) >> 11));
619 sc
->sc_cid
= crypto_get_driverid(softc_get_device(sc
),CRYPTOCAP_F_HARDWARE
);
620 if (sc
->sc_cid
< 0) {
621 device_printf(sc
->sc_dev
, "could not get crypto driver id\n");
625 WRITE_REG_0(sc
, HIFN_0_PUCNFG
,
626 READ_REG_0(sc
, HIFN_0_PUCNFG
) | HIFN_PUCNFG_CHIPID
);
627 ena
= READ_REG_0(sc
, HIFN_0_PUSTAT
) & HIFN_PUSTAT_CHIPENA
;
630 case HIFN_PUSTAT_ENA_2
:
631 crypto_register(sc
->sc_cid
, CRYPTO_3DES_CBC
, 0, 0);
632 crypto_register(sc
->sc_cid
, CRYPTO_ARC4
, 0, 0);
633 if (sc
->sc_flags
& HIFN_HAS_AES
)
634 crypto_register(sc
->sc_cid
, CRYPTO_AES_CBC
, 0, 0);
636 case HIFN_PUSTAT_ENA_1
:
637 crypto_register(sc
->sc_cid
, CRYPTO_MD5
, 0, 0);
638 crypto_register(sc
->sc_cid
, CRYPTO_SHA1
, 0, 0);
639 crypto_register(sc
->sc_cid
, CRYPTO_MD5_HMAC
, 0, 0);
640 crypto_register(sc
->sc_cid
, CRYPTO_SHA1_HMAC
, 0, 0);
641 crypto_register(sc
->sc_cid
, CRYPTO_DES_CBC
, 0, 0);
645 if (sc
->sc_flags
& (HIFN_HAS_PUBLIC
| HIFN_HAS_RNG
))
646 hifn_init_pubrng(sc
);
648 init_timer(&sc
->sc_tickto
);
649 sc
->sc_tickto
.function
= hifn_tick
;
650 sc
->sc_tickto
.data
= (unsigned long) sc
->sc_num
;
651 mod_timer(&sc
->sc_tickto
, jiffies
+ HZ
);
657 crypto_unregister_all(sc
->sc_cid
);
658 if (sc
->sc_irq
!= -1)
659 free_irq(sc
->sc_irq
, sc
);
661 /* Turn off DMA polling */
662 WRITE_REG_1(sc
, HIFN_1_DMA_CNFG
, HIFN_DMACNFG_MSTRESET
|
663 HIFN_DMACNFG_DMARESET
| HIFN_DMACNFG_MODE
);
665 pci_free_consistent(sc
->sc_pcidev
,
667 sc
->sc_dma
, sc
->sc_dma_physaddr
);
674 * Detach an interface that successfully probed.
677 hifn_remove(struct pci_dev
*dev
)
679 struct hifn_softc
*sc
= pci_get_drvdata(dev
);
680 unsigned long l_flags
;
682 DPRINTF("%s()\n", __FUNCTION__
);
684 KASSERT(sc
!= NULL
, ("hifn_detach: null software carrier!"));
686 /* disable interrupts */
688 WRITE_REG_1(sc
, HIFN_1_DMA_IER
, 0);
691 /*XXX other resources */
692 del_timer_sync(&sc
->sc_tickto
);
694 /* Turn off DMA polling */
695 WRITE_REG_1(sc
, HIFN_1_DMA_CNFG
, HIFN_DMACNFG_MSTRESET
|
696 HIFN_DMACNFG_DMARESET
| HIFN_DMACNFG_MODE
);
698 crypto_unregister_all(sc
->sc_cid
);
700 free_irq(sc
->sc_irq
, sc
);
702 pci_free_consistent(sc
->sc_pcidev
, sizeof(*sc
->sc_dma
),
703 sc
->sc_dma
, sc
->sc_dma_physaddr
);
708 hifn_init_pubrng(struct hifn_softc
*sc
)
712 DPRINTF("%s()\n", __FUNCTION__
);
714 if ((sc
->sc_flags
& HIFN_IS_7811
) == 0) {
715 /* Reset 7951 public key/rng engine */
716 WRITE_REG_1(sc
, HIFN_1_PUB_RESET
,
717 READ_REG_1(sc
, HIFN_1_PUB_RESET
) | HIFN_PUBRST_RESET
);
719 for (i
= 0; i
< 100; i
++) {
721 if ((READ_REG_1(sc
, HIFN_1_PUB_RESET
) &
722 HIFN_PUBRST_RESET
) == 0)
727 device_printf(sc
->sc_dev
, "public key init failed\n");
732 /* Enable the rng, if available */
733 #ifdef CONFIG_OCF_RANDOMHARVEST
734 if (sc
->sc_flags
& HIFN_HAS_RNG
) {
735 if (sc
->sc_flags
& HIFN_IS_7811
) {
737 r
= READ_REG_1(sc
, HIFN_1_7811_RNGENA
);
738 if (r
& HIFN_7811_RNGENA_ENA
) {
739 r
&= ~HIFN_7811_RNGENA_ENA
;
740 WRITE_REG_1(sc
, HIFN_1_7811_RNGENA
, r
);
742 WRITE_REG_1(sc
, HIFN_1_7811_RNGCFG
,
743 HIFN_7811_RNGCFG_DEFL
);
744 r
|= HIFN_7811_RNGENA_ENA
;
745 WRITE_REG_1(sc
, HIFN_1_7811_RNGENA
, r
);
747 WRITE_REG_1(sc
, HIFN_1_RNG_CONFIG
,
748 READ_REG_1(sc
, HIFN_1_RNG_CONFIG
) |
752 crypto_rregister(sc
->sc_cid
, hifn_read_random
, sc
);
756 /* Enable public key engine, if available */
757 if (sc
->sc_flags
& HIFN_HAS_PUBLIC
) {
758 WRITE_REG_1(sc
, HIFN_1_PUB_IEN
, HIFN_PUBIEN_DONE
);
759 sc
->sc_dmaier
|= HIFN_DMAIER_PUBDONE
;
760 WRITE_REG_1(sc
, HIFN_1_DMA_IER
, sc
->sc_dmaier
);
761 #ifdef HIFN_VULCANDEV
762 sc
->sc_pkdev
= make_dev(&vulcanpk_cdevsw
, 0,
763 UID_ROOT
, GID_WHEEL
, 0666,
765 sc
->sc_pkdev
->si_drv1
= sc
;
772 #ifdef CONFIG_OCF_RANDOMHARVEST
774 hifn_read_random(void *arg
, u_int32_t
*buf
, int len
)
776 struct hifn_softc
*sc
= (struct hifn_softc
*) arg
;
783 if (sc
->sc_flags
& HIFN_IS_7811
) {
784 /* ONLY VALID ON 7811!!!! */
785 for (i
= 0; i
< 5; i
++) {
786 sts
= READ_REG_1(sc
, HIFN_1_7811_RNGSTS
);
787 if (sts
& HIFN_7811_RNGSTS_UFL
) {
788 device_printf(sc
->sc_dev
,
789 "RNG underflow: disabling\n");
790 /* DAVIDM perhaps return -1 */
793 if ((sts
& HIFN_7811_RNGSTS_RDY
) == 0)
797 * There are at least two words in the RNG FIFO
801 buf
[rc
++] = READ_REG_1(sc
, HIFN_1_7811_RNGDAT
);
803 buf
[rc
++] = READ_REG_1(sc
, HIFN_1_7811_RNGDAT
);
806 buf
[rc
++] = READ_REG_1(sc
, HIFN_1_RNG_DATA
);
808 /* NB: discard first data read */
809 if (sc
->sc_rngfirst
) {
816 #endif /* CONFIG_OCF_RANDOMHARVEST */
819 hifn_puc_wait(struct hifn_softc
*sc
)
822 int reg
= HIFN_0_PUCTRL
;
824 if (sc
->sc_flags
& HIFN_IS_7956
) {
825 reg
= HIFN_0_PUCTRL2
;
828 for (i
= 5000; i
> 0; i
--) {
830 if (!(READ_REG_0(sc
, reg
) & HIFN_PUCTRL_RESET
))
834 device_printf(sc
->sc_dev
, "proc unit did not reset(0x%x)\n",
835 READ_REG_0(sc
, HIFN_0_PUCTRL
));
839 * Reset the processing unit.
842 hifn_reset_puc(struct hifn_softc
*sc
)
844 /* Reset processing unit */
845 int reg
= HIFN_0_PUCTRL
;
847 if (sc
->sc_flags
& HIFN_IS_7956
) {
848 reg
= HIFN_0_PUCTRL2
;
850 WRITE_REG_0(sc
, reg
, HIFN_PUCTRL_DMAENA
);
856 * Set the Retry and TRDY registers; note that we set them to
857 * zero because the 7811 locks up when forced to retry (section
858 * 3.6 of "Specification Update SU-0014-04". Not clear if we
859 * should do this for all Hifn parts, but it doesn't seem to hurt.
862 hifn_set_retry(struct hifn_softc
*sc
)
864 DPRINTF("%s()\n", __FUNCTION__
);
865 /* NB: RETRY only responds to 8-bit reads/writes */
866 pci_write_config_byte(sc
->sc_pcidev
, HIFN_RETRY_TIMEOUT
, 0);
867 pci_write_config_dword(sc
->sc_pcidev
, HIFN_TRDY_TIMEOUT
, 0);
871 * Resets the board. Values in the regesters are left as is
872 * from the reset (i.e. initial values are assigned elsewhere).
875 hifn_reset_board(struct hifn_softc
*sc
, int full
)
879 DPRINTF("%s()\n", __FUNCTION__
);
881 * Set polling in the DMA configuration register to zero. 0x7 avoids
882 * resetting the board and zeros out the other fields.
884 WRITE_REG_1(sc
, HIFN_1_DMA_CNFG
, HIFN_DMACNFG_MSTRESET
|
885 HIFN_DMACNFG_DMARESET
| HIFN_DMACNFG_MODE
);
888 * Now that polling has been disabled, we have to wait 1 ms
889 * before resetting the board.
893 /* Reset the DMA unit */
895 WRITE_REG_1(sc
, HIFN_1_DMA_CNFG
, HIFN_DMACNFG_MODE
);
898 WRITE_REG_1(sc
, HIFN_1_DMA_CNFG
,
899 HIFN_DMACNFG_MODE
| HIFN_DMACNFG_MSTRESET
);
903 KASSERT(sc
->sc_dma
!= NULL
, ("hifn_reset_board: null DMA tag!"));
904 bzero(sc
->sc_dma
, sizeof(*sc
->sc_dma
));
906 /* Bring dma unit out of reset */
907 WRITE_REG_1(sc
, HIFN_1_DMA_CNFG
, HIFN_DMACNFG_MSTRESET
|
908 HIFN_DMACNFG_DMARESET
| HIFN_DMACNFG_MODE
);
913 if (sc
->sc_flags
& HIFN_IS_7811
) {
914 for (reg
= 0; reg
< 1000; reg
++) {
915 if (READ_REG_1(sc
, HIFN_1_7811_MIPSRST
) &
916 HIFN_MIPSRST_CRAMINIT
)
921 device_printf(sc
->sc_dev
, ": cram init timeout\n");
923 /* set up DMA configuration register #2 */
924 /* turn off all PK and BAR0 swaps */
925 WRITE_REG_1(sc
, HIFN_1_DMA_CNFG2
,
926 (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT
)|
927 (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT
)|
928 (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT
)|
929 (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT
));
934 hifn_next_signature(u_int32_t a
, u_int cnt
)
939 for (i
= 0; i
< cnt
; i
++) {
949 a
= (v
& 1) ^ (a
<< 1);
957 * Checks to see if crypto is already enabled. If crypto isn't enable,
958 * "hifn_enable_crypto" is called to enable it. The check is important,
959 * as enabling crypto twice will lock the board.
962 hifn_enable_crypto(struct hifn_softc
*sc
)
964 u_int32_t dmacfg
, ramcfg
, encl
, addr
, i
;
965 char offtbl
[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
966 0x00, 0x00, 0x00, 0x00 };
968 DPRINTF("%s()\n", __FUNCTION__
);
970 ramcfg
= READ_REG_0(sc
, HIFN_0_PUCNFG
);
971 dmacfg
= READ_REG_1(sc
, HIFN_1_DMA_CNFG
);
974 * The RAM config register's encrypt level bit needs to be set before
975 * every read performed on the encryption level register.
977 WRITE_REG_0(sc
, HIFN_0_PUCNFG
, ramcfg
| HIFN_PUCNFG_CHIPID
);
979 encl
= READ_REG_0(sc
, HIFN_0_PUSTAT
) & HIFN_PUSTAT_CHIPENA
;
982 * Make sure we don't re-unlock. Two unlocks kills chip until the
985 if (encl
== HIFN_PUSTAT_ENA_1
|| encl
== HIFN_PUSTAT_ENA_2
) {
988 device_printf(sc
->sc_dev
,
989 "Strong crypto already enabled!\n");
994 if (encl
!= 0 && encl
!= HIFN_PUSTAT_ENA_0
) {
997 device_printf(sc
->sc_dev
,
998 "Unknown encryption level 0x%x\n", encl
);
1003 WRITE_REG_1(sc
, HIFN_1_DMA_CNFG
, HIFN_DMACNFG_UNLOCK
|
1004 HIFN_DMACNFG_MSTRESET
| HIFN_DMACNFG_DMARESET
| HIFN_DMACNFG_MODE
);
1006 addr
= READ_REG_1(sc
, HIFN_UNLOCK_SECRET1
);
1008 WRITE_REG_1(sc
, HIFN_UNLOCK_SECRET2
, 0);
1011 for (i
= 0; i
<= 12; i
++) {
1012 addr
= hifn_next_signature(addr
, offtbl
[i
] + 0x101);
1013 WRITE_REG_1(sc
, HIFN_UNLOCK_SECRET2
, addr
);
1018 WRITE_REG_0(sc
, HIFN_0_PUCNFG
, ramcfg
| HIFN_PUCNFG_CHIPID
);
1019 encl
= READ_REG_0(sc
, HIFN_0_PUSTAT
) & HIFN_PUSTAT_CHIPENA
;
1023 if (encl
!= HIFN_PUSTAT_ENA_1
&& encl
!= HIFN_PUSTAT_ENA_2
)
1024 device_printf(sc
->sc_dev
, "Engine is permanently "
1025 "locked until next system reset!\n");
1027 device_printf(sc
->sc_dev
, "Engine enabled "
1033 WRITE_REG_0(sc
, HIFN_0_PUCNFG
, ramcfg
);
1034 WRITE_REG_1(sc
, HIFN_1_DMA_CNFG
, dmacfg
);
1037 case HIFN_PUSTAT_ENA_1
:
1038 case HIFN_PUSTAT_ENA_2
:
1040 case HIFN_PUSTAT_ENA_0
:
1042 device_printf(sc
->sc_dev
, "disabled\n");
1050 * Give initial values to the registers listed in the "Register Space"
1051 * section of the HIFN Software Development reference manual.
1054 hifn_init_pci_registers(struct hifn_softc
*sc
)
1056 DPRINTF("%s()\n", __FUNCTION__
);
1058 /* write fixed values needed by the Initialization registers */
1059 WRITE_REG_0(sc
, HIFN_0_PUCTRL
, HIFN_PUCTRL_DMAENA
);
1060 WRITE_REG_0(sc
, HIFN_0_FIFOCNFG
, HIFN_FIFOCNFG_THRESHOLD
);
1061 WRITE_REG_0(sc
, HIFN_0_PUIER
, HIFN_PUIER_DSTOVER
);
1063 /* write all 4 ring address registers */
1064 WRITE_REG_1(sc
, HIFN_1_DMA_CRAR
, sc
->sc_dma_physaddr
+
1065 offsetof(struct hifn_dma
, cmdr
[0]));
1066 WRITE_REG_1(sc
, HIFN_1_DMA_SRAR
, sc
->sc_dma_physaddr
+
1067 offsetof(struct hifn_dma
, srcr
[0]));
1068 WRITE_REG_1(sc
, HIFN_1_DMA_DRAR
, sc
->sc_dma_physaddr
+
1069 offsetof(struct hifn_dma
, dstr
[0]));
1070 WRITE_REG_1(sc
, HIFN_1_DMA_RRAR
, sc
->sc_dma_physaddr
+
1071 offsetof(struct hifn_dma
, resr
[0]));
1075 /* write status register */
1076 WRITE_REG_1(sc
, HIFN_1_DMA_CSR
,
1077 HIFN_DMACSR_D_CTRL_DIS
| HIFN_DMACSR_R_CTRL_DIS
|
1078 HIFN_DMACSR_S_CTRL_DIS
| HIFN_DMACSR_C_CTRL_DIS
|
1079 HIFN_DMACSR_D_ABORT
| HIFN_DMACSR_D_DONE
| HIFN_DMACSR_D_LAST
|
1080 HIFN_DMACSR_D_WAIT
| HIFN_DMACSR_D_OVER
|
1081 HIFN_DMACSR_R_ABORT
| HIFN_DMACSR_R_DONE
| HIFN_DMACSR_R_LAST
|
1082 HIFN_DMACSR_R_WAIT
| HIFN_DMACSR_R_OVER
|
1083 HIFN_DMACSR_S_ABORT
| HIFN_DMACSR_S_DONE
| HIFN_DMACSR_S_LAST
|
1084 HIFN_DMACSR_S_WAIT
|
1085 HIFN_DMACSR_C_ABORT
| HIFN_DMACSR_C_DONE
| HIFN_DMACSR_C_LAST
|
1086 HIFN_DMACSR_C_WAIT
|
1087 HIFN_DMACSR_ENGINE
|
1088 ((sc
->sc_flags
& HIFN_HAS_PUBLIC
) ?
1089 HIFN_DMACSR_PUBDONE
: 0) |
1090 ((sc
->sc_flags
& HIFN_IS_7811
) ?
1091 HIFN_DMACSR_ILLW
| HIFN_DMACSR_ILLR
: 0));
1093 sc
->sc_d_busy
= sc
->sc_r_busy
= sc
->sc_s_busy
= sc
->sc_c_busy
= 0;
1094 sc
->sc_dmaier
|= HIFN_DMAIER_R_DONE
| HIFN_DMAIER_C_ABORT
|
1095 HIFN_DMAIER_D_OVER
| HIFN_DMAIER_R_OVER
|
1096 HIFN_DMAIER_S_ABORT
| HIFN_DMAIER_D_ABORT
| HIFN_DMAIER_R_ABORT
|
1097 ((sc
->sc_flags
& HIFN_IS_7811
) ?
1098 HIFN_DMAIER_ILLW
| HIFN_DMAIER_ILLR
: 0);
1099 sc
->sc_dmaier
&= ~HIFN_DMAIER_C_WAIT
;
1100 WRITE_REG_1(sc
, HIFN_1_DMA_IER
, sc
->sc_dmaier
);
1103 if (sc
->sc_flags
& HIFN_IS_7956
) {
1106 WRITE_REG_0(sc
, HIFN_0_PUCNFG
, HIFN_PUCNFG_COMPSING
|
1107 HIFN_PUCNFG_TCALLPHASES
|
1108 HIFN_PUCNFG_TCDRVTOTEM
| HIFN_PUCNFG_BUS32
);
1110 /* turn off the clocks and insure bypass is set */
1111 pll
= READ_REG_1(sc
, HIFN_1_PLL
);
1112 pll
= (pll
&~ (HIFN_PLL_PK_CLK_SEL
| HIFN_PLL_PE_CLK_SEL
))
1113 | HIFN_PLL_BP
| HIFN_PLL_MBSET
;
1114 WRITE_REG_1(sc
, HIFN_1_PLL
, pll
);
1115 DELAY(10*1000); /* 10ms */
1117 /* change configuration */
1118 pll
= (pll
&~ HIFN_PLL_CONFIG
) | sc
->sc_pllconfig
;
1119 WRITE_REG_1(sc
, HIFN_1_PLL
, pll
);
1120 DELAY(10*1000); /* 10ms */
1122 /* disable bypass */
1123 pll
&= ~HIFN_PLL_BP
;
1124 WRITE_REG_1(sc
, HIFN_1_PLL
, pll
);
1125 /* enable clocks with new configuration */
1126 pll
|= HIFN_PLL_PK_CLK_SEL
| HIFN_PLL_PE_CLK_SEL
;
1127 WRITE_REG_1(sc
, HIFN_1_PLL
, pll
);
1129 WRITE_REG_0(sc
, HIFN_0_PUCNFG
, HIFN_PUCNFG_COMPSING
|
1130 HIFN_PUCNFG_DRFR_128
| HIFN_PUCNFG_TCALLPHASES
|
1131 HIFN_PUCNFG_TCDRVTOTEM
| HIFN_PUCNFG_BUS32
|
1132 (sc
->sc_drammodel
? HIFN_PUCNFG_DRAM
: HIFN_PUCNFG_SRAM
));
1135 WRITE_REG_0(sc
, HIFN_0_PUISR
, HIFN_PUISR_DSTOVER
);
1136 WRITE_REG_1(sc
, HIFN_1_DMA_CNFG
, HIFN_DMACNFG_MSTRESET
|
1137 HIFN_DMACNFG_DMARESET
| HIFN_DMACNFG_MODE
| HIFN_DMACNFG_LAST
|
1138 ((HIFN_POLL_FREQUENCY
<< 16 ) & HIFN_DMACNFG_POLLFREQ
) |
1139 ((HIFN_POLL_SCALAR
<< 8) & HIFN_DMACNFG_POLLINVAL
));
1143 * The maximum number of sessions supported by the card
1144 * is dependent on the amount of context ram, which
1145 * encryption algorithms are enabled, and how compression
1146 * is configured. This should be configured before this
1147 * routine is called.
1150 hifn_sessions(struct hifn_softc
*sc
)
1155 DPRINTF("%s()\n", __FUNCTION__
);
1157 pucnfg
= READ_REG_0(sc
, HIFN_0_PUCNFG
);
1159 if (pucnfg
& HIFN_PUCNFG_COMPSING
) {
1160 if (pucnfg
& HIFN_PUCNFG_ENCCNFG
)
1165 * 7955/7956 has internal context memory of 32K
1167 if (sc
->sc_flags
& HIFN_IS_7956
)
1168 sc
->sc_maxses
= 32768 / ctxsize
;
1171 ((sc
->sc_ramsize
- 32768) / ctxsize
);
1173 sc
->sc_maxses
= sc
->sc_ramsize
/ 16384;
1175 if (sc
->sc_maxses
> 2048)
1176 sc
->sc_maxses
= 2048;
1180 * Determine ram type (sram or dram). Board should be just out of a reset
1181 * state when this is called.
1184 hifn_ramtype(struct hifn_softc
*sc
)
1186 u_int8_t data
[8], dataexpect
[8];
1189 for (i
= 0; i
< sizeof(data
); i
++)
1190 data
[i
] = dataexpect
[i
] = 0x55;
1191 if (hifn_writeramaddr(sc
, 0, data
))
1193 if (hifn_readramaddr(sc
, 0, data
))
1195 if (bcmp(data
, dataexpect
, sizeof(data
)) != 0) {
1196 sc
->sc_drammodel
= 1;
1200 for (i
= 0; i
< sizeof(data
); i
++)
1201 data
[i
] = dataexpect
[i
] = 0xaa;
1202 if (hifn_writeramaddr(sc
, 0, data
))
1204 if (hifn_readramaddr(sc
, 0, data
))
1206 if (bcmp(data
, dataexpect
, sizeof(data
)) != 0) {
1207 sc
->sc_drammodel
= 1;
1214 #define HIFN_SRAM_MAX (32 << 20)
1215 #define HIFN_SRAM_STEP_SIZE 16384
1216 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1219 hifn_sramsize(struct hifn_softc
*sc
)
1223 u_int8_t dataexpect
[sizeof(data
)];
1226 for (i
= 0; i
< sizeof(data
); i
++)
1227 data
[i
] = dataexpect
[i
] = i
^ 0x5a;
1229 for (i
= HIFN_SRAM_GRANULARITY
- 1; i
>= 0; i
--) {
1230 a
= i
* HIFN_SRAM_STEP_SIZE
;
1231 bcopy(&i
, data
, sizeof(i
));
1232 hifn_writeramaddr(sc
, a
, data
);
1235 for (i
= 0; i
< HIFN_SRAM_GRANULARITY
; i
++) {
1236 a
= i
* HIFN_SRAM_STEP_SIZE
;
1237 bcopy(&i
, dataexpect
, sizeof(i
));
1238 if (hifn_readramaddr(sc
, a
, data
) < 0)
1240 if (bcmp(data
, dataexpect
, sizeof(data
)) != 0)
1242 sc
->sc_ramsize
= a
+ HIFN_SRAM_STEP_SIZE
;
1249 * XXX For dram boards, one should really try all of the
1250 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1251 * is already set up correctly.
1254 hifn_dramsize(struct hifn_softc
*sc
)
1258 if (sc
->sc_flags
& HIFN_IS_7956
) {
1260 * 7955/7956 have a fixed internal ram of only 32K.
1262 sc
->sc_ramsize
= 32768;
1264 cnfg
= READ_REG_0(sc
, HIFN_0_PUCNFG
) &
1265 HIFN_PUCNFG_DRAMMASK
;
1266 sc
->sc_ramsize
= 1 << ((cnfg
>> 13) + 18);
1272 hifn_alloc_slot(struct hifn_softc
*sc
, int *cmdp
, int *srcp
, int *dstp
, int *resp
)
1274 struct hifn_dma
*dma
= sc
->sc_dma
;
1276 DPRINTF("%s()\n", __FUNCTION__
);
1278 if (dma
->cmdi
== HIFN_D_CMD_RSIZE
) {
1280 dma
->cmdr
[HIFN_D_CMD_RSIZE
].l
= htole32(HIFN_D_JUMP
|HIFN_D_MASKDONEIRQ
);
1282 dma
->cmdr
[HIFN_D_CMD_RSIZE
].l
|= htole32(HIFN_D_VALID
);
1283 HIFN_CMDR_SYNC(sc
, HIFN_D_CMD_RSIZE
,
1284 BUS_DMASYNC_PREWRITE
| BUS_DMASYNC_PREREAD
);
1286 *cmdp
= dma
->cmdi
++;
1287 dma
->cmdk
= dma
->cmdi
;
1289 if (dma
->srci
== HIFN_D_SRC_RSIZE
) {
1291 dma
->srcr
[HIFN_D_SRC_RSIZE
].l
= htole32(HIFN_D_JUMP
|HIFN_D_MASKDONEIRQ
);
1293 dma
->srcr
[HIFN_D_SRC_RSIZE
].l
|= htole32(HIFN_D_VALID
);
1294 HIFN_SRCR_SYNC(sc
, HIFN_D_SRC_RSIZE
,
1295 BUS_DMASYNC_PREWRITE
| BUS_DMASYNC_PREREAD
);
1297 *srcp
= dma
->srci
++;
1298 dma
->srck
= dma
->srci
;
1300 if (dma
->dsti
== HIFN_D_DST_RSIZE
) {
1302 dma
->dstr
[HIFN_D_DST_RSIZE
].l
= htole32(HIFN_D_JUMP
|HIFN_D_MASKDONEIRQ
);
1304 dma
->dstr
[HIFN_D_DST_RSIZE
].l
|= htole32(HIFN_D_VALID
);
1305 HIFN_DSTR_SYNC(sc
, HIFN_D_DST_RSIZE
,
1306 BUS_DMASYNC_PREWRITE
| BUS_DMASYNC_PREREAD
);
1308 *dstp
= dma
->dsti
++;
1309 dma
->dstk
= dma
->dsti
;
1311 if (dma
->resi
== HIFN_D_RES_RSIZE
) {
1313 dma
->resr
[HIFN_D_RES_RSIZE
].l
= htole32(HIFN_D_JUMP
|HIFN_D_MASKDONEIRQ
);
1315 dma
->resr
[HIFN_D_RES_RSIZE
].l
|= htole32(HIFN_D_VALID
);
1316 HIFN_RESR_SYNC(sc
, HIFN_D_RES_RSIZE
,
1317 BUS_DMASYNC_PREWRITE
| BUS_DMASYNC_PREREAD
);
1319 *resp
= dma
->resi
++;
1320 dma
->resk
= dma
->resi
;
1324 hifn_writeramaddr(struct hifn_softc
*sc
, int addr
, u_int8_t
*data
)
1326 struct hifn_dma
*dma
= sc
->sc_dma
;
1327 hifn_base_command_t wc
;
1328 const u_int32_t masks
= HIFN_D_VALID
| HIFN_D_LAST
| HIFN_D_MASKDONEIRQ
;
1329 int r
, cmdi
, resi
, srci
, dsti
;
1331 DPRINTF("%s()\n", __FUNCTION__
);
1333 wc
.masks
= htole16(3 << 13);
1334 wc
.session_num
= htole16(addr
>> 14);
1335 wc
.total_source_count
= htole16(8);
1336 wc
.total_dest_count
= htole16(addr
& 0x3fff);
1338 hifn_alloc_slot(sc
, &cmdi
, &srci
, &dsti
, &resi
);
1340 WRITE_REG_1(sc
, HIFN_1_DMA_CSR
,
1341 HIFN_DMACSR_C_CTRL_ENA
| HIFN_DMACSR_S_CTRL_ENA
|
1342 HIFN_DMACSR_D_CTRL_ENA
| HIFN_DMACSR_R_CTRL_ENA
);
1344 /* build write command */
1345 bzero(dma
->command_bufs
[cmdi
], HIFN_MAX_COMMAND
);
1346 *(hifn_base_command_t
*)dma
->command_bufs
[cmdi
] = wc
;
1347 bcopy(data
, &dma
->test_src
, sizeof(dma
->test_src
));
1349 dma
->srcr
[srci
].p
= htole32(sc
->sc_dma_physaddr
1350 + offsetof(struct hifn_dma
, test_src
));
1351 dma
->dstr
[dsti
].p
= htole32(sc
->sc_dma_physaddr
1352 + offsetof(struct hifn_dma
, test_dst
));
1354 dma
->cmdr
[cmdi
].l
= htole32(16 | masks
);
1355 dma
->srcr
[srci
].l
= htole32(8 | masks
);
1356 dma
->dstr
[dsti
].l
= htole32(4 | masks
);
1357 dma
->resr
[resi
].l
= htole32(4 | masks
);
1359 for (r
= 10000; r
>= 0; r
--) {
1361 if ((dma
->resr
[resi
].l
& htole32(HIFN_D_VALID
)) == 0)
1365 device_printf(sc
->sc_dev
, "writeramaddr -- "
1366 "result[%d](addr %d) still valid\n", resi
, addr
);
1372 WRITE_REG_1(sc
, HIFN_1_DMA_CSR
,
1373 HIFN_DMACSR_C_CTRL_DIS
| HIFN_DMACSR_S_CTRL_DIS
|
1374 HIFN_DMACSR_D_CTRL_DIS
| HIFN_DMACSR_R_CTRL_DIS
);
1380 hifn_readramaddr(struct hifn_softc
*sc
, int addr
, u_int8_t
*data
)
1382 struct hifn_dma
*dma
= sc
->sc_dma
;
1383 hifn_base_command_t rc
;
1384 const u_int32_t masks
= HIFN_D_VALID
| HIFN_D_LAST
| HIFN_D_MASKDONEIRQ
;
1385 int r
, cmdi
, srci
, dsti
, resi
;
1387 DPRINTF("%s()\n", __FUNCTION__
);
1389 rc
.masks
= htole16(2 << 13);
1390 rc
.session_num
= htole16(addr
>> 14);
1391 rc
.total_source_count
= htole16(addr
& 0x3fff);
1392 rc
.total_dest_count
= htole16(8);
1394 hifn_alloc_slot(sc
, &cmdi
, &srci
, &dsti
, &resi
);
1396 WRITE_REG_1(sc
, HIFN_1_DMA_CSR
,
1397 HIFN_DMACSR_C_CTRL_ENA
| HIFN_DMACSR_S_CTRL_ENA
|
1398 HIFN_DMACSR_D_CTRL_ENA
| HIFN_DMACSR_R_CTRL_ENA
);
1400 bzero(dma
->command_bufs
[cmdi
], HIFN_MAX_COMMAND
);
1401 *(hifn_base_command_t
*)dma
->command_bufs
[cmdi
] = rc
;
1403 dma
->srcr
[srci
].p
= htole32(sc
->sc_dma_physaddr
+
1404 offsetof(struct hifn_dma
, test_src
));
1406 dma
->dstr
[dsti
].p
= htole32(sc
->sc_dma_physaddr
+
1407 offsetof(struct hifn_dma
, test_dst
));
1409 dma
->cmdr
[cmdi
].l
= htole32(8 | masks
);
1410 dma
->srcr
[srci
].l
= htole32(8 | masks
);
1411 dma
->dstr
[dsti
].l
= htole32(8 | masks
);
1412 dma
->resr
[resi
].l
= htole32(HIFN_MAX_RESULT
| masks
);
1414 for (r
= 10000; r
>= 0; r
--) {
1416 if ((dma
->resr
[resi
].l
& htole32(HIFN_D_VALID
)) == 0)
1420 device_printf(sc
->sc_dev
, "readramaddr -- "
1421 "result[%d](addr %d) still valid\n", resi
, addr
);
1425 bcopy(&dma
->test_dst
, data
, sizeof(dma
->test_dst
));
1428 WRITE_REG_1(sc
, HIFN_1_DMA_CSR
,
1429 HIFN_DMACSR_C_CTRL_DIS
| HIFN_DMACSR_S_CTRL_DIS
|
1430 HIFN_DMACSR_D_CTRL_DIS
| HIFN_DMACSR_R_CTRL_DIS
);
1436 * Initialize the descriptor rings.
1439 hifn_init_dma(struct hifn_softc
*sc
)
1441 struct hifn_dma
*dma
= sc
->sc_dma
;
1444 DPRINTF("%s()\n", __FUNCTION__
);
1448 /* initialize static pointer values */
1449 for (i
= 0; i
< HIFN_D_CMD_RSIZE
; i
++)
1450 dma
->cmdr
[i
].p
= htole32(sc
->sc_dma_physaddr
+
1451 offsetof(struct hifn_dma
, command_bufs
[i
][0]));
1452 for (i
= 0; i
< HIFN_D_RES_RSIZE
; i
++)
1453 dma
->resr
[i
].p
= htole32(sc
->sc_dma_physaddr
+
1454 offsetof(struct hifn_dma
, result_bufs
[i
][0]));
1456 dma
->cmdr
[HIFN_D_CMD_RSIZE
].p
=
1457 htole32(sc
->sc_dma_physaddr
+ offsetof(struct hifn_dma
, cmdr
[0]));
1458 dma
->srcr
[HIFN_D_SRC_RSIZE
].p
=
1459 htole32(sc
->sc_dma_physaddr
+ offsetof(struct hifn_dma
, srcr
[0]));
1460 dma
->dstr
[HIFN_D_DST_RSIZE
].p
=
1461 htole32(sc
->sc_dma_physaddr
+ offsetof(struct hifn_dma
, dstr
[0]));
1462 dma
->resr
[HIFN_D_RES_RSIZE
].p
=
1463 htole32(sc
->sc_dma_physaddr
+ offsetof(struct hifn_dma
, resr
[0]));
1465 dma
->cmdu
= dma
->srcu
= dma
->dstu
= dma
->resu
= 0;
1466 dma
->cmdi
= dma
->srci
= dma
->dsti
= dma
->resi
= 0;
1467 dma
->cmdk
= dma
->srck
= dma
->dstk
= dma
->resk
= 0;
1471 * Writes out the raw command buffer space. Returns the
1472 * command buffer size.
1475 hifn_write_command(struct hifn_command
*cmd
, u_int8_t
*buf
)
1477 struct hifn_softc
*sc
= NULL
;
1479 hifn_base_command_t
*base_cmd
;
1480 hifn_mac_command_t
*mac_cmd
;
1481 hifn_crypt_command_t
*cry_cmd
;
1482 int using_mac
, using_crypt
, len
, ivlen
;
1483 u_int32_t dlen
, slen
;
1485 DPRINTF("%s()\n", __FUNCTION__
);
1488 using_mac
= cmd
->base_masks
& HIFN_BASE_CMD_MAC
;
1489 using_crypt
= cmd
->base_masks
& HIFN_BASE_CMD_CRYPT
;
1491 base_cmd
= (hifn_base_command_t
*)buf_pos
;
1492 base_cmd
->masks
= htole16(cmd
->base_masks
);
1493 slen
= cmd
->src_mapsize
;
1495 dlen
= cmd
->dst_mapsize
- cmd
->sloplen
+ sizeof(u_int32_t
);
1497 dlen
= cmd
->dst_mapsize
;
1498 base_cmd
->total_source_count
= htole16(slen
& HIFN_BASE_CMD_LENMASK_LO
);
1499 base_cmd
->total_dest_count
= htole16(dlen
& HIFN_BASE_CMD_LENMASK_LO
);
1502 base_cmd
->session_num
= htole16(
1503 ((slen
<< HIFN_BASE_CMD_SRCLEN_S
) & HIFN_BASE_CMD_SRCLEN_M
) |
1504 ((dlen
<< HIFN_BASE_CMD_DSTLEN_S
) & HIFN_BASE_CMD_DSTLEN_M
));
1505 buf_pos
+= sizeof(hifn_base_command_t
);
1508 mac_cmd
= (hifn_mac_command_t
*)buf_pos
;
1509 dlen
= cmd
->maccrd
->crd_len
;
1510 mac_cmd
->source_count
= htole16(dlen
& 0xffff);
1512 mac_cmd
->masks
= htole16(cmd
->mac_masks
|
1513 ((dlen
<< HIFN_MAC_CMD_SRCLEN_S
) & HIFN_MAC_CMD_SRCLEN_M
));
1514 mac_cmd
->header_skip
= htole16(cmd
->maccrd
->crd_skip
);
1515 mac_cmd
->reserved
= 0;
1516 buf_pos
+= sizeof(hifn_mac_command_t
);
1520 cry_cmd
= (hifn_crypt_command_t
*)buf_pos
;
1521 dlen
= cmd
->enccrd
->crd_len
;
1522 cry_cmd
->source_count
= htole16(dlen
& 0xffff);
1524 cry_cmd
->masks
= htole16(cmd
->cry_masks
|
1525 ((dlen
<< HIFN_CRYPT_CMD_SRCLEN_S
) & HIFN_CRYPT_CMD_SRCLEN_M
));
1526 cry_cmd
->header_skip
= htole16(cmd
->enccrd
->crd_skip
);
1527 cry_cmd
->reserved
= 0;
1528 buf_pos
+= sizeof(hifn_crypt_command_t
);
1531 if (using_mac
&& cmd
->mac_masks
& HIFN_MAC_CMD_NEW_KEY
) {
1532 bcopy(cmd
->mac
, buf_pos
, HIFN_MAC_KEY_LENGTH
);
1533 buf_pos
+= HIFN_MAC_KEY_LENGTH
;
1536 if (using_crypt
&& cmd
->cry_masks
& HIFN_CRYPT_CMD_NEW_KEY
) {
1537 switch (cmd
->cry_masks
& HIFN_CRYPT_CMD_ALG_MASK
) {
1538 case HIFN_CRYPT_CMD_ALG_3DES
:
1539 bcopy(cmd
->ck
, buf_pos
, HIFN_3DES_KEY_LENGTH
);
1540 buf_pos
+= HIFN_3DES_KEY_LENGTH
;
1542 case HIFN_CRYPT_CMD_ALG_DES
:
1543 bcopy(cmd
->ck
, buf_pos
, HIFN_DES_KEY_LENGTH
);
1544 buf_pos
+= HIFN_DES_KEY_LENGTH
;
1546 case HIFN_CRYPT_CMD_ALG_RC4
:
1551 clen
= MIN(cmd
->cklen
, len
);
1552 bcopy(cmd
->ck
, buf_pos
, clen
);
1559 case HIFN_CRYPT_CMD_ALG_AES
:
1561 * AES keys are variable 128, 192 and
1562 * 256 bits (16, 24 and 32 bytes).
1564 bcopy(cmd
->ck
, buf_pos
, cmd
->cklen
);
1565 buf_pos
+= cmd
->cklen
;
1570 if (using_crypt
&& cmd
->cry_masks
& HIFN_CRYPT_CMD_NEW_IV
) {
1571 switch (cmd
->cry_masks
& HIFN_CRYPT_CMD_ALG_MASK
) {
1572 case HIFN_CRYPT_CMD_ALG_AES
:
1573 ivlen
= HIFN_AES_IV_LENGTH
;
1576 ivlen
= HIFN_IV_LENGTH
;
1579 bcopy(cmd
->iv
, buf_pos
, ivlen
);
1583 if ((cmd
->base_masks
& (HIFN_BASE_CMD_MAC
|HIFN_BASE_CMD_CRYPT
)) == 0) {
1588 return (buf_pos
- buf
);
1592 hifn_dmamap_aligned(struct hifn_operand
*op
)
1594 struct hifn_softc
*sc
= NULL
;
1597 DPRINTF("%s()\n", __FUNCTION__
);
1599 for (i
= 0; i
< op
->nsegs
; i
++) {
1600 if (op
->segs
[i
].ds_addr
& 3)
1602 if ((i
!= (op
->nsegs
- 1)) && (op
->segs
[i
].ds_len
& 3))
1609 hifn_dmamap_dstwrap(struct hifn_softc
*sc
, int idx
)
1611 struct hifn_dma
*dma
= sc
->sc_dma
;
1613 if (++idx
== HIFN_D_DST_RSIZE
) {
1614 dma
->dstr
[idx
].l
= htole32(HIFN_D_VALID
| HIFN_D_JUMP
|
1615 HIFN_D_MASKDONEIRQ
);
1616 HIFN_DSTR_SYNC(sc
, idx
,
1617 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
1624 hifn_dmamap_load_dst(struct hifn_softc
*sc
, struct hifn_command
*cmd
)
1626 struct hifn_dma
*dma
= sc
->sc_dma
;
1627 struct hifn_operand
*dst
= &cmd
->dst
;
1629 int idx
, used
= 0, i
;
1631 DPRINTF("%s()\n", __FUNCTION__
);
1634 for (i
= 0; i
< dst
->nsegs
- 1; i
++) {
1635 dma
->dstr
[idx
].p
= htole32(dst
->segs
[i
].ds_addr
);
1636 dma
->dstr
[idx
].l
= htole32(HIFN_D_MASKDONEIRQ
| dst
->segs
[i
].ds_len
);
1638 dma
->dstr
[idx
].l
|= htole32(HIFN_D_VALID
);
1639 HIFN_DSTR_SYNC(sc
, idx
,
1640 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
1643 idx
= hifn_dmamap_dstwrap(sc
, idx
);
1646 if (cmd
->sloplen
== 0) {
1647 p
= dst
->segs
[i
].ds_addr
;
1648 l
= HIFN_D_MASKDONEIRQ
| HIFN_D_LAST
|
1649 dst
->segs
[i
].ds_len
;
1651 p
= sc
->sc_dma_physaddr
+
1652 offsetof(struct hifn_dma
, slop
[cmd
->slopidx
]);
1653 l
= HIFN_D_MASKDONEIRQ
| HIFN_D_LAST
|
1656 if ((dst
->segs
[i
].ds_len
- cmd
->sloplen
) != 0) {
1657 dma
->dstr
[idx
].p
= htole32(dst
->segs
[i
].ds_addr
);
1658 dma
->dstr
[idx
].l
= htole32(HIFN_D_MASKDONEIRQ
|
1659 (dst
->segs
[i
].ds_len
- cmd
->sloplen
));
1661 dma
->dstr
[idx
].l
|= htole32(HIFN_D_VALID
);
1662 HIFN_DSTR_SYNC(sc
, idx
,
1663 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
1666 idx
= hifn_dmamap_dstwrap(sc
, idx
);
1669 dma
->dstr
[idx
].p
= htole32(p
);
1670 dma
->dstr
[idx
].l
= htole32(l
);
1672 dma
->dstr
[idx
].l
|= htole32(HIFN_D_VALID
);
1673 HIFN_DSTR_SYNC(sc
, idx
, BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
1676 idx
= hifn_dmamap_dstwrap(sc
, idx
);
1684 hifn_dmamap_srcwrap(struct hifn_softc
*sc
, int idx
)
1686 struct hifn_dma
*dma
= sc
->sc_dma
;
1688 if (++idx
== HIFN_D_SRC_RSIZE
) {
1689 dma
->srcr
[idx
].l
= htole32(HIFN_D_VALID
|
1690 HIFN_D_JUMP
| HIFN_D_MASKDONEIRQ
);
1691 HIFN_SRCR_SYNC(sc
, HIFN_D_SRC_RSIZE
,
1692 BUS_DMASYNC_PREWRITE
| BUS_DMASYNC_PREREAD
);
1699 hifn_dmamap_load_src(struct hifn_softc
*sc
, struct hifn_command
*cmd
)
1701 struct hifn_dma
*dma
= sc
->sc_dma
;
1702 struct hifn_operand
*src
= &cmd
->src
;
1706 DPRINTF("%s()\n", __FUNCTION__
);
1709 for (i
= 0; i
< src
->nsegs
; i
++) {
1710 if (i
== src
->nsegs
- 1)
1713 dma
->srcr
[idx
].p
= htole32(src
->segs
[i
].ds_addr
);
1714 dma
->srcr
[idx
].l
= htole32(src
->segs
[i
].ds_len
|
1715 HIFN_D_MASKDONEIRQ
| last
);
1717 dma
->srcr
[idx
].l
|= htole32(HIFN_D_VALID
);
1718 HIFN_SRCR_SYNC(sc
, idx
,
1719 BUS_DMASYNC_PREWRITE
| BUS_DMASYNC_PREREAD
);
1721 idx
= hifn_dmamap_srcwrap(sc
, idx
);
1724 dma
->srcu
+= src
->nsegs
;
1731 struct hifn_softc
*sc
,
1732 struct hifn_command
*cmd
,
1733 struct cryptop
*crp
,
1736 struct hifn_dma
*dma
= sc
->sc_dma
;
1737 u_int32_t cmdlen
, csr
;
1738 int cmdi
, resi
, err
= 0;
1739 unsigned long l_flags
;
1741 DPRINTF("%s()\n", __FUNCTION__
);
1744 * need 1 cmd, and 1 res
1746 * NB: check this first since it's easy.
1749 if ((dma
->cmdu
+ 1) > HIFN_D_CMD_RSIZE
||
1750 (dma
->resu
+ 1) > HIFN_D_RES_RSIZE
) {
1753 device_printf(sc
->sc_dev
,
1754 "cmd/result exhaustion, cmdu %u resu %u\n",
1755 dma
->cmdu
, dma
->resu
);
1758 hifnstats
.hst_nomem_cr
++;
1759 sc
->sc_needwakeup
|= CRYPTO_SYMQ
;
1764 if (crp
->crp_flags
& CRYPTO_F_SKBUF
) {
1765 if (pci_map_skb(sc
, &cmd
->src
, cmd
->src_skb
)) {
1766 hifnstats
.hst_nomem_load
++;
1770 } else if (crp
->crp_flags
& CRYPTO_F_IOV
) {
1771 if (pci_map_uio(sc
, &cmd
->src
, cmd
->src_io
)) {
1772 hifnstats
.hst_nomem_load
++;
1777 if (pci_map_buf(sc
, &cmd
->src
, cmd
->src_buf
, crp
->crp_ilen
)) {
1778 hifnstats
.hst_nomem_load
++;
1784 if (hifn_dmamap_aligned(&cmd
->src
)) {
1785 cmd
->sloplen
= cmd
->src_mapsize
& 3;
1786 cmd
->dst
= cmd
->src
;
1788 if (crp
->crp_flags
& CRYPTO_F_IOV
) {
1789 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
1792 } else if (crp
->crp_flags
& CRYPTO_F_SKBUF
) {
1795 struct mbuf
*m
, *m0
, *mlast
;
1797 KASSERT(cmd
->dst_m
== cmd
->src_m
,
1798 ("hifn_crypto: dst_m initialized improperly"));
1799 hifnstats
.hst_unaligned
++;
1801 * Source is not aligned on a longword boundary.
1802 * Copy the data to insure alignment. If we fail
1803 * to allocate mbufs or clusters while doing this
1804 * we return ERESTART so the operation is requeued
1805 * at the crypto later, but only if there are
1806 * ops already posted to the hardware; otherwise we
1807 * have no guarantee that we'll be re-entered.
1809 totlen
= cmd
->src_mapsize
;
1810 if (cmd
->src_m
->m_flags
& M_PKTHDR
) {
1812 MGETHDR(m0
, M_DONTWAIT
, MT_DATA
);
1813 if (m0
&& !m_dup_pkthdr(m0
, cmd
->src_m
, M_DONTWAIT
)) {
1819 MGET(m0
, M_DONTWAIT
, MT_DATA
);
1822 hifnstats
.hst_nomem_mbuf
++;
1823 err
= dma
->cmdu
? ERESTART
: ENOMEM
;
1826 if (totlen
>= MINCLSIZE
) {
1827 MCLGET(m0
, M_DONTWAIT
);
1828 if ((m0
->m_flags
& M_EXT
) == 0) {
1829 hifnstats
.hst_nomem_mcl
++;
1830 err
= dma
->cmdu
? ERESTART
: ENOMEM
;
1837 m0
->m_pkthdr
.len
= m0
->m_len
= len
;
1840 while (totlen
> 0) {
1841 MGET(m
, M_DONTWAIT
, MT_DATA
);
1843 hifnstats
.hst_nomem_mbuf
++;
1844 err
= dma
->cmdu
? ERESTART
: ENOMEM
;
1849 if (totlen
>= MINCLSIZE
) {
1850 MCLGET(m
, M_DONTWAIT
);
1851 if ((m
->m_flags
& M_EXT
) == 0) {
1852 hifnstats
.hst_nomem_mcl
++;
1853 err
= dma
->cmdu
? ERESTART
: ENOMEM
;
1862 m0
->m_pkthdr
.len
+= len
;
1870 device_printf(sc
->sc_dev
,
1871 "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
1872 __FILE__
, __LINE__
);
1877 device_printf(sc
->sc_dev
,
1878 "%s,%d: unaligned contig buffers not implemented\n",
1879 __FILE__
, __LINE__
);
1885 if (cmd
->dst_map
== NULL
) {
1886 if (crp
->crp_flags
& CRYPTO_F_SKBUF
) {
1887 if (pci_map_skb(sc
, &cmd
->dst
, cmd
->dst_skb
)) {
1888 hifnstats
.hst_nomem_map
++;
1892 } else if (crp
->crp_flags
& CRYPTO_F_IOV
) {
1893 if (pci_map_uio(sc
, &cmd
->dst
, cmd
->dst_io
)) {
1894 hifnstats
.hst_nomem_load
++;
1899 if (pci_map_buf(sc
, &cmd
->dst
, cmd
->dst_buf
, crp
->crp_ilen
)) {
1900 hifnstats
.hst_nomem_load
++;
1909 device_printf(sc
->sc_dev
,
1910 "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1911 READ_REG_1(sc
, HIFN_1_DMA_CSR
),
1912 READ_REG_1(sc
, HIFN_1_DMA_IER
),
1913 dma
->cmdu
, dma
->srcu
, dma
->dstu
, dma
->resu
,
1914 cmd
->src_nsegs
, cmd
->dst_nsegs
);
1919 if (cmd
->src_map
== cmd
->dst_map
) {
1920 bus_dmamap_sync(sc
->sc_dmat
, cmd
->src_map
,
1921 BUS_DMASYNC_PREWRITE
|BUS_DMASYNC_PREREAD
);
1923 bus_dmamap_sync(sc
->sc_dmat
, cmd
->src_map
,
1924 BUS_DMASYNC_PREWRITE
);
1925 bus_dmamap_sync(sc
->sc_dmat
, cmd
->dst_map
,
1926 BUS_DMASYNC_PREREAD
);
1931 * need N src, and N dst
1933 if ((dma
->srcu
+ cmd
->src_nsegs
) > HIFN_D_SRC_RSIZE
||
1934 (dma
->dstu
+ cmd
->dst_nsegs
+ 1) > HIFN_D_DST_RSIZE
) {
1937 device_printf(sc
->sc_dev
,
1938 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
1939 dma
->srcu
, cmd
->src_nsegs
,
1940 dma
->dstu
, cmd
->dst_nsegs
);
1943 hifnstats
.hst_nomem_sd
++;
1948 if (dma
->cmdi
== HIFN_D_CMD_RSIZE
) {
1950 dma
->cmdr
[HIFN_D_CMD_RSIZE
].l
= htole32(HIFN_D_JUMP
|HIFN_D_MASKDONEIRQ
);
1952 dma
->cmdr
[HIFN_D_CMD_RSIZE
].l
|= htole32(HIFN_D_VALID
);
1953 HIFN_CMDR_SYNC(sc
, HIFN_D_CMD_RSIZE
,
1954 BUS_DMASYNC_PREWRITE
| BUS_DMASYNC_PREREAD
);
1957 cmdlen
= hifn_write_command(cmd
, dma
->command_bufs
[cmdi
]);
1958 HIFN_CMD_SYNC(sc
, cmdi
, BUS_DMASYNC_PREWRITE
);
1960 /* .p for command/result already set */
1961 dma
->cmdr
[cmdi
].l
= htole32(cmdlen
| HIFN_D_LAST
|
1962 HIFN_D_MASKDONEIRQ
);
1964 dma
->cmdr
[cmdi
].l
|= htole32(HIFN_D_VALID
);
1965 HIFN_CMDR_SYNC(sc
, cmdi
,
1966 BUS_DMASYNC_PREWRITE
| BUS_DMASYNC_PREREAD
);
1970 * We don't worry about missing an interrupt (which a "command wait"
1971 * interrupt salvages us from), unless there is more than one command
1974 if (dma
->cmdu
> 1) {
1975 sc
->sc_dmaier
|= HIFN_DMAIER_C_WAIT
;
1976 WRITE_REG_1(sc
, HIFN_1_DMA_IER
, sc
->sc_dmaier
);
1979 hifnstats
.hst_ipackets
++;
1980 hifnstats
.hst_ibytes
+= cmd
->src_mapsize
;
1982 hifn_dmamap_load_src(sc
, cmd
);
1985 * Unlike other descriptors, we don't mask done interrupt from
1986 * result descriptor.
1990 device_printf(sc
->sc_dev
, "load res\n");
1992 if (dma
->resi
== HIFN_D_RES_RSIZE
) {
1994 dma
->resr
[HIFN_D_RES_RSIZE
].l
= htole32(HIFN_D_JUMP
|HIFN_D_MASKDONEIRQ
);
1996 dma
->resr
[HIFN_D_RES_RSIZE
].l
|= htole32(HIFN_D_VALID
);
1997 HIFN_RESR_SYNC(sc
, HIFN_D_RES_RSIZE
,
1998 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
2001 KASSERT(dma
->hifn_commands
[resi
] == NULL
,
2002 ("hifn_crypto: command slot %u busy", resi
));
2003 dma
->hifn_commands
[resi
] = cmd
;
2004 HIFN_RES_SYNC(sc
, resi
, BUS_DMASYNC_PREREAD
);
2005 if ((hint
& CRYPTO_HINT_MORE
) && sc
->sc_curbatch
< hifn_maxbatch
) {
2006 dma
->resr
[resi
].l
= htole32(HIFN_MAX_RESULT
|
2007 HIFN_D_LAST
| HIFN_D_MASKDONEIRQ
);
2009 dma
->resr
[resi
].l
|= htole32(HIFN_D_VALID
);
2011 if (sc
->sc_curbatch
> hifnstats
.hst_maxbatch
)
2012 hifnstats
.hst_maxbatch
= sc
->sc_curbatch
;
2013 hifnstats
.hst_totbatch
++;
2015 dma
->resr
[resi
].l
= htole32(HIFN_MAX_RESULT
| HIFN_D_LAST
);
2017 dma
->resr
[resi
].l
|= htole32(HIFN_D_VALID
);
2018 sc
->sc_curbatch
= 0;
2020 HIFN_RESR_SYNC(sc
, resi
,
2021 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
2025 cmd
->slopidx
= resi
;
2027 hifn_dmamap_load_dst(sc
, cmd
);
2030 if (sc
->sc_c_busy
== 0) {
2031 csr
|= HIFN_DMACSR_C_CTRL_ENA
;
2034 if (sc
->sc_s_busy
== 0) {
2035 csr
|= HIFN_DMACSR_S_CTRL_ENA
;
2038 if (sc
->sc_r_busy
== 0) {
2039 csr
|= HIFN_DMACSR_R_CTRL_ENA
;
2042 if (sc
->sc_d_busy
== 0) {
2043 csr
|= HIFN_DMACSR_D_CTRL_ENA
;
2047 WRITE_REG_1(sc
, HIFN_1_DMA_CSR
, csr
);
2051 device_printf(sc
->sc_dev
, "command: stat %8x ier %8x\n",
2052 READ_REG_1(sc
, HIFN_1_DMA_CSR
),
2053 READ_REG_1(sc
, HIFN_1_DMA_IER
));
2059 KASSERT(err
== 0, ("hifn_crypto: success with error %u", err
));
2060 return (err
); /* success */
2063 if (cmd
->src_map
!= cmd
->dst_map
)
2064 pci_unmap_buf(sc
, &cmd
->dst
);
2067 if (crp
->crp_flags
& CRYPTO_F_SKBUF
) {
2068 if (cmd
->src_skb
!= cmd
->dst_skb
)
2070 m_freem(cmd
->dst_m
);
2072 device_printf(sc
->sc_dev
,
2073 "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
2074 __FILE__
, __LINE__
);
2077 pci_unmap_buf(sc
, &cmd
->src
);
2084 hifn_tick(unsigned long arg
)
2086 struct hifn_softc
*sc
;
2087 unsigned long l_flags
;
2089 if (arg
>= HIFN_MAX_CHIPS
)
2091 sc
= hifn_chip_idx
[arg
];
2096 if (sc
->sc_active
== 0) {
2097 struct hifn_dma
*dma
= sc
->sc_dma
;
2100 if (dma
->cmdu
== 0 && sc
->sc_c_busy
) {
2102 r
|= HIFN_DMACSR_C_CTRL_DIS
;
2104 if (dma
->srcu
== 0 && sc
->sc_s_busy
) {
2106 r
|= HIFN_DMACSR_S_CTRL_DIS
;
2108 if (dma
->dstu
== 0 && sc
->sc_d_busy
) {
2110 r
|= HIFN_DMACSR_D_CTRL_DIS
;
2112 if (dma
->resu
== 0 && sc
->sc_r_busy
) {
2114 r
|= HIFN_DMACSR_R_CTRL_DIS
;
2117 WRITE_REG_1(sc
, HIFN_1_DMA_CSR
, r
);
2121 mod_timer(&sc
->sc_tickto
, jiffies
+ HZ
);
2125 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
2126 hifn_intr(int irq
, void *arg
)
2128 hifn_intr(int irq
, void *arg
, struct pt_regs
*regs
)
2131 struct hifn_softc
*sc
= arg
;
2132 struct hifn_dma
*dma
;
2133 u_int32_t dmacsr
, restart
;
2135 unsigned long l_flags
;
2137 dmacsr
= READ_REG_1(sc
, HIFN_1_DMA_CSR
);
2139 /* Nothing in the DMA unit interrupted */
2140 if ((dmacsr
& sc
->sc_dmaier
) == 0)
2149 device_printf(sc
->sc_dev
,
2150 "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
2151 dmacsr
, READ_REG_1(sc
, HIFN_1_DMA_IER
), sc
->sc_dmaier
,
2152 dma
->cmdi
, dma
->srci
, dma
->dsti
, dma
->resi
,
2153 dma
->cmdk
, dma
->srck
, dma
->dstk
, dma
->resk
,
2154 dma
->cmdu
, dma
->srcu
, dma
->dstu
, dma
->resu
);
2158 WRITE_REG_1(sc
, HIFN_1_DMA_CSR
, dmacsr
& sc
->sc_dmaier
);
2160 if ((sc
->sc_flags
& HIFN_HAS_PUBLIC
) &&
2161 (dmacsr
& HIFN_DMACSR_PUBDONE
))
2162 WRITE_REG_1(sc
, HIFN_1_PUB_STATUS
,
2163 READ_REG_1(sc
, HIFN_1_PUB_STATUS
) | HIFN_PUBSTS_DONE
);
2165 restart
= dmacsr
& (HIFN_DMACSR_D_OVER
| HIFN_DMACSR_R_OVER
);
2167 device_printf(sc
->sc_dev
, "overrun %x\n", dmacsr
);
2169 if (sc
->sc_flags
& HIFN_IS_7811
) {
2170 if (dmacsr
& HIFN_DMACSR_ILLR
)
2171 device_printf(sc
->sc_dev
, "illegal read\n");
2172 if (dmacsr
& HIFN_DMACSR_ILLW
)
2173 device_printf(sc
->sc_dev
, "illegal write\n");
2176 restart
= dmacsr
& (HIFN_DMACSR_C_ABORT
| HIFN_DMACSR_S_ABORT
|
2177 HIFN_DMACSR_D_ABORT
| HIFN_DMACSR_R_ABORT
);
2179 device_printf(sc
->sc_dev
, "abort, resetting.\n");
2180 hifnstats
.hst_abort
++;
2186 if ((dmacsr
& HIFN_DMACSR_C_WAIT
) && (dma
->cmdu
== 0)) {
2188 * If no slots to process and we receive a "waiting on
2189 * command" interrupt, we disable the "waiting on command"
2192 sc
->sc_dmaier
&= ~HIFN_DMAIER_C_WAIT
;
2193 WRITE_REG_1(sc
, HIFN_1_DMA_IER
, sc
->sc_dmaier
);
2196 /* clear the rings */
2197 i
= dma
->resk
; u
= dma
->resu
;
2199 HIFN_RESR_SYNC(sc
, i
,
2200 BUS_DMASYNC_POSTREAD
| BUS_DMASYNC_POSTWRITE
);
2201 if (dma
->resr
[i
].l
& htole32(HIFN_D_VALID
)) {
2202 HIFN_RESR_SYNC(sc
, i
,
2203 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
2207 if (i
!= HIFN_D_RES_RSIZE
) {
2208 struct hifn_command
*cmd
;
2209 u_int8_t
*macbuf
= NULL
;
2211 HIFN_RES_SYNC(sc
, i
, BUS_DMASYNC_POSTREAD
);
2212 cmd
= dma
->hifn_commands
[i
];
2213 KASSERT(cmd
!= NULL
,
2214 ("hifn_intr: null command slot %u", i
));
2215 dma
->hifn_commands
[i
] = NULL
;
2217 if (cmd
->base_masks
& HIFN_BASE_CMD_MAC
) {
2218 macbuf
= dma
->result_bufs
[i
];
2222 hifn_callback(sc
, cmd
, macbuf
);
2223 hifnstats
.hst_opackets
++;
2227 if (++i
== (HIFN_D_RES_RSIZE
+ 1))
2230 dma
->resk
= i
; dma
->resu
= u
;
2232 i
= dma
->srck
; u
= dma
->srcu
;
2234 if (i
== HIFN_D_SRC_RSIZE
)
2236 HIFN_SRCR_SYNC(sc
, i
,
2237 BUS_DMASYNC_POSTREAD
| BUS_DMASYNC_POSTWRITE
);
2238 if (dma
->srcr
[i
].l
& htole32(HIFN_D_VALID
)) {
2239 HIFN_SRCR_SYNC(sc
, i
,
2240 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
2245 dma
->srck
= i
; dma
->srcu
= u
;
2247 i
= dma
->cmdk
; u
= dma
->cmdu
;
2249 HIFN_CMDR_SYNC(sc
, i
,
2250 BUS_DMASYNC_POSTREAD
| BUS_DMASYNC_POSTWRITE
);
2251 if (dma
->cmdr
[i
].l
& htole32(HIFN_D_VALID
)) {
2252 HIFN_CMDR_SYNC(sc
, i
,
2253 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
2256 if (i
!= HIFN_D_CMD_RSIZE
) {
2258 HIFN_CMD_SYNC(sc
, i
, BUS_DMASYNC_POSTWRITE
);
2260 if (++i
== (HIFN_D_CMD_RSIZE
+ 1))
2263 dma
->cmdk
= i
; dma
->cmdu
= u
;
2267 if (sc
->sc_needwakeup
) { /* XXX check high watermark */
2268 int wakeup
= sc
->sc_needwakeup
& (CRYPTO_SYMQ
|CRYPTO_ASYMQ
);
2271 device_printf(sc
->sc_dev
,
2272 "wakeup crypto (%x) u %d/%d/%d/%d\n",
2274 dma
->cmdu
, dma
->srcu
, dma
->dstu
, dma
->resu
);
2276 sc
->sc_needwakeup
&= ~wakeup
;
2277 crypto_unblock(sc
->sc_cid
, wakeup
);
2284 * Allocate a new 'session' and return an encoded session id. 'sidp'
2285 * contains our registration id, and should contain an encoded session
2286 * id on successful allocation.
2289 hifn_newsession(device_t dev
, u_int32_t
*sidp
, struct cryptoini
*cri
)
2291 struct hifn_softc
*sc
= device_get_softc(dev
);
2292 struct cryptoini
*c
;
2293 int mac
= 0, cry
= 0, sesn
;
2294 struct hifn_session
*ses
= NULL
;
2295 unsigned long l_flags
;
2297 DPRINTF("%s()\n", __FUNCTION__
);
2299 KASSERT(sc
!= NULL
, ("hifn_newsession: null softc"));
2300 if (sidp
== NULL
|| cri
== NULL
|| sc
== NULL
) {
2301 DPRINTF("%s,%d: %s - EINVAL\n", __FILE__
, __LINE__
, __FUNCTION__
);
2306 if (sc
->sc_sessions
== NULL
) {
2307 ses
= sc
->sc_sessions
= (struct hifn_session
*)kmalloc(sizeof(*ses
),
2314 sc
->sc_nsessions
= 1;
2316 for (sesn
= 0; sesn
< sc
->sc_nsessions
; sesn
++) {
2317 if (!sc
->sc_sessions
[sesn
].hs_used
) {
2318 ses
= &sc
->sc_sessions
[sesn
];
2324 sesn
= sc
->sc_nsessions
;
2325 ses
= (struct hifn_session
*)kmalloc((sesn
+ 1) * sizeof(*ses
),
2331 bcopy(sc
->sc_sessions
, ses
, sesn
* sizeof(*ses
));
2332 bzero(sc
->sc_sessions
, sesn
* sizeof(*ses
));
2333 kfree(sc
->sc_sessions
);
2334 sc
->sc_sessions
= ses
;
2335 ses
= &sc
->sc_sessions
[sesn
];
2341 bzero(ses
, sizeof(*ses
));
2344 for (c
= cri
; c
!= NULL
; c
= c
->cri_next
) {
2345 switch (c
->cri_alg
) {
2348 case CRYPTO_MD5_HMAC
:
2349 case CRYPTO_SHA1_HMAC
:
2351 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2355 ses
->hs_mlen
= c
->cri_mlen
;
2356 if (ses
->hs_mlen
== 0) {
2357 switch (c
->cri_alg
) {
2359 case CRYPTO_MD5_HMAC
:
2363 case CRYPTO_SHA1_HMAC
:
2369 case CRYPTO_DES_CBC
:
2370 case CRYPTO_3DES_CBC
:
2371 case CRYPTO_AES_CBC
:
2372 /* XXX this may read fewer, does it matter? */
2373 read_random(ses
->hs_iv
,
2374 c
->cri_alg
== CRYPTO_AES_CBC
?
2375 HIFN_AES_IV_LENGTH
: HIFN_IV_LENGTH
);
2379 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2385 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2389 if (mac
== 0 && cry
== 0) {
2390 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2394 *sidp
= HIFN_SID(device_get_unit(sc
->sc_dev
), sesn
);
2400 * Deallocate a session.
2401 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2402 * XXX to blow away any keys already stored there.
2405 hifn_freesession(device_t dev
, u_int64_t tid
)
2407 struct hifn_softc
*sc
= device_get_softc(dev
);
2409 u_int32_t sid
= CRYPTO_SESID2LID(tid
);
2410 unsigned long l_flags
;
2412 DPRINTF("%s()\n", __FUNCTION__
);
2414 KASSERT(sc
!= NULL
, ("hifn_freesession: null softc"));
2416 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2421 session
= HIFN_SESSION(sid
);
2422 if (session
< sc
->sc_nsessions
) {
2423 bzero(&sc
->sc_sessions
[session
], sizeof(struct hifn_session
));
2426 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2435 hifn_process(device_t dev
, struct cryptop
*crp
, int hint
)
2437 struct hifn_softc
*sc
= device_get_softc(dev
);
2438 struct hifn_command
*cmd
= NULL
;
2439 int session
, err
, ivlen
;
2440 struct cryptodesc
*crd1
, *crd2
, *maccrd
, *enccrd
;
2442 DPRINTF("%s()\n", __FUNCTION__
);
2444 if (crp
== NULL
|| crp
->crp_callback
== NULL
) {
2445 hifnstats
.hst_invalid
++;
2446 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2449 session
= HIFN_SESSION(crp
->crp_sid
);
2451 if (sc
== NULL
|| session
>= sc
->sc_nsessions
) {
2452 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2457 cmd
= kmalloc(sizeof(struct hifn_command
), SLAB_ATOMIC
);
2459 hifnstats
.hst_nomem
++;
2463 memset(cmd
, 0, sizeof(*cmd
));
2465 if (crp
->crp_flags
& CRYPTO_F_SKBUF
) {
2466 cmd
->src_skb
= (struct sk_buff
*)crp
->crp_buf
;
2467 cmd
->dst_skb
= (struct sk_buff
*)crp
->crp_buf
;
2468 } else if (crp
->crp_flags
& CRYPTO_F_IOV
) {
2469 cmd
->src_io
= (struct uio
*)crp
->crp_buf
;
2470 cmd
->dst_io
= (struct uio
*)crp
->crp_buf
;
2472 cmd
->src_buf
= crp
->crp_buf
;
2473 cmd
->dst_buf
= crp
->crp_buf
;
2476 crd1
= crp
->crp_desc
;
2478 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2482 crd2
= crd1
->crd_next
;
2485 if (crd1
->crd_alg
== CRYPTO_MD5_HMAC
||
2486 crd1
->crd_alg
== CRYPTO_SHA1_HMAC
||
2487 crd1
->crd_alg
== CRYPTO_SHA1
||
2488 crd1
->crd_alg
== CRYPTO_MD5
) {
2491 } else if (crd1
->crd_alg
== CRYPTO_DES_CBC
||
2492 crd1
->crd_alg
== CRYPTO_3DES_CBC
||
2493 crd1
->crd_alg
== CRYPTO_AES_CBC
||
2494 crd1
->crd_alg
== CRYPTO_ARC4
) {
2495 if ((crd1
->crd_flags
& CRD_F_ENCRYPT
) == 0)
2496 cmd
->base_masks
|= HIFN_BASE_CMD_DECODE
;
2500 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2505 if ((crd1
->crd_alg
== CRYPTO_MD5_HMAC
||
2506 crd1
->crd_alg
== CRYPTO_SHA1_HMAC
||
2507 crd1
->crd_alg
== CRYPTO_MD5
||
2508 crd1
->crd_alg
== CRYPTO_SHA1
) &&
2509 (crd2
->crd_alg
== CRYPTO_DES_CBC
||
2510 crd2
->crd_alg
== CRYPTO_3DES_CBC
||
2511 crd2
->crd_alg
== CRYPTO_AES_CBC
||
2512 crd2
->crd_alg
== CRYPTO_ARC4
) &&
2513 ((crd2
->crd_flags
& CRD_F_ENCRYPT
) == 0)) {
2514 cmd
->base_masks
= HIFN_BASE_CMD_DECODE
;
2517 } else if ((crd1
->crd_alg
== CRYPTO_DES_CBC
||
2518 crd1
->crd_alg
== CRYPTO_ARC4
||
2519 crd1
->crd_alg
== CRYPTO_3DES_CBC
||
2520 crd1
->crd_alg
== CRYPTO_AES_CBC
) &&
2521 (crd2
->crd_alg
== CRYPTO_MD5_HMAC
||
2522 crd2
->crd_alg
== CRYPTO_SHA1_HMAC
||
2523 crd2
->crd_alg
== CRYPTO_MD5
||
2524 crd2
->crd_alg
== CRYPTO_SHA1
) &&
2525 (crd1
->crd_flags
& CRD_F_ENCRYPT
)) {
2530 * We cannot order the 7751 as requested
2532 DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
, crd1
->crd_alg
, crd2
->crd_alg
, crd1
->crd_flags
& CRD_F_ENCRYPT
);
2539 cmd
->enccrd
= enccrd
;
2540 cmd
->base_masks
|= HIFN_BASE_CMD_CRYPT
;
2541 switch (enccrd
->crd_alg
) {
2543 cmd
->cry_masks
|= HIFN_CRYPT_CMD_ALG_RC4
;
2545 case CRYPTO_DES_CBC
:
2546 cmd
->cry_masks
|= HIFN_CRYPT_CMD_ALG_DES
|
2547 HIFN_CRYPT_CMD_MODE_CBC
|
2548 HIFN_CRYPT_CMD_NEW_IV
;
2550 case CRYPTO_3DES_CBC
:
2551 cmd
->cry_masks
|= HIFN_CRYPT_CMD_ALG_3DES
|
2552 HIFN_CRYPT_CMD_MODE_CBC
|
2553 HIFN_CRYPT_CMD_NEW_IV
;
2555 case CRYPTO_AES_CBC
:
2556 cmd
->cry_masks
|= HIFN_CRYPT_CMD_ALG_AES
|
2557 HIFN_CRYPT_CMD_MODE_CBC
|
2558 HIFN_CRYPT_CMD_NEW_IV
;
2561 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2565 if (enccrd
->crd_alg
!= CRYPTO_ARC4
) {
2566 ivlen
= ((enccrd
->crd_alg
== CRYPTO_AES_CBC
) ?
2567 HIFN_AES_IV_LENGTH
: HIFN_IV_LENGTH
);
2568 if (enccrd
->crd_flags
& CRD_F_ENCRYPT
) {
2569 if (enccrd
->crd_flags
& CRD_F_IV_EXPLICIT
)
2570 bcopy(enccrd
->crd_iv
, cmd
->iv
, ivlen
);
2572 bcopy(sc
->sc_sessions
[session
].hs_iv
,
2575 if ((enccrd
->crd_flags
& CRD_F_IV_PRESENT
)
2577 crypto_copyback(crp
->crp_flags
,
2578 crp
->crp_buf
, enccrd
->crd_inject
,
2582 if (enccrd
->crd_flags
& CRD_F_IV_EXPLICIT
)
2583 bcopy(enccrd
->crd_iv
, cmd
->iv
, ivlen
);
2585 crypto_copydata(crp
->crp_flags
,
2586 crp
->crp_buf
, enccrd
->crd_inject
,
2592 if (enccrd
->crd_flags
& CRD_F_KEY_EXPLICIT
)
2593 cmd
->cry_masks
|= HIFN_CRYPT_CMD_NEW_KEY
;
2594 cmd
->ck
= enccrd
->crd_key
;
2595 cmd
->cklen
= enccrd
->crd_klen
>> 3;
2596 cmd
->cry_masks
|= HIFN_CRYPT_CMD_NEW_KEY
;
2599 * Need to specify the size for the AES key in the masks.
2601 if ((cmd
->cry_masks
& HIFN_CRYPT_CMD_ALG_MASK
) ==
2602 HIFN_CRYPT_CMD_ALG_AES
) {
2603 switch (cmd
->cklen
) {
2605 cmd
->cry_masks
|= HIFN_CRYPT_CMD_KSZ_128
;
2608 cmd
->cry_masks
|= HIFN_CRYPT_CMD_KSZ_192
;
2611 cmd
->cry_masks
|= HIFN_CRYPT_CMD_KSZ_256
;
2614 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__
,__LINE__
,__FUNCTION__
);
2622 cmd
->maccrd
= maccrd
;
2623 cmd
->base_masks
|= HIFN_BASE_CMD_MAC
;
2625 switch (maccrd
->crd_alg
) {
2627 cmd
->mac_masks
|= HIFN_MAC_CMD_ALG_MD5
|
2628 HIFN_MAC_CMD_RESULT
| HIFN_MAC_CMD_MODE_HASH
|
2629 HIFN_MAC_CMD_POS_IPSEC
;
2631 case CRYPTO_MD5_HMAC
:
2632 cmd
->mac_masks
|= HIFN_MAC_CMD_ALG_MD5
|
2633 HIFN_MAC_CMD_RESULT
| HIFN_MAC_CMD_MODE_HMAC
|
2634 HIFN_MAC_CMD_POS_IPSEC
| HIFN_MAC_CMD_TRUNC
;
2637 cmd
->mac_masks
|= HIFN_MAC_CMD_ALG_SHA1
|
2638 HIFN_MAC_CMD_RESULT
| HIFN_MAC_CMD_MODE_HASH
|
2639 HIFN_MAC_CMD_POS_IPSEC
;
2641 case CRYPTO_SHA1_HMAC
:
2642 cmd
->mac_masks
|= HIFN_MAC_CMD_ALG_SHA1
|
2643 HIFN_MAC_CMD_RESULT
| HIFN_MAC_CMD_MODE_HMAC
|
2644 HIFN_MAC_CMD_POS_IPSEC
| HIFN_MAC_CMD_TRUNC
;
2648 if (maccrd
->crd_alg
== CRYPTO_SHA1_HMAC
||
2649 maccrd
->crd_alg
== CRYPTO_MD5_HMAC
) {
2650 cmd
->mac_masks
|= HIFN_MAC_CMD_NEW_KEY
;
2651 bcopy(maccrd
->crd_key
, cmd
->mac
, maccrd
->crd_klen
>> 3);
2652 bzero(cmd
->mac
+ (maccrd
->crd_klen
>> 3),
2653 HIFN_MAC_KEY_LENGTH
- (maccrd
->crd_klen
>> 3));
2658 cmd
->session_num
= session
;
2661 err
= hifn_crypto(sc
, cmd
, crp
, hint
);
2664 } else if (err
== ERESTART
) {
2666 * There weren't enough resources to dispatch the request
2667 * to the part. Notify the caller so they'll requeue this
2668 * request and resubmit it again soon.
2672 device_printf(sc
->sc_dev
, "requeue request\n");
2675 sc
->sc_needwakeup
|= CRYPTO_SYMQ
;
2683 hifnstats
.hst_invalid
++;
2685 hifnstats
.hst_nomem
++;
2686 crp
->crp_etype
= err
;
/*
 * hifn_abort: fail/drain every command outstanding on the result ring
 * after the board has been declared hung.  Ring entries whose
 * HIFN_D_VALID bit the device has already cleared did in fact complete,
 * so their results are salvaged and delivered via hifn_callback();
 * everything else has its DMA maps synced/unmapped and the request is
 * failed with ENOMEM.  Finally the board is reset and reinitialized.
 * NOTE(review): this chunk is an elided extract — the loop header,
 * local declarations and several braces are missing from view.
 */
2692 hifn_abort(struct hifn_softc
*sc
)
2694 struct hifn_dma
*dma
= sc
->sc_dma
;
2695 struct hifn_command
*cmd
;
2696 struct cryptop
*crp
;
2699 DPRINTF("%s()\n", __FUNCTION__
);
/* i = index of oldest outstanding result slot; u = slots in use. */
2701 i
= dma
->resk
; u
= dma
->resu
;
2703 cmd
= dma
->hifn_commands
[i
];
2704 KASSERT(cmd
!= NULL
, ("hifn_abort: null command slot %u", i
));
2705 dma
->hifn_commands
[i
] = NULL
;
/* VALID cleared => the device finished this descriptor; salvage it. */
2708 if ((dma
->resr
[i
].l
& htole32(HIFN_D_VALID
)) == 0) {
2709 /* Salvage what we can. */
/* If a MAC was requested, its result lives in the result buffer. */
2712 if (cmd
->base_masks
& HIFN_BASE_CMD_MAC
) {
2713 macbuf
= dma
->result_bufs
[i
];
2717 hifnstats
.hst_opackets
++;
2718 hifn_callback(sc
, cmd
, macbuf
);
/* Not completed: sync the data DMA maps before tearing it down. */
2721 if (cmd
->src_map
== cmd
->dst_map
) {
2722 bus_dmamap_sync(sc
->sc_dmat
, cmd
->src_map
,
2723 BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
2725 bus_dmamap_sync(sc
->sc_dmat
, cmd
->src_map
,
2726 BUS_DMASYNC_POSTWRITE
);
2727 bus_dmamap_sync(sc
->sc_dmat
, cmd
->dst_map
,
2728 BUS_DMASYNC_POSTREAD
);
/* skb/mbuf-compat buffers: hand the output chain back to the request. */
2732 if (cmd
->src_skb
!= cmd
->dst_skb
) {
2734 m_freem(cmd
->src_m
);
2735 crp
->crp_buf
= (caddr_t
)cmd
->dst_m
;
2737 device_printf(sc
->sc_dev
,
2738 "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
2739 __FILE__
, __LINE__
);
2743 /* non-shared buffers cannot be restarted */
2744 if (cmd
->src_map
!= cmd
->dst_map
) {
2746 * XXX should be EAGAIN, delayed until
2749 crp
->crp_etype
= ENOMEM
;
2750 pci_unmap_buf(sc
, &cmd
->dst
);
2752 crp
->crp_etype
= ENOMEM
;
2754 pci_unmap_buf(sc
, &cmd
->src
);
2757 if (crp
->crp_etype
!= EAGAIN
)
/* Advance with wrap-around over the fixed-size result ring. */
2761 if (++i
== HIFN_D_RES_RSIZE
)
2765 dma
->resk
= i
; dma
->resu
= u
;
/* Full reset: bring the part back up for subsequent requests. */
2767 hifn_reset_board(sc
, 1);
2769 hifn_init_pci_registers(sc
);
/*
 * hifn_callback: completion path for one finished crypto command.
 * Syncs the data DMA maps, restores the caller's buffer in the
 * CRYPTO_F_SKBUF src!=dst case, copies any "slop" tail bytes back into
 * the request buffer, retires the command's descriptors from the
 * destination ring, saves the next CBC IV from the tail of the
 * ciphertext into the session state, copies the MAC result into the
 * request, and unmaps the PCI buffers.
 * NOTE(review): elided extract — loop headers, local declarations and
 * the tail of the function are not visible in this chunk.
 */
2773 hifn_callback(struct hifn_softc
*sc
, struct hifn_command
*cmd
, u_int8_t
*macbuf
)
2775 struct hifn_dma
*dma
= sc
->sc_dma
;
2776 struct cryptop
*crp
= cmd
->crp
;
2777 struct cryptodesc
*crd
;
2780 DPRINTF("%s()\n", __FUNCTION__
);
/* Make the DMA'd payload visible to the CPU. */
2783 if (cmd
->src_map
== cmd
->dst_map
) {
2784 bus_dmamap_sync(sc
->sc_dmat
, cmd
->src_map
,
2785 BUS_DMASYNC_POSTWRITE
| BUS_DMASYNC_POSTREAD
);
2787 bus_dmamap_sync(sc
->sc_dmat
, cmd
->src_map
,
2788 BUS_DMASYNC_POSTWRITE
);
2789 bus_dmamap_sync(sc
->sc_dmat
, cmd
->dst_map
,
2790 BUS_DMASYNC_POSTREAD
);
/* skb/mbuf-compat case: swap the output chain into the request. */
2794 if (crp
->crp_flags
& CRYPTO_F_SKBUF
) {
2795 if (cmd
->src_skb
!= cmd
->dst_skb
) {
2797 crp
->crp_buf
= (caddr_t
)cmd
->dst_m
;
2798 totlen
= cmd
->src_mapsize
;
2799 for (m
= cmd
->dst_m
; m
!= NULL
; m
= m
->m_next
) {
2800 if (totlen
< m
->m_len
) {
2806 cmd
->dst_m
->m_pkthdr
.len
= cmd
->src_m
->m_pkthdr
.len
;
2807 m_freem(cmd
->src_m
);
2809 device_printf(sc
->sc_dev
,
2810 "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
2811 __FILE__
, __LINE__
);
/* Copy the slop (unaligned tail) bytes back into the user buffer. */
2816 if (cmd
->sloplen
!= 0) {
2817 crypto_copyback(crp
->crp_flags
, crp
->crp_buf
,
2818 cmd
->src_mapsize
- cmd
->sloplen
, cmd
->sloplen
,
2819 (caddr_t
)&dma
->slop
[cmd
->slopidx
]);
/* Retire this command's entries from the destination ring. */
2822 i
= dma
->dstk
; u
= dma
->dstu
;
2824 if (i
== HIFN_D_DST_RSIZE
)
2827 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
,
2828 BUS_DMASYNC_POSTREAD
| BUS_DMASYNC_POSTWRITE
);
2830 if (dma
->dstr
[i
].l
& htole32(HIFN_D_VALID
)) {
2832 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
,
2833 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
2839 dma
->dstk
= i
; dma
->dstu
= u
;
2841 hifnstats
.hst_obytes
+= cmd
->dst_mapsize
;
/*
 * Encrypt (not decrypt) case: preserve the last cipher block of the
 * output as the IV for the session's next CBC request.
 */
2843 if ((cmd
->base_masks
& (HIFN_BASE_CMD_CRYPT
| HIFN_BASE_CMD_DECODE
)) ==
2844 HIFN_BASE_CMD_CRYPT
) {
2845 for (crd
= crp
->crp_desc
; crd
; crd
= crd
->crd_next
) {
2846 if (crd
->crd_alg
!= CRYPTO_DES_CBC
&&
2847 crd
->crd_alg
!= CRYPTO_3DES_CBC
&&
2848 crd
->crd_alg
!= CRYPTO_AES_CBC
)
2850 ivlen
= ((crd
->crd_alg
== CRYPTO_AES_CBC
) ?
2851 HIFN_AES_IV_LENGTH
: HIFN_IV_LENGTH
);
2852 crypto_copydata(crp
->crp_flags
, crp
->crp_buf
,
2853 crd
->crd_skip
+ crd
->crd_len
- ivlen
, ivlen
,
2854 cmd
->softc
->sc_sessions
[cmd
->session_num
].hs_iv
);
/* Deliver the MAC result (hs_mlen bytes) at the descriptor's inject
 * offset, for the hash/HMAC descriptor of this request. */
2859 if (macbuf
!= NULL
) {
2860 for (crd
= crp
->crp_desc
; crd
; crd
= crd
->crd_next
) {
2863 if (crd
->crd_alg
!= CRYPTO_MD5
&&
2864 crd
->crd_alg
!= CRYPTO_SHA1
&&
2865 crd
->crd_alg
!= CRYPTO_MD5_HMAC
&&
2866 crd
->crd_alg
!= CRYPTO_SHA1_HMAC
) {
2869 len
= cmd
->softc
->sc_sessions
[cmd
->session_num
].hs_mlen
;
2870 crypto_copyback(crp
->crp_flags
, crp
->crp_buf
,
2871 crd
->crd_inject
, len
, macbuf
);
/* Release the PCI mappings (dst first when distinct from src). */
2876 if (cmd
->src_map
!= cmd
->dst_map
)
2877 pci_unmap_buf(sc
, &cmd
->dst
);
2878 pci_unmap_buf(sc
, &cmd
->src
);
2884 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2885 * and Group 1 registers; avoid conditions that could create
2886 * burst writes by doing a read in between the writes.
2888 * NB: The read we interpose is always to the same register;
2889 * we do this because reading from an arbitrary (e.g. last)
2890 * register may not always work.
2893 hifn_write_reg_0(struct hifn_softc
*sc
, bus_size_t reg
, u_int32_t val
)
2895 if (sc
->sc_flags
& HIFN_IS_7811
) {
2896 if (sc
->sc_bar0_lastreg
== reg
- 4)
2897 readl(sc
->sc_bar0
+ HIFN_0_PUCNFG
);
2898 sc
->sc_bar0_lastreg
= reg
;
2900 writel(val
, sc
->sc_bar0
+ reg
);
2904 hifn_write_reg_1(struct hifn_softc
*sc
, bus_size_t reg
, u_int32_t val
)
2906 if (sc
->sc_flags
& HIFN_IS_7811
) {
2907 if (sc
->sc_bar1_lastreg
== reg
- 4)
2908 readl(sc
->sc_bar1
+ HIFN_1_REVID
);
2909 sc
->sc_bar1_lastreg
= reg
;
2911 writel(val
, sc
->sc_bar1
+ reg
);
2915 static struct pci_device_id hifn_pci_tbl
[] = {
2916 { PCI_VENDOR_HIFN
, PCI_PRODUCT_HIFN_7951
,
2917 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, },
2918 { PCI_VENDOR_HIFN
, PCI_PRODUCT_HIFN_7955
,
2919 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, },
2920 { PCI_VENDOR_HIFN
, PCI_PRODUCT_HIFN_7956
,
2921 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, },
2922 { PCI_VENDOR_NETSEC
, PCI_PRODUCT_NETSEC_7751
,
2923 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, },
2924 { PCI_VENDOR_INVERTEX
, PCI_PRODUCT_INVERTEX_AEON
,
2925 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, },
2926 { PCI_VENDOR_HIFN
, PCI_PRODUCT_HIFN_7811
,
2927 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, },
2929 * Other vendors share this PCI ID as well, such as
2930 * http://www.powercrypt.com, and obviously they also
2933 { PCI_VENDOR_HIFN
, PCI_PRODUCT_HIFN_7751
,
2934 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, },
2935 { 0, 0, 0, 0, 0, 0, }
2937 MODULE_DEVICE_TABLE(pci
, hifn_pci_tbl
);
/*
 * Linux PCI driver glue: binds the probe/remove entry points to the
 * device IDs in hifn_pci_tbl.  NOTE(review): the .name initializer
 * from the original source (line 2940) is not visible in this extract
 * — restore it from upstream before building.
 */
2939 static struct pci_driver hifn_driver
= {
2941 .id_table
= hifn_pci_tbl
,
2942 .probe
= hifn_probe
,
2943 .remove
= hifn_remove
,
2944 /* add PM stuff here one day */
/*
 * Module entry point: register the PCI driver; the status is passed to
 * pci_register_driver_compat() (OCF compatibility shim for older kernel
 * APIs).  NOTE(review): the declaration of 'rc' and the final return
 * statement are elided from this extract; 'sc' is unused in the visible
 * lines — confirm against upstream.
 */
2947 static int __init
hifn_init (void)
2949 struct hifn_softc
*sc
= NULL
;
2952 DPRINTF("%s(%p)\n", __FUNCTION__
, hifn_init
);
2954 rc
= pci_register_driver(&hifn_driver
);
2955 pci_register_driver_compat(&hifn_driver
, rc
);
/* Module exit point: detach the PCI driver from the core. */
2960 static void __exit
hifn_exit (void)
2962 pci_unregister_driver(&hifn_driver
);
/* Standard Linux module glue: init/exit hooks and module metadata. */
2965 module_init(hifn_init
);
2966 module_exit(hifn_exit
);
2968 MODULE_LICENSE("BSD");
2969 MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
2970 MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");