/* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */

/*
 * Invertex AEON / Hifn 7751 driver
 * Copyright (c) 1999 Invertex Inc. All rights reserved.
 * Copyright (c) 1999 Theo de Raadt
 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
 *			http://www.netsec.net
 * Copyright (c) 2003 Hifn Inc.
 *
 * This driver is based on a previous driver by Invertex, for which they
 * requested:  Please send any comments, feedback, bug-fixes, or feature
 * requests to software@invertex.com.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */

__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
/*
 * Driver for various Hifn encryption processors.
 */

#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/version.h>
#include <linux/skbuff.h>

#include <cryptodev.h>
#include <hifn/hifn7751reg.h>
#include <hifn/hifn7751var.h>
#define DPRINTF(a...)	if (hifn_debug) { \
				printk("%s: ", sc ? \
					device_get_nameunit(sc->sc_dev) : "hifn"); \
				printk(a); \
			}

static inline int
pci_get_revid(struct pci_dev *dev)
{
	u_int8_t rid = 0;
	pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
	return rid;
}

static	struct hifn_stats hifnstats;
#define	debug hifn_debug
int hifn_debug = 0;
module_param(hifn_debug, int, 0644);
MODULE_PARM_DESC(hifn_debug, "Enable debug");

int hifn_maxbatch = 1;
module_param(hifn_maxbatch, int, 0644);
MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");

int hifn_cache_linesize = 0x10;
module_param(hifn_cache_linesize, int, 0444);
MODULE_PARM_DESC(hifn_cache_linesize, "PCI config cache line size");

#ifdef MODULE_PARM
char *hifn_pllconfig = NULL;
MODULE_PARM(hifn_pllconfig, "s");
#else
char hifn_pllconfig[32]; /* This setting is RO after loading */
module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
#endif
MODULE_PARM_DESC(hifn_pllconfig, "PLL config, ie., pci66, ext33, ...");
#ifdef HIFN_VULCANDEV
#include <sys/conf.h>

static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
#endif
/*
 * Prototypes and count for the pci_device structure
 */
static	int  hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static	void hifn_remove(struct pci_dev *dev);

static	int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
static	int hifn_freesession(device_t, u_int64_t);
static	int hifn_process(device_t, struct cryptop *, int);

static device_method_t hifn_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	hifn_newsession),
	DEVMETHOD(cryptodev_freesession,hifn_freesession),
	DEVMETHOD(cryptodev_process,	hifn_process),
};
static	void hifn_reset_board(struct hifn_softc *, int);
static	void hifn_reset_puc(struct hifn_softc *);
static	void hifn_puc_wait(struct hifn_softc *);
static	int hifn_enable_crypto(struct hifn_softc *);
static	void hifn_set_retry(struct hifn_softc *sc);
static	void hifn_init_dma(struct hifn_softc *);
static	void hifn_init_pci_registers(struct hifn_softc *);
static	int hifn_sramsize(struct hifn_softc *);
static	int hifn_dramsize(struct hifn_softc *);
static	int hifn_ramtype(struct hifn_softc *);
static	void hifn_sessions(struct hifn_softc *);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
static irqreturn_t hifn_intr(int irq, void *arg);
#else
static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
#endif
static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
static	int hifn_init_pubrng(struct hifn_softc *);
static	void hifn_tick(unsigned long arg);
static	void hifn_abort(struct hifn_softc *);
static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);

static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);

#ifdef CONFIG_OCF_RANDOMHARVEST
static	int hifn_read_random(void *arg, u_int32_t *buf, int len);
#endif
#define	HIFN_MAX_CHIPS	8
static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];

static __inline u_int32_t
READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = readl(sc->sc_bar0 + reg);
	sc->sc_bar0_lastreg = (bus_size_t) -1;
	return (v);
}
#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)

static __inline u_int32_t
READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = readl(sc->sc_bar1 + reg);
	sc->sc_bar1_lastreg = (bus_size_t) -1;
	return (v);
}
#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)
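
/*
 * Usage sketch (for orientation only): BAR0 holds the processing-unit
 * registers and BAR1 the DMA registers, so a typical read-modify-write
 * of the DMA control/status register looks like:
 *
 *	u_int32_t csr = READ_REG_1(sc, HIFN_1_DMA_CSR);
 *	WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr | HIFN_DMACSR_C_CTRL_ENA);
 *
 * The *_lastreg bookkeeping above is consumed by hifn_write_reg_0/1,
 * which are defined elsewhere in the OCF glue.
 */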
/*
 * map in a given buffer (great on some arches :-)
 */
static int
pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
{
	struct iovec *iov = uio->uio_iov;

	DPRINTF("%s()\n", __FUNCTION__);

	buf->mapsize = 0;
	for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
		buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
				iov->iov_base, iov->iov_len,
				PCI_DMA_BIDIRECTIONAL);
		buf->segs[buf->nsegs].ds_len = iov->iov_len;
		buf->mapsize += iov->iov_len;
		iov++;
		buf->nsegs++;
	}
	/* identify this buffer by the first segment */
	buf->map = (void *) buf->segs[0].ds_addr;
	return(0);
}
/*
 * map in a given sk_buff
 */
static int
pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
{
	int i;

	DPRINTF("%s()\n", __FUNCTION__);

	buf->mapsize = 0;

	buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
			skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
	buf->segs[0].ds_len = skb_headlen(skb);
	buf->mapsize += buf->segs[0].ds_len;

	buf->nsegs = 1;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
		buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
		buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
				page_address(skb_shinfo(skb)->frags[i].page) +
					skb_shinfo(skb)->frags[i].page_offset,
				buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
		buf->mapsize += buf->segs[buf->nsegs].ds_len;
		buf->nsegs++;
		i++;
	}

	/* identify this buffer by the first segment */
	buf->map = (void *) buf->segs[0].ds_addr;
	return(0);
}
/*
 * map in a given contiguous buffer
 */
static int
pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
{
	DPRINTF("%s()\n", __FUNCTION__);

	buf->mapsize = 0;
	buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
			b, len, PCI_DMA_BIDIRECTIONAL);
	buf->segs[0].ds_len = len;
	buf->mapsize += buf->segs[0].ds_len;
	buf->nsegs = 1;

	/* identify this buffer by the first segment */
	buf->map = (void *) buf->segs[0].ds_addr;
	return(0);
}
#if 0 /* not needed at this time */
static void
pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
{
	int i;

	DPRINTF("%s()\n", __FUNCTION__);
	for (i = 0; i < buf->nsegs; i++)
		pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
}
#endif
static void
pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
{
	int i;

	DPRINTF("%s()\n", __FUNCTION__);
	for (i = 0; i < buf->nsegs; i++) {
		pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
		buf->segs[i].ds_addr = 0;
		buf->segs[i].ds_len = 0;
	}
}
static const char *
hifn_partname(struct hifn_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_pcidev)) {
	case PCI_VENDOR_HIFN:
		switch (pci_get_device(sc->sc_pcidev)) {
		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
		}
		return "Hifn unknown-part";
	case PCI_VENDOR_INVERTEX:
		switch (pci_get_device(sc->sc_pcidev)) {
		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
		}
		return "Invertex unknown-part";
	case PCI_VENDOR_NETSEC:
		switch (pci_get_device(sc->sc_pcidev)) {
		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
		}
		return "NetSec unknown-part";
	}
	return "Unknown-vendor unknown-part";
}
static u_int
checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
{
	struct hifn_softc *sc = pci_get_drvdata(dev);

	if (v > max) {
		device_printf(sc->sc_dev, "Warning, %s %u out of range, "
			"using max %u\n", what, v, max);
		v = max;
	} else if (v < min) {
		device_printf(sc->sc_dev, "Warning, %s %u out of range, "
			"using min %u\n", what, v, min);
		v = min;
	}
	return v;
}
/*
 * Select PLL configuration for 795x parts.  This is complicated in
 * that we cannot determine the optimal parameters without user input.
 * The reference clock is derived from an external clock through a
 * multiplier.  The external clock is either the host bus (i.e. PCI)
 * or an external clock generator.  When using the PCI bus we assume
 * the clock is either 33 or 66 MHz; for an external source we cannot
 * tell the speed.
 *
 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
 * for an external source, followed by the frequency.  We calculate
 * the appropriate multiplier and PLL register contents accordingly.
 * When no configuration is given we default to "pci66" since that
 * always will allow the card to work.  If a card is using the PCI
 * bus clock and in a 33MHz slot then it will be operating at half
 * speed until the correct information is provided.
 *
 * We use a default setting of "ext66" because according to Mike Ham
 * of HiFn, almost every board in existence has an external crystal
 * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
 * because PCI33 can have clocks from 0 to 33Mhz, and some have
 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
 */
static void
hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
{
	const char *pllspec = hifn_pllconfig;
	u_int freq, mul, fl, fh;
	u_int32_t pllconfig;
	char *nxt;

	if (pllspec == NULL)
		pllspec = "ext66";
	fl = 33, fh = 66;
	pllconfig = 0;
	if (strncmp(pllspec, "ext", 3) == 0) {
		pllspec += 3;
		pllconfig |= HIFN_PLL_REF_SEL;
		switch (pci_get_device(dev)) {
		case PCI_PRODUCT_HIFN_7955:
		case PCI_PRODUCT_HIFN_7956:
			fl = 20, fh = 100;
			break;
#ifdef PCI_PRODUCT_HIFN_7954
		case PCI_PRODUCT_HIFN_7954:
			fl = 20, fh = 66;
			break;
#endif
		}
	} else if (strncmp(pllspec, "pci", 3) == 0)
		pllspec += 3;
	freq = strtoul(pllspec, &nxt, 10);
	if (nxt == pllspec)
		freq = 66;
	else
		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
	/*
	 * Calculate multiplier.  We target a Fck of 266 MHz,
	 * allowing only even values, possibly rounded down.
	 * Multipliers > 8 must set the charge pump current.
	 */
	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
	if (mul > 8)
		pllconfig |= HIFN_PLL_IS;
	*pll = pllconfig;
}
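
/*
 * Worked example (illustrative): with the default "ext66" setting,
 * freq = 66, so (266 / 66) &~ 1 = 4, giving mul = 4 and an ND field of
 * mul / 2 - 1 = 1; HIFN_PLL_IS is only required for multipliers above 8.
 */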
/*
 * Attach an interface that successfully probed.
 */
static int
hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct hifn_softc *sc = NULL;
	char rbase;
	u_int32_t rev;
	u_int16_t ena;
	int rseg, rc;
	unsigned long mem_start, mem_len;
	static int num_chips = 0;

	DPRINTF("%s()\n", __FUNCTION__);

	if (pci_enable_device(dev) < 0)
		return(-ENODEV);

#ifdef CONFIG_HAVE_PCI_SET_MWI
	if (pci_set_mwi(dev))
		return(-ENODEV);
#endif

	if (!dev->irq) {
		printk("hifn: found device with no IRQ assigned. check BIOS settings!");
		pci_disable_device(dev);
		return(-ENODEV);
	}

	sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return(-ENOMEM);
	memset(sc, 0, sizeof(*sc));

	softc_device_init(sc, "hifn", num_chips, hifn_methods);

	sc->sc_pcidev = dev;
	sc->sc_irq = -1;
	sc->sc_num = num_chips++;
	if (sc->sc_num < HIFN_MAX_CHIPS)
		hifn_chip_idx[sc->sc_num] = sc;

	pci_set_drvdata(sc->sc_pcidev, sc);

	spin_lock_init(&sc->sc_mtx);

	/* XXX handle power management */

	/*
	 * The 7951 and 795x have a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;

	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * The 795x parts support AES.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
		/*
		 * Select PLL configuration.  This depends on the
		 * bus and board design and must be manually configured
		 * if the default setting is unacceptable.
		 */
		hifn_getpllconfig(dev, &sc->sc_pllconfig);
	}

	/*
	 * Setup PCI resources. Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	mem_start = pci_resource_start(sc->sc_pcidev, 0);
	mem_len = pci_resource_len(sc->sc_pcidev, 0);
	sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
	if (!sc->sc_bar0) {
		device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
		goto fail;
	}
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	mem_start = pci_resource_start(sc->sc_pcidev, 1);
	mem_len = pci_resource_len(sc->sc_pcidev, 1);
	sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
	if (!sc->sc_bar1) {
		device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
		goto fail;
	}
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	/* fix up the bus size */
	if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
		device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
		goto fail;
	}
	if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
		device_printf(sc->sc_dev,
			"No usable consistent DMA configuration, aborting.\n");
		goto fail;
	}

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
			sizeof(*sc->sc_dma),
			&sc->sc_dma_physaddr);
	if (!sc->sc_dma) {
		device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
		goto fail;
	}
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(sc->sc_dev, "crypto enabling failed\n");
		goto fail;
	}

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	pci_set_master(sc->sc_pcidev);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
	if (rc) {
		device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
		goto fail;
	}
	sc->sc_irq = dev->irq;

	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
		hifn_partname(sc), rev,
		rseg, rbase, sc->sc_drammodel ? 'd' : 's');
	if (sc->sc_flags & HIFN_IS_7956)
		printf(", pll=0x%x<%s clk, %ux mult>",
			sc->sc_pllconfig,
			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
	printf("\n");

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(sc->sc_dev, "could not get crypto driver id\n");
		goto fail;
	}

	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
		break;
	}

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	init_timer(&sc->sc_tickto);
	sc->sc_tickto.function = hifn_tick;
	sc->sc_tickto.data = (unsigned long) sc->sc_num;
	mod_timer(&sc->sc_tickto, jiffies + HZ);

	return (0);

fail:
	crypto_unregister_all(sc->sc_cid);
	if (sc->sc_irq != -1)
		free_irq(sc->sc_irq, sc);
	if (sc->sc_dma) {
		/* Turn off DMA polling */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
		pci_free_consistent(sc->sc_pcidev,
				sizeof(*sc->sc_dma),
				sc->sc_dma, sc->sc_dma_physaddr);
	}
	kfree(sc);
	return (-ENXIO);
}
/*
 * Detach an interface that successfully probed.
 */
static void
hifn_remove(struct pci_dev *dev)
{
	struct hifn_softc *sc = pci_get_drvdata(dev);
	unsigned long l_flags;

	DPRINTF("%s()\n", __FUNCTION__);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	/* disable interrupts */
	HIFN_LOCK(sc);
	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
	HIFN_UNLOCK(sc);

	/*XXX other resources */
	del_timer_sync(&sc->sc_tickto);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	crypto_unregister_all(sc->sc_cid);

	free_irq(sc->sc_irq, sc);

	pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
			sc->sc_dma, sc->sc_dma_physaddr);

	kfree(sc);
}
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	int i;

	DPRINTF("%s()\n", __FUNCTION__);

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
#ifdef CONFIG_OCF_RANDOMHARVEST
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			u_int32_t r;

			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		sc->sc_rngfirst = 1;
		crypto_rregister(sc->sc_cid, hifn_read_random, sc);
	}
#endif

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#ifdef HIFN_VULCANDEV
		sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
				UID_ROOT, GID_WHEEL, 0666,
				"vulcanpk");
		sc->sc_pkdev->si_drv1 = sc;
#endif
	}

	return (0);
}
#ifdef CONFIG_OCF_RANDOMHARVEST
static int
hifn_read_random(void *arg, u_int32_t *buf, int len)
{
	struct hifn_softc *sc = (struct hifn_softc *) arg;
	u_int32_t sts;
	int i, rc = 0;

	if (len <= 0)
		return rc;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* ONLY VALID ON 7811!!!! */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dev,
					"RNG underflow: disabling\n");
				/* DAVIDM perhaps return -1 */
				break;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			if (rc < len)
				buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			if (rc < len)
				buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
		}
	} else
		buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);

	/* NB: discard first data read */
	if (sc->sc_rngfirst) {
		sc->sc_rngfirst = 0;
		rc = 0;
	}

	return (rc);
}
#endif /* CONFIG_OCF_RANDOMHARVEST */
static void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;
	int reg = HIFN_0_PUCTRL;

	if (sc->sc_flags & HIFN_IS_7956) {
		reg = HIFN_0_PUCTRL2;
	}

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
			READ_REG_0(sc, HIFN_0_PUCTRL));
}
/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	int reg = HIFN_0_PUCTRL;

	if (sc->sc_flags & HIFN_IS_7956) {
		reg = HIFN_0_PUCTRL2;
	}
	WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);

	hifn_puc_wait(sc);
}
/*
 * Set the Retry and TRDY registers; note that we set them to
 * zero because the 7811 locks up when forced to retry (section
 * 3.6 of "Specification Update SU-0014-04".  Not clear if we
 * should do this for all Hifn parts, but it doesn't seem to hurt.
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	DPRINTF("%s()\n", __FUNCTION__);
	/* NB: RETRY only responds to 8-bit reads/writes */
	pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
	pci_write_config_dword(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
	/* piggy back the cache line setting here */
	pci_write_config_byte(sc->sc_pcidev, PCI_CACHE_LINE_SIZE, hifn_cache_linesize);
}
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	DPRINTF("%s()\n", __FUNCTION__);
	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	if (sc->sc_flags & HIFN_IS_7811) {
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			device_printf(sc->sc_dev, ": cram init timeout\n");
	} else {
		/* set up DMA configuration register #2 */
		/* turn off all PK and BAR0 swaps */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
		    (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
		    (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
		    (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
		    (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
	}
}
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {
		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			  0x00, 0x00, 0x00, 0x00 };

	DPRINTF("%s()\n", __FUNCTION__);

	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Unknown encryption level 0x%x\n", encl);
		return 1;
	}

	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
	}

	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
				"locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
				"successfully!\n");
	}

report:
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		device_printf(sc->sc_dev, "disabled\n");
		break;
	}

	return 0;
}
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	DPRINTF("%s()\n", __FUNCTION__);

	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	/* write status register */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		  | HIFN_PLL_BP | HIFN_PLL_MBSET;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	DPRINTF("%s()\n", __FUNCTION__);

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	} else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}
/*
 * Determine ram type (sram or dram).  Board should be just out of a reset
 * state when this is called.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	return (0);
}
#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
 */
static int
hifn_dramsize(struct hifn_softc *sc)
{
	u_int32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	DPRINTF("%s()\n", __FUNCTION__);

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
static int
hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	hifn_base_command_t wc;
	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, resi, srci, dsti;

	DPRINTF("%s()\n", __FUNCTION__);

	wc.masks = htole16(3 << 13);
	wc.session_num = htole16(addr >> 14);
	wc.total_source_count = htole16(8);
	wc.total_dest_count = htole16(addr & 0x3fff);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	/* build write command */
	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
	bcopy(data, &dma->test_src, sizeof(dma->test_src));

	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
	    + offsetof(struct hifn_dma, test_src));
	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
	    + offsetof(struct hifn_dma, test_dst));

	dma->cmdr[cmdi].l = htole32(16 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(4 | masks);
	dma->resr[resi].l = htole32(4 | masks);

	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
	}
	if (r == 0) {
		device_printf(sc->sc_dev, "writeramaddr -- "
		    "result[%d](addr %d) still valid\n", resi, addr);
		r = -1;
	} else
		r = 0;

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);

	return (r);
}
static int
hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	hifn_base_command_t rc;
	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, srci, dsti, resi;

	DPRINTF("%s()\n", __FUNCTION__);

	rc.masks = htole16(2 << 13);
	rc.session_num = htole16(addr >> 14);
	rc.total_source_count = htole16(addr & 0x3fff);
	rc.total_dest_count = htole16(8);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;

	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, test_src));
	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, test_dst));

	dma->cmdr[cmdi].l = htole32(8 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(8 | masks);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);

	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
	}
	if (r == 0) {
		device_printf(sc->sc_dev, "readramaddr -- "
		    "result[%d](addr %d) still valid\n", resi, addr);
		r = -1;
	} else {
		r = 0;
		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
	}

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);

	return (r);
}
/*
 * Initialize the descriptor rings.
 */
static void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	DPRINTF("%s()\n", __FUNCTION__);

	/* initialize static pointer values */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));

	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}
/*
 * Writes out the raw command buffer space.  Returns the
 * command buffer size.
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	struct hifn_softc *sc = NULL;
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	DPRINTF("%s()\n", __FUNCTION__);

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}
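
/*
 * For reference, the command buffer built above is laid out strictly in
 * this order: base command, then the optional MAC and crypt command
 * structures, then any new MAC key, new cipher key, and IV, with an
 * 8-byte zero pad when neither MAC nor crypt is requested.
 */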
static int
hifn_dmamap_aligned(struct hifn_operand *op)
{
	struct hifn_softc *sc = NULL;
	int i;

	DPRINTF("%s()\n", __FUNCTION__);

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}
static __inline int
hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}
	return (idx);
}
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	DPRINTF("%s()\n", __FUNCTION__);

	idx = dma->dsti;
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		wmb();
		dma->dstr[idx].l |= htole32(HIFN_D_VALID);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		idx = hifn_dmamap_dstwrap(sc, idx);
	}

	if (cmd->sloplen == 0) {
		p = dst->segs[i].ds_addr;
		l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			wmb();
			dma->dstr[idx].l |= htole32(HIFN_D_VALID);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			idx = hifn_dmamap_dstwrap(sc, idx);
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	wmb();
	dma->dstr[idx].l |= htole32(HIFN_D_VALID);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	idx = hifn_dmamap_dstwrap(sc, idx);

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
static __inline int
hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (++idx == HIFN_D_SRC_RSIZE) {
		dma->srcr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		idx = 0;
	}
	return (idx);
}
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *src = &cmd->src;
	int idx, i;
	u_int32_t last = 0;

	DPRINTF("%s()\n", __FUNCTION__);

	idx = dma->srci;
	for (i = 0; i < src->nsegs; i++) {
		if (i == src->nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
		    HIFN_D_MASKDONEIRQ | last);
		wmb();
		dma->srcr[idx].l |= htole32(HIFN_D_VALID);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		idx = hifn_dmamap_srcwrap(sc, idx);
	}

	dma->srci = idx;
	dma->srcu += src->nsegs;
	return (idx);
}
static int
hifn_crypto(
	struct hifn_softc *sc,
	struct hifn_command *cmd,
	struct cryptop *crp,
	int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen, csr;
	int cmdi, resi, err = 0;
	unsigned long l_flags;

	DPRINTF("%s()\n", __FUNCTION__);

	/*
	 * need 1 cmd, and 1 res
	 *
	 * NB: check this first since it's easy.
	 */
	HIFN_LOCK(sc);
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		if (hifn_debug) {
			device_printf(sc->sc_dev,
				"cmd/result exhaustion, cmdu %u resu %u\n",
				dma->cmdu, dma->resu);
		}
		hifnstats.hst_nomem_cr++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		HIFN_UNLOCK(sc);
		return (ERESTART);
	}

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	}

	if (hifn_dmamap_aligned(&cmd->src)) {
		cmd->sloplen = cmd->src_mapsize & 3;
		cmd->dst = cmd->src;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_SKBUF) {
#ifdef NOTYET
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			KASSERT(cmd->dst_m == cmd->src_m,
				("hifn_crypto: dst_m initialized improperly"));
			hifnstats.hst_unaligned++;
			/*
			 * Source is not aligned on a longword boundary.
			 * Copy the data to insure alignment.  If we fail
			 * to allocate mbufs or clusters while doing this
			 * we return ERESTART so the operation is requeued
			 * at the crypto later, but only if there are
			 * ops already posted to the hardware; otherwise we
			 * have no guarantee that we'll be re-entered.
			 */
			totlen = cmd->src_mapsize;
			if (cmd->src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
					m_free(m0);
					m0 = NULL;
				}
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				hifnstats.hst_nomem_mbuf++;
				err = dma->cmdu ? ERESTART : ENOMEM;
				goto err_srcmap;
			}
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if ((m0->m_flags & M_EXT) == 0) {
					hifnstats.hst_nomem_mcl++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					hifnstats.hst_nomem_mbuf++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						hifnstats.hst_nomem_mcl++;
						err = dma->cmdu ? ERESTART : ENOMEM;
						mlast->m_next = m;
						m_freem(m0);
						goto err_srcmap;
					}
					len = MCLBYTES;
				}

				m->m_len = len;
				m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dst_m = m0;
#else
			device_printf(sc->sc_dev,
				"%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
				__FILE__, __LINE__);
			err = EINVAL;
			goto err_srcmap;
#endif
		} else {
			device_printf(sc->sc_dev,
				"%s,%d: unaligned contig buffers not implemented\n",
				__FILE__, __LINE__);
			err = EINVAL;
			goto err_srcmap;
		}
	}

	if (cmd->dst_map == NULL) {
		if (crp->crp_flags & CRYPTO_F_SKBUF) {
			if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
				hifnstats.hst_nomem_map++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else {
			if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_nsegs, cmd->dst_nsegs);
	}

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * need N src, and N dst
	 */
	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
		if (hifn_debug) {
			device_printf(sc->sc_dev,
				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
				dma->srcu, cmd->src_nsegs,
				dma->dstu, cmd->dst_nsegs);
		}
		hifnstats.hst_nomem_sd++;
		err = ERESTART;
		goto err_dstmap;
	}

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	wmb();
	dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_mapsize;

	hifn_dmamap_load_src(sc, cmd);

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
	if (hifn_debug)
		device_printf(sc->sc_dev, "load res\n");
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	KASSERT(dma->hifn_commands[resi] == NULL,
		("hifn_crypto: command slot %u busy", resi));
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
		wmb();
		dma->resr[resi].l |= htole32(HIFN_D_VALID);
		sc->sc_curbatch++;
		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
			hifnstats.hst_maxbatch = sc->sc_curbatch;
		hifnstats.hst_totbatch++;
	} else {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
		wmb();
		dma->resr[resi].l |= htole32(HIFN_D_VALID);
		sc->sc_curbatch = 0;
	}
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	csr = 0;
	if (sc->sc_c_busy == 0) {
		csr |= HIFN_DMACSR_C_CTRL_ENA;
		sc->sc_c_busy = 1;
	}
	if (sc->sc_s_busy == 0) {
		csr |= HIFN_DMACSR_S_CTRL_ENA;
		sc->sc_s_busy = 1;
	}
	if (sc->sc_r_busy == 0) {
		csr |= HIFN_DMACSR_R_CTRL_ENA;
		sc->sc_r_busy = 1;
	}
	if (sc->sc_d_busy == 0) {
		csr |= HIFN_DMACSR_D_CTRL_ENA;
		sc->sc_d_busy = 1;
	}
	if (csr)
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);

	if (hifn_debug) {
		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
	}

	sc->sc_active = 5;
	HIFN_UNLOCK(sc);
	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		pci_unmap_buf(sc, &cmd->dst);
err_dstmap1:
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		if (cmd->src_skb != cmd->dst_skb)
#ifdef NOTYET
			m_freem(cmd->dst_m);
#else
			device_printf(sc->sc_dev,
				"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
				__FILE__, __LINE__);
#endif
	}
	pci_unmap_buf(sc, &cmd->src);
err_srcmap1:
	HIFN_UNLOCK(sc);
	return (err);
}
static void
hifn_tick(unsigned long arg)
{
	struct hifn_softc *sc;
	unsigned long l_flags;

	if (arg >= HIFN_MAX_CHIPS)
		return;
	sc = hifn_chip_idx[arg];
	if (!sc)
		return;

	HIFN_LOCK(sc);
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	} else
		sc->sc_active--;
	HIFN_UNLOCK(sc);
	mod_timer(&sc->sc_tickto, jiffies + HZ);
}
static irqreturn_t
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
hifn_intr(int irq, void *arg)
#else
hifn_intr(int irq, void *arg, struct pt_regs *regs)
#endif
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;
	unsigned long l_flags;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return IRQ_NONE;

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}

	/* Acknowledge the interrupt sources we are about to handle. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return IRQ_HANDLED;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * interrupt source (by clearing it); the submit path
		 * re-enables it when new work arrives (see the masking
		 * sketch after this function).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL,
				("hifn_intr: null command slot %u", i));
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	dma->resk = i; dma->resu = u;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "wakeup crypto (%x) u %d/%d/%d/%d\n",
			    sc->sc_needwakeup,
			    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}

	return IRQ_HANDLED;
}
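
#if 0
/*
 * Illustrative sketch only (not compiled into the driver): the C_WAIT
 * handling in hifn_intr() above follows a common pattern -- when an
 * interrupt source reports a condition we cannot act on (no queued
 * commands), mask that single source in a software copy of the enable
 * mask and write it back; the submit path unmasks it when new work
 * arrives.  Every name below (my_dev, MY_IER, MY_IRQ_CMD_WAIT) is
 * hypothetical and exists only to show the shape of the sc_dmaier
 * bookkeeping.
 */
#define MY_IER			0x0008		/* interrupt enable register offset */
#define MY_IRQ_CMD_WAIT		0x00000100	/* "waiting on command" source */

struct my_dev {
	void __iomem	*regs;
	u_int32_t	 ier;		/* software copy of the enable mask */
};

static void my_dev_mask_cmd_wait(struct my_dev *d)
{
	d->ier &= ~MY_IRQ_CMD_WAIT;		/* stop reporting "no work" */
	writel(d->ier, d->regs + MY_IER);
}

static void my_dev_unmask_cmd_wait(struct my_dev *d)
{
	d->ier |= MY_IRQ_CMD_WAIT;		/* re-arm when work is queued */
	writel(d->ier, d->regs + MY_IER);
}
#endif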
/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.  (A caller-side usage sketch follows
 * this function.)
 */
static int
hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct hifn_softc *sc = device_get_softc(dev);
	struct cryptoini *c;
	int mac = 0, cry = 0, sesn;
	struct hifn_session *ses = NULL;
	unsigned long l_flags;

	DPRINTF("%s()\n", __FUNCTION__);

	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
	if (sidp == NULL || cri == NULL || sc == NULL) {
		DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
		return (EINVAL);
	}

	HIFN_LOCK(sc);
	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
				SLAB_ATOMIC);
		if (ses == NULL) {
			HIFN_UNLOCK(sc);
			return (ENOMEM);
		}
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (!sc->sc_sessions[sesn].hs_used) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
					SLAB_ATOMIC);
			if (ses == NULL) {
				HIFN_UNLOCK(sc);
				return (ENOMEM);
			}
			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
			bzero(sc->sc_sessions, sesn * sizeof(*ses));
			kfree(sc->sc_sessions);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}
	HIFN_UNLOCK(sc);

	bzero(ses, sizeof(*ses));
	ses->hs_used = 1;

	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac) {
				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
				return (EINVAL);
			}
			mac = 1;
			ses->hs_mlen = c->cri_mlen;
			if (ses->hs_mlen == 0) {
				switch (c->cri_alg) {
				case CRYPTO_MD5:
				case CRYPTO_MD5_HMAC:
					ses->hs_mlen = 16;
					break;
				case CRYPTO_SHA1:
				case CRYPTO_SHA1_HMAC:
					ses->hs_mlen = 20;
					break;
				}
			}
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* XXX this may read fewer, does it matter? */
			read_random(ses->hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry) {
				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
				return (EINVAL);
			}
			cry = 1;
			break;
		default:
			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0) {
		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
		return (EINVAL);
	}

	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);

	return (0);
}
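
#if 0
/*
 * Illustrative sketch only (not compiled into the driver): roughly how an
 * OCF consumer ends up in hifn_newsession() above.  It chains two
 * cryptoini descriptors (3DES-CBC plus SHA1-HMAC) and hands them to the
 * framework, assuming the usual OCF crypto_newsession(sidp, cri, hard)
 * prototype.  example_newsession() and the key buffers are made up for
 * the example.
 */
static int example_newsession(u_int64_t *sidp, caddr_t enckey, caddr_t mackey)
{
	struct cryptoini crie, cria;

	memset(&crie, 0, sizeof(crie));
	memset(&cria, 0, sizeof(cria));

	crie.cri_alg  = CRYPTO_3DES_CBC;
	crie.cri_klen = 192;			/* key length in bits */
	crie.cri_key  = enckey;
	crie.cri_next = &cria;			/* chain the MAC descriptor */

	cria.cri_alg  = CRYPTO_SHA1_HMAC;
	cria.cri_klen = 160;			/* key length in bits */
	cria.cri_key  = mackey;

	/* On success *sidp holds the encoded session id built by HIFN_SID(). */
	return crypto_newsession(sidp, &crie, 0);
}
#endif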
/*
 * Deallocate a session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
 */
static int
hifn_freesession(device_t dev, u_int64_t tid)
{
	struct hifn_softc *sc = device_get_softc(dev);
	int session, error;
	u_int32_t sid = CRYPTO_SESID2LID(tid);
	unsigned long l_flags;

	DPRINTF("%s()\n", __FUNCTION__);

	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
	if (sc == NULL) {
		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
		return (EINVAL);
	}

	HIFN_LOCK(sc);
	session = HIFN_SESSION(sid);
	if (session < sc->sc_nsessions) {
		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
		error = 0;
	} else {
		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
		error = EINVAL;
	}
	HIFN_UNLOCK(sc);

	return (error);
}
static int
hifn_process(device_t dev, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = device_get_softc(dev);
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_nsessions) {
		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
		err = EINVAL;
		goto errout;
	}

	cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
	if (cmd == NULL) {
		hifnstats.hst_nomem++;
		err = ENOMEM;
		goto errout;
	}
	memset(cmd, 0, sizeof(*cmd));

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		cmd->src_skb = (struct sk_buff *)crp->crp_buf;
		cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->src_io = (struct uio *)crp->crp_buf;
		cmd->dst_io = (struct uio *)crp->crp_buf;
	} else {
		cmd->src_buf = crp->crp_buf;
		cmd->dst_buf = crp->crp_buf;
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		     crd1->crd_alg == CRYPTO_MD5 ||
		     crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		     crd1->crd_alg == CRYPTO_ARC4 ||
		     crd1->crd_alg == CRYPTO_3DES_CBC ||
		     crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		     crd2->crd_alg == CRYPTO_MD5 ||
		     crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					crypto_copyback(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			} else {
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else {
					crypto_copydata(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			}
		}

		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;
		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
				err = EINVAL;
				goto errout;
			}
		}
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
		    maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
			bzero(cmd->mac + (maccrd->crd_klen >> 3),
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (err == 0) {
		return (0);
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part.  Notify the caller so they'll requeue this
		 * request and resubmit it again soon.  (See the
		 * backpressure sketch after this function.)
		 */
		if (hifn_debug)
			device_printf(sc->sc_dev, "requeue request\n");
		kfree(cmd);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (err);
	}

errout:
	if (cmd != NULL)
		kfree(cmd);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}
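
#if 0
/*
 * Illustrative sketch only (not compiled into the driver): the ERESTART
 * path in hifn_process() above implements OCF backpressure.  The driver
 * reports that it is temporarily out of descriptors, records which queue
 * it stalled in sc_needwakeup, and hifn_intr() later calls
 * crypto_unblock() once slots free up so the framework resubmits the
 * request.  my_softc and my_dispatch() are hypothetical stand-ins used
 * only to restate that contract.
 */
struct my_softc {
	int32_t		sc_cid;		/* crypto framework driver id */
	int		sc_needwakeup;	/* queues to re-open later */
};

static int my_dispatch(struct my_softc *sc, struct cryptop *crp);

static int my_process(struct my_softc *sc, struct cryptop *crp)
{
	int err = my_dispatch(sc, crp);	/* ERESTART when the rings are full */

	if (err == ERESTART) {
		sc->sc_needwakeup |= CRYPTO_SYMQ;	/* re-opened via crypto_unblock() */
		return (ERESTART);			/* framework will requeue crp */
	}
	return (err);
}
#endif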
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	DPRINTF("%s()\n", __FUNCTION__);

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_skb != cmd->dst_skb) {
#ifdef NOTYET
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
#else
				device_printf(sc->sc_dev,
				    "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
				    __FILE__, __LINE__);
#endif
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				pci_unmap_buf(sc, &cmd->dst);
			} else
				crp->crp_etype = ENOMEM;

			pci_unmap_buf(sc, &cmd->src);

			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	int i, u, ivlen, len;

	DPRINTF("%s()\n", __FUNCTION__);

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		if (cmd->src_skb != cmd->dst_skb) {
#ifdef NOTYET
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
#else
			device_printf(sc->sc_dev,
			    "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
			    __FILE__, __LINE__);
#endif
		}
	}

	if (cmd->sloplen != 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

	if (macbuf != NULL) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map)
		pci_unmap_buf(sc, &cmd->dst);
	pci_unmap_buf(sc, &cmd->src);
	kfree(cmd);
	crypto_done(crp);
}
/*
 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
 * and Group 1 registers; avoid conditions that could create
 * burst writes by doing a read in between the writes.
 *
 * NB: The read we interpose is always to the same register;
 *     we do this because reading from an arbitrary (e.g. last)
 *     register may not always work.
 */
static void
hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
{
	if (sc->sc_flags & HIFN_IS_7811) {
		if (sc->sc_bar0_lastreg == reg - 4)
			readl(sc->sc_bar0 + HIFN_0_PUCNFG);
		sc->sc_bar0_lastreg = reg;
	}
	writel(val, sc->sc_bar0 + reg);
}

static void
hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
{
	if (sc->sc_flags & HIFN_IS_7811) {
		if (sc->sc_bar1_lastreg == reg - 4)
			readl(sc->sc_bar1 + HIFN_1_REVID);
		sc->sc_bar1_lastreg = reg;
	}
	writel(val, sc->sc_bar1 + reg);
}
static struct pci_device_id hifn_pci_tbl[] = {
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	/*
	 * Other vendors share this PCI ID as well, such as
	 * http://www.powercrypt.com, and obviously they also
	 * use the same key.
	 */
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0, 0, 0, 0, 0, 0, }
};
MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
static struct pci_driver hifn_driver = {
	.name		= "hifn",
	.id_table	= hifn_pci_tbl,
	.probe		= hifn_probe,
	.remove		= hifn_remove,
	/* add PM stuff here one day */
};

static int __init hifn_init (void)
{
	struct hifn_softc *sc = NULL;
	int rc;

	DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);

	rc = pci_register_driver(&hifn_driver);
	pci_register_driver_compat(&hifn_driver, rc);

	return rc;
}

static void __exit hifn_exit (void)
{
	pci_unregister_driver(&hifn_driver);
}

module_init(hifn_init);
module_exit(hifn_exit);

MODULE_LICENSE("BSD");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");