[ar71xx] parse dot-separated MAC address (patch by Jonas <jmajau@ubnt.com>)
[openwrt.git] / target / linux / generic-2.6 / files / crypto / ocf / hifn / hifn7751.c
1 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
2
3 /*-
4 * Invertex AEON / Hifn 7751 driver
5 * Copyright (c) 1999 Invertex Inc. All rights reserved.
6 * Copyright (c) 1999 Theo de Raadt
7 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
8 * http://www.netsec.net
9 * Copyright (c) 2003 Hifn Inc.
10 *
11 * This driver is based on a previous driver by Invertex, for which they
12 * requested: Please send any comments, feedback, bug-fixes, or feature
13 * requests to software@invertex.com.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. The name of the author may not be used to endorse or promote products
25 * derived from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 *
38 * Effort sponsored in part by the Defense Advanced Research Projects
39 * Agency (DARPA) and Air Force Research Laboratory, Air Force
40 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
41 *
42 *
43 __FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
44 */
45
46 /*
47 * Driver for various Hifn encryption processors.
48 */
49 #ifndef AUTOCONF_INCLUDED
50 #include <linux/config.h>
51 #endif
52 #include <linux/module.h>
53 #include <linux/init.h>
54 #include <linux/list.h>
55 #include <linux/slab.h>
56 #include <linux/wait.h>
57 #include <linux/sched.h>
58 #include <linux/pci.h>
59 #include <linux/delay.h>
60 #include <linux/interrupt.h>
61 #include <linux/spinlock.h>
62 #include <linux/random.h>
63 #include <linux/version.h>
64 #include <linux/skbuff.h>
65 #include <asm/io.h>
66
67 #include <cryptodev.h>
68 #include <uio.h>
69 #include <hifn/hifn7751reg.h>
70 #include <hifn/hifn7751var.h>
71
72 #if 1
73 #define DPRINTF(a...) if (hifn_debug) { \
74 printk("%s: ", sc ? \
75 device_get_nameunit(sc->sc_dev) : "hifn"); \
76 printk(a); \
77 } else
78 #else
79 #define DPRINTF(a...)
80 #endif
81
82 static inline int
83 pci_get_revid(struct pci_dev *dev)
84 {
85 u8 rid = 0;
86 pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
87 return rid;
88 }
89
90 static struct hifn_stats hifnstats;
91
92 #define debug hifn_debug
93 int hifn_debug = 0;
94 module_param(hifn_debug, int, 0644);
95 MODULE_PARM_DESC(hifn_debug, "Enable debug");
96
97 int hifn_maxbatch = 1;
98 module_param(hifn_maxbatch, int, 0644);
99 MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
100
101 #ifdef MODULE_PARM
102 char *hifn_pllconfig = NULL;
103 MODULE_PARM(hifn_pllconfig, "s");
104 #else
105 char hifn_pllconfig[32]; /* This setting is RO after loading */
106 module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
107 #endif
108 MODULE_PARM_DESC(hifn_pllconfig, "PLL config, ie., pci66, ext33, ...");
109
110 #ifdef HIFN_VULCANDEV
111 #include <sys/conf.h>
112 #include <sys/uio.h>
113
114 static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
115 #endif
116
117 /*
118 * Prototypes and count for the pci_device structure
119 */
120 static int hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
121 static void hifn_remove(struct pci_dev *dev);
122
123 static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
124 static int hifn_freesession(device_t, u_int64_t);
125 static int hifn_process(device_t, struct cryptop *, int);
126
127 static device_method_t hifn_methods = {
128 /* crypto device methods */
129 DEVMETHOD(cryptodev_newsession, hifn_newsession),
130 DEVMETHOD(cryptodev_freesession,hifn_freesession),
131 DEVMETHOD(cryptodev_process, hifn_process),
132 };
133
134 static void hifn_reset_board(struct hifn_softc *, int);
135 static void hifn_reset_puc(struct hifn_softc *);
136 static void hifn_puc_wait(struct hifn_softc *);
137 static int hifn_enable_crypto(struct hifn_softc *);
138 static void hifn_set_retry(struct hifn_softc *sc);
139 static void hifn_init_dma(struct hifn_softc *);
140 static void hifn_init_pci_registers(struct hifn_softc *);
141 static int hifn_sramsize(struct hifn_softc *);
142 static int hifn_dramsize(struct hifn_softc *);
143 static int hifn_ramtype(struct hifn_softc *);
144 static void hifn_sessions(struct hifn_softc *);
145 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
146 static irqreturn_t hifn_intr(int irq, void *arg);
147 #else
148 static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
149 #endif
150 static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
151 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
152 static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
153 static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
154 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
155 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
156 static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
157 static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
158 static int hifn_init_pubrng(struct hifn_softc *);
159 static void hifn_tick(unsigned long arg);
160 static void hifn_abort(struct hifn_softc *);
161 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
162
163 static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
164 static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
165
166 #ifdef CONFIG_OCF_RANDOMHARVEST
167 static int hifn_read_random(void *arg, u_int32_t *buf, int len);
168 #endif
169
170 #define HIFN_MAX_CHIPS 8
171 static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
172
173 static __inline u_int32_t
174 READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
175 {
176 u_int32_t v = readl(sc->sc_bar0 + reg);
177 sc->sc_bar0_lastreg = (bus_size_t) -1;
178 return (v);
179 }
180 #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
181
182 static __inline u_int32_t
183 READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
184 {
185 u_int32_t v = readl(sc->sc_bar1 + reg);
186 sc->sc_bar1_lastreg = (bus_size_t) -1;
187 return (v);
188 }
189 #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
190
191 /*
192 * map in a given buffer (great on some arches :-)
193 */
194
195 static int
196 pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
197 {
198 struct iovec *iov = uio->uio_iov;
199
200 DPRINTF("%s()\n", __FUNCTION__);
201
202 buf->mapsize = 0;
203 for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
204 buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
205 iov->iov_base, iov->iov_len,
206 PCI_DMA_BIDIRECTIONAL);
207 buf->segs[buf->nsegs].ds_len = iov->iov_len;
208 buf->mapsize += iov->iov_len;
209 iov++;
210 buf->nsegs++;
211 }
212 /* identify this buffer by the first segment */
213 buf->map = (void *) buf->segs[0].ds_addr;
214 return(0);
215 }
216
217 /*
218 * map in a given sk_buff
219 */
220
221 static int
222 pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
223 {
224 int i;
225
226 DPRINTF("%s()\n", __FUNCTION__);
227
228 buf->mapsize = 0;
229
230 buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
231 skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
232 buf->segs[0].ds_len = skb_headlen(skb);
233 buf->mapsize += buf->segs[0].ds_len;
234
235 buf->nsegs = 1;
236
237 for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
238 buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
239 buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
240 page_address(skb_shinfo(skb)->frags[i].page) +
241 skb_shinfo(skb)->frags[i].page_offset,
242 buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
243 buf->mapsize += buf->segs[buf->nsegs].ds_len;
244 buf->nsegs++;
245 }
246
247 /* identify this buffer by the first segment */
248 buf->map = (void *) buf->segs[0].ds_addr;
249 return(0);
250 }
251
252 /*
253 * map in a given contiguous buffer
254 */
255
256 static int
257 pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
258 {
259 DPRINTF("%s()\n", __FUNCTION__);
260
261 buf->mapsize = 0;
262 buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
263 b, len, PCI_DMA_BIDIRECTIONAL);
264 buf->segs[0].ds_len = len;
265 buf->mapsize += buf->segs[0].ds_len;
266 buf->nsegs = 1;
267
268 /* identify this buffer by the first segment */
269 buf->map = (void *) buf->segs[0].ds_addr;
270 return(0);
271 }
272
273 #if 0 /* not needed at this time */
/*
 * Sync every mapped DMA segment of an operand back to the CPU's view.
 * Currently compiled out (#if 0) — kept for reference.
 */
static void
pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
{
	int i;

	DPRINTF("%s()\n", __FUNCTION__);
	for (i = 0; i < buf->nsegs; i++)
		pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
}
284 #endif
285
286 static void
287 pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
288 {
289 int i;
290 DPRINTF("%s()\n", __FUNCTION__);
291 for (i = 0; i < buf->nsegs; i++) {
292 pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
293 buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
294 buf->segs[i].ds_addr = 0;
295 buf->segs[i].ds_len = 0;
296 }
297 buf->nsegs = 0;
298 buf->mapsize = 0;
299 buf->map = 0;
300 }
301
302 static const char*
303 hifn_partname(struct hifn_softc *sc)
304 {
305 /* XXX sprintf numbers when not decoded */
306 switch (pci_get_vendor(sc->sc_pcidev)) {
307 case PCI_VENDOR_HIFN:
308 switch (pci_get_device(sc->sc_pcidev)) {
309 case PCI_PRODUCT_HIFN_6500: return "Hifn 6500";
310 case PCI_PRODUCT_HIFN_7751: return "Hifn 7751";
311 case PCI_PRODUCT_HIFN_7811: return "Hifn 7811";
312 case PCI_PRODUCT_HIFN_7951: return "Hifn 7951";
313 case PCI_PRODUCT_HIFN_7955: return "Hifn 7955";
314 case PCI_PRODUCT_HIFN_7956: return "Hifn 7956";
315 }
316 return "Hifn unknown-part";
317 case PCI_VENDOR_INVERTEX:
318 switch (pci_get_device(sc->sc_pcidev)) {
319 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
320 }
321 return "Invertex unknown-part";
322 case PCI_VENDOR_NETSEC:
323 switch (pci_get_device(sc->sc_pcidev)) {
324 case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751";
325 }
326 return "NetSec unknown-part";
327 }
328 return "Unknown-vendor unknown-part";
329 }
330
331 static u_int
332 checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
333 {
334 struct hifn_softc *sc = pci_get_drvdata(dev);
335 if (v > max) {
336 device_printf(sc->sc_dev, "Warning, %s %u out of range, "
337 "using max %u\n", what, v, max);
338 v = max;
339 } else if (v < min) {
340 device_printf(sc->sc_dev, "Warning, %s %u out of range, "
341 "using min %u\n", what, v, min);
342 v = min;
343 }
344 return v;
345 }
346
347 /*
348 * Select PLL configuration for 795x parts. This is complicated in
349 * that we cannot determine the optimal parameters without user input.
350 * The reference clock is derived from an external clock through a
351 * multiplier. The external clock is either the host bus (i.e. PCI)
352 * or an external clock generator. When using the PCI bus we assume
353 * the clock is either 33 or 66 MHz; for an external source we cannot
354 * tell the speed.
355 *
356 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
357 * for an external source, followed by the frequency. We calculate
358 * the appropriate multiplier and PLL register contents accordingly.
359 * When no configuration is given we default to "pci66" since that
360 * always will allow the card to work. If a card is using the PCI
361 * bus clock and in a 33MHz slot then it will be operating at half
362 * speed until the correct information is provided.
363 *
364 * We use a default setting of "ext66" because according to Mike Ham
365 * of HiFn, almost every board in existence has an external crystal
366 * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
367 * because PCI33 can have clocks from 0 to 33Mhz, and some have
368 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
369 */
/*
 * Parse the hifn_pllconfig module parameter ("pci66", "ext33", ...)
 * and build the PLL configuration word for 795x parts; see the block
 * comment above for the background.  Result is stored through *pll.
 */
static void
hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
{
	const char *pllspec = hifn_pllconfig;
	u_int freq, mul, fl, fh;
	u_int32_t pllconfig;
	char *nxt;

	/* default clock source when no parameter was given */
	if (pllspec == NULL)
		pllspec = "ext66";
	fl = 33, fh = 66;	/* frequency bounds; widened below for ext */
	pllconfig = 0;
	if (strncmp(pllspec, "ext", 3) == 0) {
		pllspec += 3;
		pllconfig |= HIFN_PLL_REF_SEL;
		switch (pci_get_device(dev)) {
		case PCI_PRODUCT_HIFN_7955:
		case PCI_PRODUCT_HIFN_7956:
			fl = 20, fh = 100;
			break;
#ifdef notyet
		case PCI_PRODUCT_HIFN_7954:
			fl = 20, fh = 66;
			break;
#endif
		}
	} else if (strncmp(pllspec, "pci", 3) == 0)
		pllspec += 3;
	/* trailing digits are the clock frequency in MHz; 66 if absent */
	freq = strtoul(pllspec, &nxt, 10);
	if (nxt == pllspec)
		freq = 66;
	else
		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
	/*
	 * Calculate multiplier. We target a Fck of 266 MHz,
	 * allowing only even values, possibly rounded down.
	 * Multipliers > 8 must set the charge pump current.
	 */
	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
	if (mul > 8)
		pllconfig |= HIFN_PLL_IS;
	*pll = pllconfig;
}
414
415 /*
416 * Attach an interface that successfully probed.
417 */
/*
 * Attach an interface that successfully probed.
 *
 * Bring-up order matters: enable PCI, map BARs, allocate the DMA
 * descriptor area, reset/unlock the crypto engine, size the RAM,
 * hook the interrupt, then register with the OCF crypto framework
 * and start the housekeeping timer.
 *
 * NOTE(review): on the `fail` path the BAR ioremap()s are never
 * iounmap()ed and pci_disable_device() is not called — verify
 * whether these leaks matter for this driver's unload model.
 */
static int
hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct hifn_softc *sc = NULL;
	char rbase;
	u_int16_t ena, rev;
	int rseg, rc;
	unsigned long mem_start, mem_len;
	static int num_chips = 0;	/* units attached so far */

	DPRINTF("%s()\n", __FUNCTION__);

	if (pci_enable_device(dev) < 0)
		return(-ENODEV);

	if (pci_set_mwi(dev))
		return(-ENODEV);

	if (!dev->irq) {
		printk("hifn: found device with no IRQ assigned. check BIOS settings!");
		pci_disable_device(dev);
		return(-ENODEV);
	}

	sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return(-ENOMEM);
	memset(sc, 0, sizeof(*sc));

	softc_device_init(sc, "hifn", num_chips, hifn_methods);

	sc->sc_pcidev = dev;
	sc->sc_irq = -1;	/* sentinels so the fail path knows what to undo */
	sc->sc_cid = -1;
	sc->sc_num = num_chips++;
	if (sc->sc_num < HIFN_MAX_CHIPS)
		hifn_chip_idx[sc->sc_num] = sc;

	pci_set_drvdata(sc->sc_pcidev, sc);

	spin_lock_init(&sc->sc_mtx);

	/* XXX handle power management */

	/*
	 * The 7951 and 795x have a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * The 795x parts support AES.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
		/*
		 * Select PLL configuration.  This depends on the
		 * bus and board design and must be manually configured
		 * if the default setting is unacceptable.
		 */
		hifn_getpllconfig(dev, &sc->sc_pllconfig);
	}

	/*
	 * Setup PCI resources.  Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	mem_start = pci_resource_start(sc->sc_pcidev, 0);
	mem_len = pci_resource_len(sc->sc_pcidev, 0);
	sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
	if (!sc->sc_bar0) {
		device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
		goto fail;
	}
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	mem_start = pci_resource_start(sc->sc_pcidev, 1);
	mem_len = pci_resource_len(sc->sc_pcidev, 1);
	sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
	if (!sc->sc_bar1) {
		device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
		goto fail;
	}
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	/* fix up the bus size */
	if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
		device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
		goto fail;
	}
	if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
		device_printf(sc->sc_dev,
			"No usable consistent DMA configuration, aborting.\n");
		goto fail;
	}

	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
			sizeof(*sc->sc_dma),
			&sc->sc_dma_physaddr);
	if (!sc->sc_dma) {
		device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
		goto fail;
	}
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(sc->sc_dev, "crypto enabling failed\n");
		goto fail;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	pci_set_master(sc->sc_pcidev);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61) /*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
	if (rc) {
		device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
		goto fail;
	}
	sc->sc_irq = dev->irq;

	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	/* pretty-print the RAM size in K or M units */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
		hifn_partname(sc), rev,
		rseg, rbase, sc->sc_drammodel ? 'd' : 's');
	if (sc->sc_flags & HIFN_IS_7956)
		printf(", pll=0x%x<%s clk, %ux mult>",
			sc->sc_pllconfig,
			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
	printf("\n");

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(sc->sc_dev, "could not get crypto driver id\n");
		goto fail;
	}

	/* read the chip-enable level to learn which algorithms to register */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
		break;
	}

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	/* periodic housekeeping (hifn_tick) once per second */
	init_timer(&sc->sc_tickto);
	sc->sc_tickto.function = hifn_tick;
	sc->sc_tickto.data = (unsigned long) sc->sc_num;
	mod_timer(&sc->sc_tickto, jiffies + HZ);

	return (0);

fail:
	/* unwind in reverse order of acquisition; sentinels guard the rest */
	if (sc->sc_cid >= 0)
		crypto_unregister_all(sc->sc_cid);
	if (sc->sc_irq != -1)
		free_irq(sc->sc_irq, sc);
	if (sc->sc_dma) {
		/* Turn off DMA polling */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
		    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

		pci_free_consistent(sc->sc_pcidev,
				sizeof(*sc->sc_dma),
				sc->sc_dma, sc->sc_dma_physaddr);
	}
	kfree(sc);
	return (-ENXIO);
}
672
673 /*
674 * Detach an interface that successfully probed.
675 */
/*
 * Detach an interface that successfully probed.
 *
 * Teardown order: mask interrupts, stop the housekeeping timer,
 * stop DMA polling, unregister from the crypto framework, release
 * the IRQ, then free the DMA descriptor area.
 *
 * NOTE(review): within this function the BAR mappings are not
 * iounmap()ed and sc itself is not kfree()d — confirm whether that
 * is handled elsewhere or is a leak on unload.
 */
static void
hifn_remove(struct pci_dev *dev)
{
	struct hifn_softc *sc = pci_get_drvdata(dev);
	unsigned long l_flags;	/* presumably used by HIFN_LOCK/UNLOCK — verify macro */

	DPRINTF("%s()\n", __FUNCTION__);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	/* disable interrupts */
	HIFN_LOCK(sc);
	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
	HIFN_UNLOCK(sc);

	/*XXX other resources */
	del_timer_sync(&sc->sc_tickto);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	crypto_unregister_all(sc->sc_cid);

	free_irq(sc->sc_irq, sc);

	pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
		sc->sc_dma, sc->sc_dma_physaddr);
}
705
706
/*
 * Initialize the public-key unit and the RNG on parts that have them.
 * Returns 0 on success, 1 if the public key engine failed to come
 * out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	int i;

	DPRINTF("%s()\n", __FUNCTION__);

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* poll up to ~100ms for the reset bit to clear */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
#ifdef CONFIG_OCF_RANDOMHARVEST
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable the RNG */
			u_int32_t r;
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* flag so hifn_read_random discards the first sample */
		sc->sc_rngfirst = 1;
		crypto_rregister(sc->sc_cid, hifn_read_random, sc);
	}
#endif

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#ifdef HIFN_VULCANDEV
		sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
					UID_ROOT, GID_WHEEL, 0666,
					"vulcanpk");
		sc->sc_pkdev->si_drv1 = sc;
#endif
	}

	return (0);
}
771
772 #ifdef CONFIG_OCF_RANDOMHARVEST
/*
 * RNG harvest callback: fill buf with up to `len` 32-bit words of
 * hardware randomness and return the number of words produced.
 *
 * On the 7811 the FIFO status is polled and up to 10 words are read
 * per call; on other parts a single word is read from RNG_DATA per
 * call.  The first sample after enabling the RNG is discarded
 * (rc forced back to 0) per the sc_rngfirst flag.
 */
static int
hifn_read_random(void *arg, u_int32_t *buf, int len)
{
	struct hifn_softc *sc = (struct hifn_softc *) arg;
	u_int32_t sts;
	int i, rc = 0;

	if (len <= 0)
		return rc;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* ONLY VALID ON 7811!!!! */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dev,
					      "RNG underflow: disabling\n");
				/* DAVIDM perhaps return -1 */
				break;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			if (rc < len)
				buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			if (rc < len)
				buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
		}
	} else
		/* non-7811: one word per invocation regardless of len */
		buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);

	/* NB: discard first data read */
	if (sc->sc_rngfirst) {
		sc->sc_rngfirst = 0;
		rc = 0;
	}

	return(rc);
}
817
818 static void
819 hifn_puc_wait(struct hifn_softc *sc)
820 {
821 int i;
822 int reg = HIFN_0_PUCTRL;
823
824 if (sc->sc_flags & HIFN_IS_7956) {
825 reg = HIFN_0_PUCTRL2;
826 }
827
828 for (i = 5000; i > 0; i--) {
829 DELAY(1);
830 if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
831 break;
832 }
833 if (!i)
834 device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
835 READ_REG_0(sc, HIFN_0_PUCTRL));
836 }
837
838 /*
839 * Reset the processing unit.
840 */
841 static void
842 hifn_reset_puc(struct hifn_softc *sc)
843 {
844 /* Reset processing unit */
845 int reg = HIFN_0_PUCTRL;
846
847 if (sc->sc_flags & HIFN_IS_7956) {
848 reg = HIFN_0_PUCTRL2;
849 }
850 WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
851
852 hifn_puc_wait(sc);
853 }
854
855 /*
856 * Set the Retry and TRDY registers; note that we set them to
857 * zero because the 7811 locks up when forced to retry (section
858 * 3.6 of "Specification Update SU-0014-04". Not clear if we
859 * should do this for all Hifn parts, but it doesn't seem to hurt.
860 */
861 static void
862 hifn_set_retry(struct hifn_softc *sc)
863 {
864 DPRINTF("%s()\n", __FUNCTION__);
865 /* NB: RETRY only responds to 8-bit reads/writes */
866 pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
867 pci_write_config_dword(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
868 }
869
870 /*
871 * Resets the board. Values in the regesters are left as is
872 * from the reset (i.e. initial values are assigned elsewhere).
873 */
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 * `full` selects a full DMA-unit reset rather than a master
 * reset plus processing-unit reset.
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	DPRINTF("%s()\n", __FUNCTION__);
	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
	/* descriptor rings are invalid after reset; start from scratch */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* wait up to ~1s for the 7811's CRAM to initialize */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			device_printf(sc->sc_dev, ": cram init timeout\n");
	} else {
		/* set up DMA configuration register #2 */
		/* turn off all PK and BAR0 swaps */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
		    (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
		    (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
		    (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
		    (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
	}
}
932
/*
 * Advance the unlock "signature" value by cnt steps.
 *
 * Each step takes the parity of the bits selected by the tap mask
 * 0x80080125 and shifts it into the low bit (a Fibonacci-style LFSR
 * update).  Used by hifn_enable_crypto() to generate the secret
 * handshake sequence.
 *
 * Changes from the original: fixed-width <stdint.h>/<linux/types.h>
 * types (same underlying types as u_int32_t/u_int used elsewhere in
 * this file, so the prototype stays compatible) and an unsigned loop
 * counter, avoiding the signed/unsigned comparison of `int i` with
 * the unsigned `cnt`.
 */
static uint32_t
hifn_next_signature(uint32_t a, unsigned int cnt)
{
	uint32_t parity;
	unsigned int i;

	for (i = 0; i < cnt; i++) {

		/* get the parity of the tapped bits */
		parity = a & 0x80080125;
		parity ^= parity >> 16;
		parity ^= parity >> 8;
		parity ^= parity >> 4;
		parity ^= parity >> 2;
		parity ^= parity >> 1;

		/* shift the parity bit into the LFSR state */
		a = (parity & 1) ^ (a << 1);
	}

	return a;
}
954
955
956 /*
957 * Checks to see if crypto is already enabled. If crypto isn't enable,
958 * "hifn_enable_crypto" is called to enable it. The check is important,
959 * as enabling crypto twice will lock the board.
960 */
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
 * the unlock ("secret handshake") sequence is performed.  The check is
 * important, as enabling crypto twice will lock the board.
 * Returns 0 on success (or already enabled/disabled state reported),
 * 1 on an unrecognized encryption level.
 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	/* per-step offsets added into the signature sequence (all zero here) */
	char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			  0x00, 0x00, 0x00, 0x00 };

	DPRINTF("%s()\n", __FUNCTION__);

	/* save register state so it can be restored at `report` */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
				"Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
				"Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/* put the DMA engine in unlock mode and run the handshake */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* write the 13-step signature sequence derived from SECRET1 */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* re-read the encryption level to see whether the unlock took */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
				"locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
				"successfully!\n");
	}
#endif

report:
	/* restore the saved register state */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		device_printf(sc->sc_dev, "disabled\n");
		break;
	}

	return 0;
}
1048
1049 /*
1050 * Give initial values to the registers listed in the "Register Space"
1051 * section of the HIFN Software Development reference manual.
1052 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	DPRINTF("%s()\n", __FUNCTION__);

	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers (command, source, destination,
	 * result) so the chip knows where each descriptor ring lives in the
	 * shared DMA area */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register: disable all four DMA engines and
	 * acknowledge (clear) every pending completion/abort/overrun bit */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* all engines idle; build the interrupt-enable mask to match */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	/* "command wait" interrupts are only enabled on demand (see
	 * hifn_crypto) when more than one command is queued */
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);


	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		  | HIFN_PLL_BP | HIFN_PLL_MBSET;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	/* clear any pending destination-overrun, then configure the DMA
	 * engine (poll frequency/scalar fields come from compile-time
	 * constants) */
	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1141
1142 /*
1143 * The maximum number of sessions supported by the card
1144 * is dependent on the amount of context ram, which
1145 * encryption algorithms are enabled, and how compression
1146 * is configured. This should be configured before this
1147 * routine is called.
1148 */
1149 static void
1150 hifn_sessions(struct hifn_softc *sc)
1151 {
1152 u_int32_t pucnfg;
1153 int ctxsize;
1154
1155 DPRINTF("%s()\n", __FUNCTION__);
1156
1157 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1158
1159 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1160 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1161 ctxsize = 128;
1162 else
1163 ctxsize = 512;
1164 /*
1165 * 7955/7956 has internal context memory of 32K
1166 */
1167 if (sc->sc_flags & HIFN_IS_7956)
1168 sc->sc_maxses = 32768 / ctxsize;
1169 else
1170 sc->sc_maxses = 1 +
1171 ((sc->sc_ramsize - 32768) / ctxsize);
1172 } else
1173 sc->sc_maxses = sc->sc_ramsize / 16384;
1174
1175 if (sc->sc_maxses > 2048)
1176 sc->sc_maxses = 2048;
1177 }
1178
1179 /*
1180 * Determine ram type (sram or dram). Board should be just out of a reset
1181 * state when this is called.
1182 */
1183 static int
1184 hifn_ramtype(struct hifn_softc *sc)
1185 {
1186 u_int8_t data[8], dataexpect[8];
1187 int i;
1188
1189 for (i = 0; i < sizeof(data); i++)
1190 data[i] = dataexpect[i] = 0x55;
1191 if (hifn_writeramaddr(sc, 0, data))
1192 return (-1);
1193 if (hifn_readramaddr(sc, 0, data))
1194 return (-1);
1195 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1196 sc->sc_drammodel = 1;
1197 return (0);
1198 }
1199
1200 for (i = 0; i < sizeof(data); i++)
1201 data[i] = dataexpect[i] = 0xaa;
1202 if (hifn_writeramaddr(sc, 0, data))
1203 return (-1);
1204 if (hifn_readramaddr(sc, 0, data))
1205 return (-1);
1206 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1207 sc->sc_drammodel = 1;
1208 return (0);
1209 }
1210
1211 return (0);
1212 }
1213
1214 #define HIFN_SRAM_MAX (32 << 20)
1215 #define HIFN_SRAM_STEP_SIZE 16384
1216 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1217
/*
 * Probe how much SRAM is attached: stamp every 16KB step (up to 32MB)
 * with its step index, then read the steps back in ascending order.
 * sc->sc_ramsize ends up just past the highest step that read back
 * correctly.  Always returns 0; the result is the sc_ramsize side
 * effect.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/* Base pattern; the first sizeof(i) bytes are overwritten with the
	 * step index below so every step holds a distinct stamp. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Write high-to-low so that if addresses alias (less RAM than the
	 * 32MB maximum), each location ends up holding the lowest aliased
	 * index -- the readback loop then stops at the true size.
	 * NOTE(review): write errors are not checked here; a failed write
	 * surfaces as a mismatch in the readback pass. */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Read back low-to-high; first mismatch or read error ends the
	 * probe with sc_ramsize at the last good step. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1247
1248 /*
1249 * XXX For dram boards, one should really try all of the
1250 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1251 * is already set up correctly.
1252 */
1253 static int
1254 hifn_dramsize(struct hifn_softc *sc)
1255 {
1256 u_int32_t cnfg;
1257
1258 if (sc->sc_flags & HIFN_IS_7956) {
1259 /*
1260 * 7955/7956 have a fixed internal ram of only 32K.
1261 */
1262 sc->sc_ramsize = 32768;
1263 } else {
1264 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1265 HIFN_PUCNFG_DRAMMASK;
1266 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1267 }
1268 return (0);
1269 }
1270
/*
 * Claim one slot in each of the four descriptor rings (command, source,
 * destination, result) and return the claimed indices through the out
 * parameters.  When a ring's insert index has reached the end, the
 * ring's jump descriptor (which points back at slot 0) is armed before
 * slot 0 is handed out.
 *
 * NOTE(review): assumes the caller has already verified there is room
 * in every ring and serializes access to the rings -- confirm against
 * callers (hifn_writeramaddr/hifn_readramaddr).
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	DPRINTF("%s()\n", __FUNCTION__);

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		/* Write the jump descriptor fields first, then set VALID
		 * after a write barrier so the chip never consumes a
		 * half-written descriptor. */
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1322
1323 static int
1324 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1325 {
1326 struct hifn_dma *dma = sc->sc_dma;
1327 hifn_base_command_t wc;
1328 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1329 int r, cmdi, resi, srci, dsti;
1330
1331 DPRINTF("%s()\n", __FUNCTION__);
1332
1333 wc.masks = htole16(3 << 13);
1334 wc.session_num = htole16(addr >> 14);
1335 wc.total_source_count = htole16(8);
1336 wc.total_dest_count = htole16(addr & 0x3fff);
1337
1338 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1339
1340 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1341 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1342 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1343
1344 /* build write command */
1345 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1346 *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1347 bcopy(data, &dma->test_src, sizeof(dma->test_src));
1348
1349 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1350 + offsetof(struct hifn_dma, test_src));
1351 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1352 + offsetof(struct hifn_dma, test_dst));
1353
1354 dma->cmdr[cmdi].l = htole32(16 | masks);
1355 dma->srcr[srci].l = htole32(8 | masks);
1356 dma->dstr[dsti].l = htole32(4 | masks);
1357 dma->resr[resi].l = htole32(4 | masks);
1358
1359 for (r = 10000; r >= 0; r--) {
1360 DELAY(10);
1361 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1362 break;
1363 }
1364 if (r == 0) {
1365 device_printf(sc->sc_dev, "writeramaddr -- "
1366 "result[%d](addr %d) still valid\n", resi, addr);
1367 r = -1;
1368 return (-1);
1369 } else
1370 r = 0;
1371
1372 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1373 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1374 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1375
1376 return (r);
1377 }
1378
1379 static int
1380 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1381 {
1382 struct hifn_dma *dma = sc->sc_dma;
1383 hifn_base_command_t rc;
1384 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1385 int r, cmdi, srci, dsti, resi;
1386
1387 DPRINTF("%s()\n", __FUNCTION__);
1388
1389 rc.masks = htole16(2 << 13);
1390 rc.session_num = htole16(addr >> 14);
1391 rc.total_source_count = htole16(addr & 0x3fff);
1392 rc.total_dest_count = htole16(8);
1393
1394 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1395
1396 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1397 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1398 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1399
1400 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1401 *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1402
1403 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1404 offsetof(struct hifn_dma, test_src));
1405 dma->test_src = 0;
1406 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
1407 offsetof(struct hifn_dma, test_dst));
1408 dma->test_dst = 0;
1409 dma->cmdr[cmdi].l = htole32(8 | masks);
1410 dma->srcr[srci].l = htole32(8 | masks);
1411 dma->dstr[dsti].l = htole32(8 | masks);
1412 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1413
1414 for (r = 10000; r >= 0; r--) {
1415 DELAY(10);
1416 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1417 break;
1418 }
1419 if (r == 0) {
1420 device_printf(sc->sc_dev, "readramaddr -- "
1421 "result[%d](addr %d) still valid\n", resi, addr);
1422 r = -1;
1423 } else {
1424 r = 0;
1425 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1426 }
1427
1428 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1429 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1430 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1431
1432 return (r);
1433 }
1434
1435 /*
1436 * Initialize the descriptor rings.
1437 */
1438 static void
1439 hifn_init_dma(struct hifn_softc *sc)
1440 {
1441 struct hifn_dma *dma = sc->sc_dma;
1442 int i;
1443
1444 DPRINTF("%s()\n", __FUNCTION__);
1445
1446 hifn_set_retry(sc);
1447
1448 /* initialize static pointer values */
1449 for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1450 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1451 offsetof(struct hifn_dma, command_bufs[i][0]));
1452 for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1453 dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1454 offsetof(struct hifn_dma, result_bufs[i][0]));
1455
1456 dma->cmdr[HIFN_D_CMD_RSIZE].p =
1457 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1458 dma->srcr[HIFN_D_SRC_RSIZE].p =
1459 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1460 dma->dstr[HIFN_D_DST_RSIZE].p =
1461 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1462 dma->resr[HIFN_D_RES_RSIZE].p =
1463 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1464
1465 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1466 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1467 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1468 }
1469
1470 /*
1471 * Writes out the raw command buffer space. Returns the
1472 * command buffer size.
1473 */
1474 static u_int
1475 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1476 {
1477 struct hifn_softc *sc = NULL;
1478 u_int8_t *buf_pos;
1479 hifn_base_command_t *base_cmd;
1480 hifn_mac_command_t *mac_cmd;
1481 hifn_crypt_command_t *cry_cmd;
1482 int using_mac, using_crypt, len, ivlen;
1483 u_int32_t dlen, slen;
1484
1485 DPRINTF("%s()\n", __FUNCTION__);
1486
1487 buf_pos = buf;
1488 using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1489 using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1490
1491 base_cmd = (hifn_base_command_t *)buf_pos;
1492 base_cmd->masks = htole16(cmd->base_masks);
1493 slen = cmd->src_mapsize;
1494 if (cmd->sloplen)
1495 dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1496 else
1497 dlen = cmd->dst_mapsize;
1498 base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1499 base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1500 dlen >>= 16;
1501 slen >>= 16;
1502 base_cmd->session_num = htole16(
1503 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1504 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1505 buf_pos += sizeof(hifn_base_command_t);
1506
1507 if (using_mac) {
1508 mac_cmd = (hifn_mac_command_t *)buf_pos;
1509 dlen = cmd->maccrd->crd_len;
1510 mac_cmd->source_count = htole16(dlen & 0xffff);
1511 dlen >>= 16;
1512 mac_cmd->masks = htole16(cmd->mac_masks |
1513 ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1514 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1515 mac_cmd->reserved = 0;
1516 buf_pos += sizeof(hifn_mac_command_t);
1517 }
1518
1519 if (using_crypt) {
1520 cry_cmd = (hifn_crypt_command_t *)buf_pos;
1521 dlen = cmd->enccrd->crd_len;
1522 cry_cmd->source_count = htole16(dlen & 0xffff);
1523 dlen >>= 16;
1524 cry_cmd->masks = htole16(cmd->cry_masks |
1525 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1526 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1527 cry_cmd->reserved = 0;
1528 buf_pos += sizeof(hifn_crypt_command_t);
1529 }
1530
1531 if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1532 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1533 buf_pos += HIFN_MAC_KEY_LENGTH;
1534 }
1535
1536 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1537 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1538 case HIFN_CRYPT_CMD_ALG_3DES:
1539 bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1540 buf_pos += HIFN_3DES_KEY_LENGTH;
1541 break;
1542 case HIFN_CRYPT_CMD_ALG_DES:
1543 bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1544 buf_pos += HIFN_DES_KEY_LENGTH;
1545 break;
1546 case HIFN_CRYPT_CMD_ALG_RC4:
1547 len = 256;
1548 do {
1549 int clen;
1550
1551 clen = MIN(cmd->cklen, len);
1552 bcopy(cmd->ck, buf_pos, clen);
1553 len -= clen;
1554 buf_pos += clen;
1555 } while (len > 0);
1556 bzero(buf_pos, 4);
1557 buf_pos += 4;
1558 break;
1559 case HIFN_CRYPT_CMD_ALG_AES:
1560 /*
1561 * AES keys are variable 128, 192 and
1562 * 256 bits (16, 24 and 32 bytes).
1563 */
1564 bcopy(cmd->ck, buf_pos, cmd->cklen);
1565 buf_pos += cmd->cklen;
1566 break;
1567 }
1568 }
1569
1570 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1571 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1572 case HIFN_CRYPT_CMD_ALG_AES:
1573 ivlen = HIFN_AES_IV_LENGTH;
1574 break;
1575 default:
1576 ivlen = HIFN_IV_LENGTH;
1577 break;
1578 }
1579 bcopy(cmd->iv, buf_pos, ivlen);
1580 buf_pos += ivlen;
1581 }
1582
1583 if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1584 bzero(buf_pos, 8);
1585 buf_pos += 8;
1586 }
1587
1588 return (buf_pos - buf);
1589 }
1590
1591 static int
1592 hifn_dmamap_aligned(struct hifn_operand *op)
1593 {
1594 struct hifn_softc *sc = NULL;
1595 int i;
1596
1597 DPRINTF("%s()\n", __FUNCTION__);
1598
1599 for (i = 0; i < op->nsegs; i++) {
1600 if (op->segs[i].ds_addr & 3)
1601 return (0);
1602 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1603 return (0);
1604 }
1605 return (1);
1606 }
1607
1608 static __inline int
1609 hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1610 {
1611 struct hifn_dma *dma = sc->sc_dma;
1612
1613 if (++idx == HIFN_D_DST_RSIZE) {
1614 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1615 HIFN_D_MASKDONEIRQ);
1616 HIFN_DSTR_SYNC(sc, idx,
1617 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1618 idx = 0;
1619 }
1620 return (idx);
1621 }
1622
/*
 * Post the destination-side DMA descriptors for a command.  All
 * segments but the last are posted as-is; the final bytes are handled
 * specially when the total length is not 32-bit aligned: the tail is
 * redirected into the per-slot "slop" word in the shared DMA area and
 * copied back out later.  Returns the new destination insert index;
 * also advances dma->dsti and dma->dstu.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	DPRINTF("%s()\n", __FUNCTION__);

	idx = dma->dsti;
	/* all but the final segment: plain descriptors, VALID set last
	 * (after wmb) so the chip never sees a partial descriptor */
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		wmb();
		dma->dstr[idx].l |= htole32(HIFN_D_VALID);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		idx = hifn_dmamap_dstwrap(sc, idx);
	}

	if (cmd->sloplen == 0) {
		/* aligned: the last segment is the LAST descriptor */
		p = dst->segs[i].ds_addr;
		l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		/* unaligned tail: final descriptor points at the slop
		 * word in the shared DMA area instead of the real buffer */
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* post the aligned part of the last segment, if any */
		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			wmb();
			dma->dstr[idx].l |= htole32(HIFN_D_VALID);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			idx = hifn_dmamap_dstwrap(sc, idx);
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	wmb();
	dma->dstr[idx].l |= htole32(HIFN_D_VALID);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	idx = hifn_dmamap_dstwrap(sc, idx);

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1682
1683 static __inline int
1684 hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1685 {
1686 struct hifn_dma *dma = sc->sc_dma;
1687
1688 if (++idx == HIFN_D_SRC_RSIZE) {
1689 dma->srcr[idx].l = htole32(HIFN_D_VALID |
1690 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1691 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1692 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1693 idx = 0;
1694 }
1695 return (idx);
1696 }
1697
1698 static int
1699 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1700 {
1701 struct hifn_dma *dma = sc->sc_dma;
1702 struct hifn_operand *src = &cmd->src;
1703 int idx, i;
1704 u_int32_t last = 0;
1705
1706 DPRINTF("%s()\n", __FUNCTION__);
1707
1708 idx = dma->srci;
1709 for (i = 0; i < src->nsegs; i++) {
1710 if (i == src->nsegs - 1)
1711 last = HIFN_D_LAST;
1712
1713 dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1714 dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1715 HIFN_D_MASKDONEIRQ | last);
1716 wmb();
1717 dma->srcr[idx].l |= htole32(HIFN_D_VALID);
1718 HIFN_SRCR_SYNC(sc, idx,
1719 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1720
1721 idx = hifn_dmamap_srcwrap(sc, idx);
1722 }
1723 dma->srci = idx;
1724 dma->srcu += src->nsegs;
1725 return (idx);
1726 }
1727
1728
/*
 * Queue one crypto operation to the hardware.  Reserves ring space,
 * maps the source (and, if needed, a separate destination), writes the
 * command buffer, posts source/result/destination descriptors, and
 * kicks any idle DMA engines.  Returns 0 on success, ERESTART when the
 * rings are full (caller requeues), or an errno on mapping failure.
 * Runs entirely under the softc lock.
 */
static int
hifn_crypto(
	struct hifn_softc *sc,
	struct hifn_command *cmd,
	struct cryptop *crp,
	int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen, csr;
	int cmdi, resi, err = 0;
	unsigned long l_flags;

	DPRINTF("%s()\n", __FUNCTION__);

	/*
	 * need 1 cmd, and 1 res
	 *
	 * NB: check this first since it's easy.
	 */
	HIFN_LOCK(sc);
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
				"cmd/result exhaustion, cmdu %u resu %u\n",
				dma->cmdu, dma->resu);
		}
#endif
		hifnstats.hst_nomem_cr++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		HIFN_UNLOCK(sc);
		return (ERESTART);
	}

	/* map the source operand according to the request's buffer type */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	}

	/* aligned source can be processed in place (dst aliases src);
	 * unaligned sources would need a bounce copy, which this Linux
	 * port only implements for the mbuf (NOTYET) path */
	if (hifn_dmamap_aligned(&cmd->src)) {
		cmd->sloplen = cmd->src_mapsize & 3;
		cmd->dst = cmd->src;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_SKBUF) {
#ifdef NOTYET
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			KASSERT(cmd->dst_m == cmd->src_m,
			    ("hifn_crypto: dst_m initialized improperly"));
			hifnstats.hst_unaligned++;
			/*
			 * Source is not aligned on a longword boundary.
			 * Copy the data to insure alignment.  If we fail
			 * to allocate mbufs or clusters while doing this
			 * we return ERESTART so the operation is requeued
			 * at the crypto later, but only if there are
			 * ops already posted to the hardware; otherwise we
			 * have no guarantee that we'll be re-entered.
			 */
			totlen = cmd->src_mapsize;
			if (cmd->src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
					m_free(m0);
					m0 = NULL;
				}
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				hifnstats.hst_nomem_mbuf++;
				err = dma->cmdu ? ERESTART : ENOMEM;
				goto err_srcmap;
			}
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if ((m0->m_flags & M_EXT) == 0) {
					hifnstats.hst_nomem_mcl++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					hifnstats.hst_nomem_mbuf++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						hifnstats.hst_nomem_mcl++;
						err = dma->cmdu ? ERESTART : ENOMEM;
						mlast->m_next = m;
						m_freem(m0);
						goto err_srcmap;
					}
					len = MCLBYTES;
				}

				m->m_len = len;
				m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dst_m = m0;
#else
			device_printf(sc->sc_dev,
			    "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
			    __FILE__, __LINE__);
			err = EINVAL;
			goto err_srcmap;
#endif
		} else {
			device_printf(sc->sc_dev,
			    "%s,%d: unaligned contig buffers not implemented\n",
			    __FILE__, __LINE__);
			err = EINVAL;
			goto err_srcmap;
		}
	}

	/* map a separate destination only when one was set up above */
	if (cmd->dst_map == NULL) {
		if (crp->crp_flags & CRYPTO_F_SKBUF) {
			if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
				hifnstats.hst_nomem_map++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else {
			if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_nsegs, cmd->dst_nsegs);
	}
#endif

#if 0
	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_PREREAD);
	}
#endif

	/*
	 * need N src, and N dst
	 */
	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
				dma->srcu, cmd->src_nsegs,
				dma->dstu, cmd->dst_nsegs);
		}
#endif
		hifnstats.hst_nomem_sd++;
		err = ERESTART;
		goto err_dstmap;
	}

	/* claim a command slot, arming the ring's jump descriptor if we
	 * wrapped (two-step VALID write with wmb, as everywhere else) */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	wmb();
	dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_mapsize;

	hifn_dmamap_load_src(sc, cmd);

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		device_printf(sc->sc_dev, "load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
		wmb();
		dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	KASSERT(dma->hifn_commands[resi] == NULL,
		("hifn_crypto: command slot %u busy", resi));
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	/* batch interrupts: mask the done IRQ while the caller hints more
	 * work is coming and the batch limit hasn't been reached */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
		wmb();
		dma->resr[resi].l |= htole32(HIFN_D_VALID);
		sc->sc_curbatch++;
		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
			hifnstats.hst_maxbatch = sc->sc_curbatch;
		hifnstats.hst_totbatch++;
	} else {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
		wmb();
		dma->resr[resi].l |= htole32(HIFN_D_VALID);
		sc->sc_curbatch = 0;
	}
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;

	/* remember which slop word belongs to this operation */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	/* enable whichever DMA engines are currently idle */
	csr = 0;
	if (sc->sc_c_busy == 0) {
		csr |= HIFN_DMACSR_C_CTRL_ENA;
		sc->sc_c_busy = 1;
	}
	if (sc->sc_s_busy == 0) {
		csr |= HIFN_DMACSR_S_CTRL_ENA;
		sc->sc_s_busy = 1;
	}
	if (sc->sc_r_busy == 0) {
		csr |= HIFN_DMACSR_R_CTRL_ENA;
		sc->sc_r_busy = 1;
	}
	if (sc->sc_d_busy == 0) {
		csr |= HIFN_DMACSR_D_CTRL_ENA;
		sc->sc_d_busy = 1;
	}
	if (csr)
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
	}
#endif

	/* restart the watchdog countdown (decremented by hifn_tick) */
	sc->sc_active = 5;
	HIFN_UNLOCK(sc);
	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		pci_unmap_buf(sc, &cmd->dst);
err_dstmap1:
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		if (cmd->src_skb != cmd->dst_skb)
#ifdef NOTYET
			m_freem(cmd->dst_m);
#else
			device_printf(sc->sc_dev,
			    "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
			    __FILE__, __LINE__);
#endif
	}
	pci_unmap_buf(sc, &cmd->src);
err_srcmap1:
	HIFN_UNLOCK(sc);
	return (err);
}
2082
2083 static void
2084 hifn_tick(unsigned long arg)
2085 {
2086 struct hifn_softc *sc;
2087 unsigned long l_flags;
2088
2089 if (arg >= HIFN_MAX_CHIPS)
2090 return;
2091 sc = hifn_chip_idx[arg];
2092 if (!sc)
2093 return;
2094
2095 HIFN_LOCK(sc);
2096 if (sc->sc_active == 0) {
2097 struct hifn_dma *dma = sc->sc_dma;
2098 u_int32_t r = 0;
2099
2100 if (dma->cmdu == 0 && sc->sc_c_busy) {
2101 sc->sc_c_busy = 0;
2102 r |= HIFN_DMACSR_C_CTRL_DIS;
2103 }
2104 if (dma->srcu == 0 && sc->sc_s_busy) {
2105 sc->sc_s_busy = 0;
2106 r |= HIFN_DMACSR_S_CTRL_DIS;
2107 }
2108 if (dma->dstu == 0 && sc->sc_d_busy) {
2109 sc->sc_d_busy = 0;
2110 r |= HIFN_DMACSR_D_CTRL_DIS;
2111 }
2112 if (dma->resu == 0 && sc->sc_r_busy) {
2113 sc->sc_r_busy = 0;
2114 r |= HIFN_DMACSR_R_CTRL_DIS;
2115 }
2116 if (r)
2117 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2118 } else
2119 sc->sc_active--;
2120 HIFN_UNLOCK(sc);
2121 mod_timer(&sc->sc_tickto, jiffies + HZ);
2122 }
2123
/*
 * DMA interrupt handler.  Acknowledges whatever the DMA unit raised,
 * escalates fatal conditions (a descriptor abort on any ring forces a
 * full board reset via hifn_abort()), then reaps completed descriptors
 * from the result, source and command rings.  Completed operations are
 * handed to hifn_callback(); finally, if the framework was blocked on
 * device resources, it is told it may submit again.
 */
static irqreturn_t
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
hifn_intr(int irq, void *arg)
#else
hifn_intr(int irq, void *arg, struct pt_regs *regs)
#endif
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;
	unsigned long l_flags;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted: not our interrupt. */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return IRQ_NONE;

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}
#endif

	/* acknowledge only the sources we have enabled */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	/* public-key unit completion (parts with a PK engine only) */
	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
			READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	/* overruns are logged but processing continues */
	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	/* an abort on any ring is fatal: reset the board and bail out */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return IRQ_HANDLED;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* still owned by the device: stop reaping here */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			/* NOTE(review): the extra slot at index
			 * HIFN_D_RES_RSIZE looks like the ring's wrap
			 * descriptor (no command attached) — confirm
			 * against the ring setup code. */
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL,
			    ("hifn_intr: null command slot %u", i));
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* digest starts 12 bytes into the result buffer */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	dma->resk = i; dma->resu = u;

	/* retire completed source descriptors */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->srck = i; dma->srcu = u;

	/* retire completed command descriptors */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
				"wakeup crypto (%x) u %d/%d/%d/%d\n",
				sc->sc_needwakeup,
				dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif
		/* let the crypto framework resubmit queued requests */
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}

	return IRQ_HANDLED;
}
2282
2283 /*
2284 * Allocate a new 'session' and return an encoded session id. 'sidp'
2285 * contains our registration id, and should contain an encoded session
2286 * id on successful allocation.
2287 */
2288 static int
2289 hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
2290 {
2291 struct hifn_softc *sc = device_get_softc(dev);
2292 struct cryptoini *c;
2293 int mac = 0, cry = 0, sesn;
2294 struct hifn_session *ses = NULL;
2295 unsigned long l_flags;
2296
2297 DPRINTF("%s()\n", __FUNCTION__);
2298
2299 KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2300 if (sidp == NULL || cri == NULL || sc == NULL) {
2301 DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
2302 return (EINVAL);
2303 }
2304
2305 HIFN_LOCK(sc);
2306 if (sc->sc_sessions == NULL) {
2307 ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
2308 SLAB_ATOMIC);
2309 if (ses == NULL) {
2310 HIFN_UNLOCK(sc);
2311 return (ENOMEM);
2312 }
2313 sesn = 0;
2314 sc->sc_nsessions = 1;
2315 } else {
2316 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2317 if (!sc->sc_sessions[sesn].hs_used) {
2318 ses = &sc->sc_sessions[sesn];
2319 break;
2320 }
2321 }
2322
2323 if (ses == NULL) {
2324 sesn = sc->sc_nsessions;
2325 ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
2326 SLAB_ATOMIC);
2327 if (ses == NULL) {
2328 HIFN_UNLOCK(sc);
2329 return (ENOMEM);
2330 }
2331 bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2332 bzero(sc->sc_sessions, sesn * sizeof(*ses));
2333 kfree(sc->sc_sessions);
2334 sc->sc_sessions = ses;
2335 ses = &sc->sc_sessions[sesn];
2336 sc->sc_nsessions++;
2337 }
2338 }
2339 HIFN_UNLOCK(sc);
2340
2341 bzero(ses, sizeof(*ses));
2342 ses->hs_used = 1;
2343
2344 for (c = cri; c != NULL; c = c->cri_next) {
2345 switch (c->cri_alg) {
2346 case CRYPTO_MD5:
2347 case CRYPTO_SHA1:
2348 case CRYPTO_MD5_HMAC:
2349 case CRYPTO_SHA1_HMAC:
2350 if (mac) {
2351 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2352 return (EINVAL);
2353 }
2354 mac = 1;
2355 ses->hs_mlen = c->cri_mlen;
2356 if (ses->hs_mlen == 0) {
2357 switch (c->cri_alg) {
2358 case CRYPTO_MD5:
2359 case CRYPTO_MD5_HMAC:
2360 ses->hs_mlen = 16;
2361 break;
2362 case CRYPTO_SHA1:
2363 case CRYPTO_SHA1_HMAC:
2364 ses->hs_mlen = 20;
2365 break;
2366 }
2367 }
2368 break;
2369 case CRYPTO_DES_CBC:
2370 case CRYPTO_3DES_CBC:
2371 case CRYPTO_AES_CBC:
2372 /* XXX this may read fewer, does it matter? */
2373 read_random(ses->hs_iv,
2374 c->cri_alg == CRYPTO_AES_CBC ?
2375 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2376 /*FALLTHROUGH*/
2377 case CRYPTO_ARC4:
2378 if (cry) {
2379 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2380 return (EINVAL);
2381 }
2382 cry = 1;
2383 break;
2384 default:
2385 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2386 return (EINVAL);
2387 }
2388 }
2389 if (mac == 0 && cry == 0) {
2390 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2391 return (EINVAL);
2392 }
2393
2394 *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2395
2396 return (0);
2397 }
2398
2399 /*
2400 * Deallocate a session.
2401 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2402 * XXX to blow away any keys already stored there.
2403 */
2404 static int
2405 hifn_freesession(device_t dev, u_int64_t tid)
2406 {
2407 struct hifn_softc *sc = device_get_softc(dev);
2408 int session, error;
2409 u_int32_t sid = CRYPTO_SESID2LID(tid);
2410 unsigned long l_flags;
2411
2412 DPRINTF("%s()\n", __FUNCTION__);
2413
2414 KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2415 if (sc == NULL) {
2416 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2417 return (EINVAL);
2418 }
2419
2420 HIFN_LOCK(sc);
2421 session = HIFN_SESSION(sid);
2422 if (session < sc->sc_nsessions) {
2423 bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
2424 error = 0;
2425 } else {
2426 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2427 error = EINVAL;
2428 }
2429 HIFN_UNLOCK(sc);
2430
2431 return (error);
2432 }
2433
2434 static int
2435 hifn_process(device_t dev, struct cryptop *crp, int hint)
2436 {
2437 struct hifn_softc *sc = device_get_softc(dev);
2438 struct hifn_command *cmd = NULL;
2439 int session, err, ivlen;
2440 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2441
2442 DPRINTF("%s()\n", __FUNCTION__);
2443
2444 if (crp == NULL || crp->crp_callback == NULL) {
2445 hifnstats.hst_invalid++;
2446 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2447 return (EINVAL);
2448 }
2449 session = HIFN_SESSION(crp->crp_sid);
2450
2451 if (sc == NULL || session >= sc->sc_nsessions) {
2452 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2453 err = EINVAL;
2454 goto errout;
2455 }
2456
2457 cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
2458 if (cmd == NULL) {
2459 hifnstats.hst_nomem++;
2460 err = ENOMEM;
2461 goto errout;
2462 }
2463 memset(cmd, 0, sizeof(*cmd));
2464
2465 if (crp->crp_flags & CRYPTO_F_SKBUF) {
2466 cmd->src_skb = (struct sk_buff *)crp->crp_buf;
2467 cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
2468 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2469 cmd->src_io = (struct uio *)crp->crp_buf;
2470 cmd->dst_io = (struct uio *)crp->crp_buf;
2471 } else {
2472 cmd->src_buf = crp->crp_buf;
2473 cmd->dst_buf = crp->crp_buf;
2474 }
2475
2476 crd1 = crp->crp_desc;
2477 if (crd1 == NULL) {
2478 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2479 err = EINVAL;
2480 goto errout;
2481 }
2482 crd2 = crd1->crd_next;
2483
2484 if (crd2 == NULL) {
2485 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2486 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2487 crd1->crd_alg == CRYPTO_SHA1 ||
2488 crd1->crd_alg == CRYPTO_MD5) {
2489 maccrd = crd1;
2490 enccrd = NULL;
2491 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2492 crd1->crd_alg == CRYPTO_3DES_CBC ||
2493 crd1->crd_alg == CRYPTO_AES_CBC ||
2494 crd1->crd_alg == CRYPTO_ARC4) {
2495 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2496 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2497 maccrd = NULL;
2498 enccrd = crd1;
2499 } else {
2500 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2501 err = EINVAL;
2502 goto errout;
2503 }
2504 } else {
2505 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2506 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2507 crd1->crd_alg == CRYPTO_MD5 ||
2508 crd1->crd_alg == CRYPTO_SHA1) &&
2509 (crd2->crd_alg == CRYPTO_DES_CBC ||
2510 crd2->crd_alg == CRYPTO_3DES_CBC ||
2511 crd2->crd_alg == CRYPTO_AES_CBC ||
2512 crd2->crd_alg == CRYPTO_ARC4) &&
2513 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2514 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2515 maccrd = crd1;
2516 enccrd = crd2;
2517 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2518 crd1->crd_alg == CRYPTO_ARC4 ||
2519 crd1->crd_alg == CRYPTO_3DES_CBC ||
2520 crd1->crd_alg == CRYPTO_AES_CBC) &&
2521 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2522 crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2523 crd2->crd_alg == CRYPTO_MD5 ||
2524 crd2->crd_alg == CRYPTO_SHA1) &&
2525 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2526 enccrd = crd1;
2527 maccrd = crd2;
2528 } else {
2529 /*
2530 * We cannot order the 7751 as requested
2531 */
2532 DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
2533 err = EINVAL;
2534 goto errout;
2535 }
2536 }
2537
2538 if (enccrd) {
2539 cmd->enccrd = enccrd;
2540 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2541 switch (enccrd->crd_alg) {
2542 case CRYPTO_ARC4:
2543 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2544 break;
2545 case CRYPTO_DES_CBC:
2546 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2547 HIFN_CRYPT_CMD_MODE_CBC |
2548 HIFN_CRYPT_CMD_NEW_IV;
2549 break;
2550 case CRYPTO_3DES_CBC:
2551 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2552 HIFN_CRYPT_CMD_MODE_CBC |
2553 HIFN_CRYPT_CMD_NEW_IV;
2554 break;
2555 case CRYPTO_AES_CBC:
2556 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2557 HIFN_CRYPT_CMD_MODE_CBC |
2558 HIFN_CRYPT_CMD_NEW_IV;
2559 break;
2560 default:
2561 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2562 err = EINVAL;
2563 goto errout;
2564 }
2565 if (enccrd->crd_alg != CRYPTO_ARC4) {
2566 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2567 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2568 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2569 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2570 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2571 else
2572 bcopy(sc->sc_sessions[session].hs_iv,
2573 cmd->iv, ivlen);
2574
2575 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2576 == 0) {
2577 crypto_copyback(crp->crp_flags,
2578 crp->crp_buf, enccrd->crd_inject,
2579 ivlen, cmd->iv);
2580 }
2581 } else {
2582 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2583 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2584 else {
2585 crypto_copydata(crp->crp_flags,
2586 crp->crp_buf, enccrd->crd_inject,
2587 ivlen, cmd->iv);
2588 }
2589 }
2590 }
2591
2592 if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
2593 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2594 cmd->ck = enccrd->crd_key;
2595 cmd->cklen = enccrd->crd_klen >> 3;
2596 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2597
2598 /*
2599 * Need to specify the size for the AES key in the masks.
2600 */
2601 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2602 HIFN_CRYPT_CMD_ALG_AES) {
2603 switch (cmd->cklen) {
2604 case 16:
2605 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2606 break;
2607 case 24:
2608 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2609 break;
2610 case 32:
2611 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2612 break;
2613 default:
2614 DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
2615 err = EINVAL;
2616 goto errout;
2617 }
2618 }
2619 }
2620
2621 if (maccrd) {
2622 cmd->maccrd = maccrd;
2623 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2624
2625 switch (maccrd->crd_alg) {
2626 case CRYPTO_MD5:
2627 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2628 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2629 HIFN_MAC_CMD_POS_IPSEC;
2630 break;
2631 case CRYPTO_MD5_HMAC:
2632 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2633 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2634 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2635 break;
2636 case CRYPTO_SHA1:
2637 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2638 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2639 HIFN_MAC_CMD_POS_IPSEC;
2640 break;
2641 case CRYPTO_SHA1_HMAC:
2642 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2643 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2644 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2645 break;
2646 }
2647
2648 if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2649 maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2650 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2651 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2652 bzero(cmd->mac + (maccrd->crd_klen >> 3),
2653 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2654 }
2655 }
2656
2657 cmd->crp = crp;
2658 cmd->session_num = session;
2659 cmd->softc = sc;
2660
2661 err = hifn_crypto(sc, cmd, crp, hint);
2662 if (!err) {
2663 return 0;
2664 } else if (err == ERESTART) {
2665 /*
2666 * There weren't enough resources to dispatch the request
2667 * to the part. Notify the caller so they'll requeue this
2668 * request and resubmit it again soon.
2669 */
2670 #ifdef HIFN_DEBUG
2671 if (hifn_debug)
2672 device_printf(sc->sc_dev, "requeue request\n");
2673 #endif
2674 kfree(cmd);
2675 sc->sc_needwakeup |= CRYPTO_SYMQ;
2676 return (err);
2677 }
2678
2679 errout:
2680 if (cmd != NULL)
2681 kfree(cmd);
2682 if (err == EINVAL)
2683 hifnstats.hst_invalid++;
2684 else
2685 hifnstats.hst_nomem++;
2686 crp->crp_etype = err;
2687 crypto_done(crp);
2688 return (err);
2689 }
2690
/*
 * Recover from a fatal DMA error (called from hifn_intr() with the
 * softc lock held after a descriptor abort).  Walks the result ring:
 * operations the device already finished are completed normally via
 * hifn_callback(); the rest are failed with ENOMEM.  Finally the board
 * is reset and reinitialized.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	DPRINTF("%s()\n", __FUNCTION__);

	/* walk the in-use portion of the result ring */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* digest starts 12 bytes into the result buffer */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* still owned by the device: fail it */
#if 0
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}
#endif

			if (cmd->src_skb != cmd->dst_skb) {
#ifdef NOTYET
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
#else
				/* separate src/dst skbs are not supported yet */
				device_printf(sc->sc_dev,
					"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
					__FILE__, __LINE__);
#endif
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				pci_unmap_buf(sc, &cmd->dst);
			} else
				crp->crp_etype = ENOMEM;

			pci_unmap_buf(sc, &cmd->src);

			kfree(cmd);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* full reset: board, DMA state, and PCI registers */
	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2771
/*
 * Per-request completion processing, invoked from hifn_intr() and
 * hifn_abort() (softc lock held): copy back any "slop" bytes and the
 * MAC result, retire finished destination descriptors, save the last
 * cipher block as the next session IV after a CBC encrypt, then unmap
 * the buffers and complete the request through crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	int i, u, ivlen;

	DPRINTF("%s()\n", __FUNCTION__);

#if 0
	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}
#endif

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		if (cmd->src_skb != cmd->dst_skb) {
#ifdef NOTYET
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
#else
			/* separate src/dst skbs are not supported yet */
			device_printf(sc->sc_dev,
				"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
				__FILE__, __LINE__);
#endif
		}
	}

	/* copy back trailing bytes staged in the shared slop area */
	if (cmd->sloplen != 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* retire completed destination descriptors */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
#if 0
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#endif
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			/* still owned by the device: stop here */
#if 0
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#endif
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/*
	 * After a CBC encrypt (CRYPT set, DECODE clear), save the last
	 * ivlen bytes of ciphertext as the session IV for chaining into
	 * the next request.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

	/* copy the digest into the caller's buffer at crd_inject */
	if (macbuf != NULL) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			/* hs_mlen was defaulted at session creation */
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map)
		pci_unmap_buf(sc, &cmd->dst);
	pci_unmap_buf(sc, &cmd->src);
	kfree(cmd);
	crypto_done(crp);
}
2882
2883 /*
2884 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2885 * and Group 1 registers; avoid conditions that could create
2886 * burst writes by doing a read in between the writes.
2887 *
2888 * NB: The read we interpose is always to the same register;
2889 * we do this because reading from an arbitrary (e.g. last)
2890 * register may not always work.
2891 */
2892 static void
2893 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2894 {
2895 if (sc->sc_flags & HIFN_IS_7811) {
2896 if (sc->sc_bar0_lastreg == reg - 4)
2897 readl(sc->sc_bar0 + HIFN_0_PUCNFG);
2898 sc->sc_bar0_lastreg = reg;
2899 }
2900 writel(val, sc->sc_bar0 + reg);
2901 }
2902
2903 static void
2904 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2905 {
2906 if (sc->sc_flags & HIFN_IS_7811) {
2907 if (sc->sc_bar1_lastreg == reg - 4)
2908 readl(sc->sc_bar1 + HIFN_1_REVID);
2909 sc->sc_bar1_lastreg = reg;
2910 }
2911 writel(val, sc->sc_bar1 + reg);
2912 }
2913
2914
/* PCI vendor/device IDs this driver binds to (7951/7955/7956/7811/7751
 * family plus the NetSec and Invertex rebrands). */
static struct pci_device_id hifn_pci_tbl[] = {
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	/*
	 * Other vendors share this PCI ID as well, such as
	 * http://www.powercrypt.com, and obviously they also
	 * use the same key.
	 */
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0, 0, 0, 0, 0, 0, }
};
MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
2938
/* PCI driver glue; per-device setup/teardown live in hifn_probe() and
 * hifn_remove() (defined earlier in this file). */
static struct pci_driver hifn_driver = {
	.name         = "hifn",
	.id_table     = hifn_pci_tbl,
	.probe        = hifn_probe,
	.remove       = hifn_remove,
	/* add PM stuff here one day */
};
2946
2947 static int __init hifn_init (void)
2948 {
2949 struct hifn_softc *sc = NULL;
2950 int rc;
2951
2952 DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
2953
2954 rc = pci_register_driver(&hifn_driver);
2955 pci_register_driver_compat(&hifn_driver, rc);
2956
2957 return rc;
2958 }
2959
/* Module unload: unregister the PCI driver; per-device teardown is
 * handled through the driver's .remove hook (hifn_remove). */
static void __exit hifn_exit (void)
{
	pci_unregister_driver(&hifn_driver);
}
2964
module_init(hifn_init);
module_exit(hifn_exit);

/* Dual-licensed under the BSD terms in the header at the top of this file. */
MODULE_LICENSE("BSD");
MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
This page took 0.173955 seconds and 5 git commands to generate.