1 --- a/drivers/char/random.c
2 +++ b/drivers/char/random.c
5 * void add_interrupt_randomness(int irq);
7 + * void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
8 + * int random_input_wait(void);
10 * add_input_randomness() uses the input layer interrupt timing, as well as
11 * the event type information from the hardware.
14 * a better measure, since the timing of the disk interrupts are more
17 + * random_input_words() just provides a raw block of entropy to the input
18 + * pool, such as from a hardware entropy generator.
20 + * random_input_wait() suspends the caller until such time as the
21 + * entropy pool falls below the write threshold, and returns a count of how
22 + * much entropy (in bits) is needed to sustain the pool.
24 * All of these routines try to estimate how many bits of randomness a
25 * particular randomness source. They do this by keeping track of the
26 * first and second order deltas of the event timings.
27 @@ -667,6 +677,61 @@ void add_disk_randomness(struct gendisk
32 + * random_input_words - add bulk entropy to pool
34 + * @buf: buffer to add
35 + * @wordcount: number of __u32 words to add
36 + * @ent_count: total amount of entropy (in bits) to credit
38 + * this provides bulk input of entropy to the input pool
41 +void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
43 + mix_pool_bytes(&input_pool, buf, wordcount*4);
45 + credit_entropy_bits(&input_pool, ent_count);
47 + DEBUG_ENT("crediting %d bits => %d\n",
48 + ent_count, input_pool.entropy_count);
50 + * Wake up waiting processes if we have enough
53 + if (input_pool.entropy_count >= random_read_wakeup_thresh)
54 + wake_up_interruptible(&random_read_wait);
56 +EXPORT_SYMBOL(random_input_words);
59 + * random_input_wait - wait until random needs entropy
61 + * this function sleeps until the /dev/random subsystem actually
62 + * needs more entropy, and then returns the amount of entropy
63 + * that it would be nice to have added to the system.
65 +int random_input_wait(void)
69 + wait_event_interruptible(random_write_wait,
70 + input_pool.entropy_count < random_write_wakeup_thresh);
72 + count = random_write_wakeup_thresh - input_pool.entropy_count;
74 + /* likely we got woken up due to a signal */
75 + if (count <= 0) count = random_read_wakeup_thresh;
77 + DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
79 + input_pool.entropy_count, random_write_wakeup_thresh);
83 +EXPORT_SYMBOL(random_input_wait);
86 #define EXTRACT_SIZE 10
88 /*********************************************************************
91 @@ -191,6 +191,7 @@ asmlinkage long sys_dup(unsigned int fil
92 ret = dupfd(file, 0, 0);
95 +EXPORT_SYMBOL(sys_dup);
97 #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)
99 --- a/include/linux/miscdevice.h
100 +++ b/include/linux/miscdevice.h
102 #define APOLLO_MOUSE_MINOR 7
103 #define PC110PAD_MINOR 9
104 /*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
105 +#define CRYPTODEV_MINOR 70 /* /dev/crypto */
106 #define WATCHDOG_MINOR 130 /* Watchdog timer */
107 #define TEMP_MINOR 131 /* Temperature Sensor */
108 #define RTC_MINOR 135
109 --- a/include/linux/random.h
110 +++ b/include/linux/random.h
112 #define _LINUX_RANDOM_H
114 #include <linux/ioctl.h>
115 +#include <linux/types.h> /* for __u32 in user space */
117 /* ioctl()'s for the random number generator */
120 /* Clear the entropy pool and associated counters. (Superuser only.) */
121 #define RNDCLEARPOOL _IO( 'R', 0x06 )
123 +#ifdef CONFIG_FIPS_RNG
125 +/* Size of seed value - equal to AES blocksize */
126 +#define AES_BLOCK_SIZE_BYTES 16
127 +#define SEED_SIZE_BYTES AES_BLOCK_SIZE_BYTES
128 +/* Size of AES key */
129 +#define KEY_SIZE_BYTES 16
131 +/* ioctl() structure used by FIPS 140-2 Tests */
132 +struct rand_fips_test {
133 + unsigned char key[KEY_SIZE_BYTES]; /* Input */
134 + unsigned char datetime[SEED_SIZE_BYTES]; /* Input */
135 + unsigned char seed[SEED_SIZE_BYTES]; /* Input */
136 + unsigned char result[SEED_SIZE_BYTES]; /* Output */
139 +/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */
140 +#define RNDFIPSVST _IOWR('R', 0x10, struct rand_fips_test)
142 +/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) */
143 +#define RNDFIPSMCT _IOWR('R', 0x11, struct rand_fips_test)
145 +#endif /* #ifdef CONFIG_FIPS_RNG */
147 struct rand_pool_info {
150 @@ -48,6 +73,10 @@ extern void add_input_randomness(unsigne
152 extern void add_interrupt_randomness(int irq);
154 +extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count);
155 +extern int random_input_wait(void);
156 +#define HAS_RANDOM_INPUT_WAIT 1
158 extern void get_random_bytes(void *buf, int nbytes);
159 void generate_random_uuid(unsigned char uuid_out[16]);
162 +++ b/crypto/ocf/hifn/Makefile
164 +# for SGlinux builds
165 +-include $(ROOTDIR)/modules/.config
167 +obj-$(CONFIG_OCF_HIFN) += hifn7751.o
168 +obj-$(CONFIG_OCF_HIFNHIPP) += hifnHIPP.o
171 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
174 +-include $(TOPDIR)/Rules.make
178 +++ b/crypto/ocf/safe/Makefile
180 +# for SGlinux builds
181 +-include $(ROOTDIR)/modules/.config
183 +obj-$(CONFIG_OCF_SAFE) += safe.o
186 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
189 +-include $(TOPDIR)/Rules.make
193 +++ b/crypto/ocf/Makefile
195 +# for SGlinux builds
196 +-include $(ROOTDIR)/modules/.config
198 +OCF_OBJS = crypto.o criov.o
200 +ifdef CONFIG_OCF_RANDOMHARVEST
201 + OCF_OBJS += random.o
204 +ifdef CONFIG_OCF_FIPS
205 + OCF_OBJS += rndtest.o
208 +# Add in autoconf.h to get #defines for CONFIG_xxx
209 +AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
210 +ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
211 + EXTRA_CFLAGS += -include $(AUTOCONF_H)
212 + export EXTRA_CFLAGS
218 + mod-subdirs := safe hifn ixp4xx talitos ocfnull
219 + export-objs += crypto.o criov.o random.o
220 + list-multi += ocf.o
227 +EXTRA_CFLAGS += -I$(obj)/.
229 +obj-$(CONFIG_OCF_OCF) += ocf.o
230 +obj-$(CONFIG_OCF_CRYPTODEV) += cryptodev.o
231 +obj-$(CONFIG_OCF_CRYPTOSOFT) += cryptosoft.o
232 +obj-$(CONFIG_OCF_BENCH) += ocf-bench.o
234 +$(_obj)-$(CONFIG_OCF_SAFE) += safe$(_slash)
235 +$(_obj)-$(CONFIG_OCF_HIFN) += hifn$(_slash)
236 +$(_obj)-$(CONFIG_OCF_IXP4XX) += ixp4xx$(_slash)
237 +$(_obj)-$(CONFIG_OCF_TALITOS) += talitos$(_slash)
238 +$(_obj)-$(CONFIG_OCF_PASEMI) += pasemi$(_slash)
239 +$(_obj)-$(CONFIG_OCF_EP80579) += ep80579$(_slash)
240 +$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
242 +ocf-objs := $(OCF_OBJS)
244 +$(list-multi) dummy1: $(ocf-objs)
245 + $(LD) -r -o $@ $(ocf-objs)
249 + rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
250 + rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
253 +-include $(TOPDIR)/Rules.make
257 +# release gen targets
262 + REL=`date +%Y%m%d`; \
263 + patch=ocf-linux-$$REL.patch; \
264 + patch24=ocf-linux-24-$$REL.patch; \
265 + patch26=ocf-linux-26-$$REL.patch; \
267 + find . -name Makefile; \
268 + find . -name Config.in; \
269 + find . -name Kconfig; \
270 + find . -name README; \
271 + find . -name '*.[ch]' | grep -v '.mod.c'; \
272 + ) | while read t; do \
273 + diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
275 + cat patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
276 + cat patches/linux-2.6.26-ocf.patch $$patch > $$patch26
280 + REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
282 + rm -rf /tmp/ocf-linux-$$REL*; \
283 + mkdir -p $$RELDIR/tools; \
284 + cp README* $$RELDIR; \
285 + cp patches/openss*.patch $$RELDIR; \
286 + cp patches/crypto-tools.patch $$RELDIR; \
287 + cp tools/[!C]* $$RELDIR/tools; \
289 + tar cvf $$RELDIR/ocf-linux.tar \
294 + --exclude=*.mod.* \
295 + --exclude=README* \
296 + --exclude=ocf-*.patch \
297 + --exclude=ocf/patches/openss*.patch \
298 + --exclude=ocf/patches/crypto-tools.patch \
299 + --exclude=ocf/tools \
301 + gzip -9 $$RELDIR/ocf-linux.tar; \
303 + tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
304 + gzip -9 ocf-linux-$$REL.tar; \
305 + cd $$CURDIR/../../user; \
306 + rm -rf /tmp/crypto-tools-$$REL*; \
307 + tar cvf /tmp/crypto-tools-$$REL.tar \
311 + --exclude=cryptotest \
312 + --exclude=cryptokeytest \
314 + gzip -9 /tmp/crypto-tools-$$REL.tar
317 +++ b/crypto/ocf/talitos/Makefile
319 +# for SGlinux builds
320 +-include $(ROOTDIR)/modules/.config
322 +obj-$(CONFIG_OCF_TALITOS) += talitos.o
325 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
328 +-include $(TOPDIR)/Rules.make
332 +++ b/crypto/ocf/ixp4xx/Makefile
334 +# for SGlinux builds
335 +-include $(ROOTDIR)/modules/.config
338 +# You will need to point this at your Intel ixp425 includes, this portion
339 +# of the Makefile only really works under SGLinux with the appropriate libs
340 +# installed. They can be downloaded from http://www.snapgear.org/
342 +ifeq ($(CONFIG_CPU_IXP46X),y)
345 +ifeq ($(CONFIG_CPU_IXP43X),y)
352 +ifdef CONFIG_IXP400_LIB_2_4
353 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp400_xscale_sw
354 +OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp_osal
356 +ifdef CONFIG_IXP400_LIB_2_1
357 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp400_xscale_sw
358 +OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp_osal
360 +ifdef CONFIG_IXP400_LIB_2_0
361 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp400_xscale_sw
362 +OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp_osal
365 +ifdef CONFIG_IXP400_LIB_2_4
368 + -I$(IX_XSCALE_SW)/src/include \
369 + -I$(OSAL_DIR)/common/include/ \
370 + -I$(OSAL_DIR)/common/include/modules/ \
371 + -I$(OSAL_DIR)/common/include/modules/ddk/ \
372 + -I$(OSAL_DIR)/common/include/modules/bufferMgt/ \
373 + -I$(OSAL_DIR)/common/include/modules/ioMem/ \
374 + -I$(OSAL_DIR)/common/os/linux/include/ \
375 + -I$(OSAL_DIR)/common/os/linux/include/core/ \
376 + -I$(OSAL_DIR)/common/os/linux/include/modules/ \
377 + -I$(OSAL_DIR)/common/os/linux/include/modules/ddk/ \
378 + -I$(OSAL_DIR)/common/os/linux/include/modules/bufferMgt/ \
379 + -I$(OSAL_DIR)/common/os/linux/include/modules/ioMem/ \
380 + -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/include/ \
381 + -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/os/linux/include/ \
382 + -DENABLE_IOMEM -DENABLE_BUFFERMGT -DENABLE_DDK \
383 + -DUSE_IXP4XX_CRYPTO
387 + -I$(IX_XSCALE_SW)/src/include \
389 + -I$(OSAL_DIR)/os/linux/include/ \
390 + -I$(OSAL_DIR)/os/linux/include/modules/ \
391 + -I$(OSAL_DIR)/os/linux/include/modules/ioMem/ \
392 + -I$(OSAL_DIR)/os/linux/include/modules/bufferMgt/ \
393 + -I$(OSAL_DIR)/os/linux/include/core/ \
394 + -I$(OSAL_DIR)/os/linux/include/platforms/ \
395 + -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ \
396 + -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp425 \
397 + -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp465 \
398 + -I$(OSAL_DIR)/os/linux/include/core/ \
399 + -I$(OSAL_DIR)/include/ \
400 + -I$(OSAL_DIR)/include/modules/ \
401 + -I$(OSAL_DIR)/include/modules/bufferMgt/ \
402 + -I$(OSAL_DIR)/include/modules/ioMem/ \
403 + -I$(OSAL_DIR)/include/platforms/ \
404 + -I$(OSAL_DIR)/include/platforms/ixp400/ \
405 + -DUSE_IXP4XX_CRYPTO
408 +ifdef CONFIG_IXP400_LIB_1_4
411 + -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/include \
412 + -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/linux \
413 + -DUSE_IXP4XX_CRYPTO
416 +IXPDIR = ixp-version-is-not-supported
419 +ifeq ($(CONFIG_CPU_IXP46X),y)
420 +IXP_CFLAGS += -D__ixp46X
422 +ifeq ($(CONFIG_CPU_IXP43X),y)
423 +IXP_CFLAGS += -D__ixp43X
425 +IXP_CFLAGS += -D__ixp42X
429 +obj-$(CONFIG_OCF_IXP4XX) += ixp4xx.o
432 +EXTRA_CFLAGS += $(IXP_CFLAGS) -I$(obj)/.. -I$(obj)/.
435 +-include $(TOPDIR)/Rules.make
439 +++ b/crypto/ocf/ocfnull/Makefile
441 +# for SGlinux builds
442 +-include $(ROOTDIR)/modules/.config
444 +obj-$(CONFIG_OCF_OCFNULL) += ocfnull.o
447 +EXTRA_CFLAGS += -I$(obj)/..
450 +-include $(TOPDIR)/Rules.make
454 +++ b/crypto/ocf/ep80579/Makefile
456 +#########################################################################
459 +# all - builds everything and installs
460 +# install - identical to all
461 +# depend - build dependencies
462 +# clean - clears derived objects except the .depend files
463 +# distclean- clears all derived objects and the .depend file
466 +# This file is provided under a dual BSD/GPLv2 license. When using or
467 +# redistributing this file, you may do so under either license.
469 +# GPL LICENSE SUMMARY
471 +# Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
473 +# This program is free software; you can redistribute it and/or modify
474 +# it under the terms of version 2 of the GNU General Public License as
475 +# published by the Free Software Foundation.
477 +# This program is distributed in the hope that it will be useful, but
478 +# WITHOUT ANY WARRANTY; without even the implied warranty of
479 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
480 +# General Public License for more details.
482 +# You should have received a copy of the GNU General Public License
483 +# along with this program; if not, write to the Free Software
484 +# Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
485 +# The full GNU General Public License is included in this distribution
486 +# in the file called LICENSE.GPL.
488 +# Contact Information:
493 +# Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
494 +# All rights reserved.
496 +# Redistribution and use in source and binary forms, with or without
497 +# modification, are permitted provided that the following conditions
500 +# * Redistributions of source code must retain the above copyright
501 +# notice, this list of conditions and the following disclaimer.
502 +# * Redistributions in binary form must reproduce the above copyright
503 +# notice, this list of conditions and the following disclaimer in
504 +# the documentation and/or other materials provided with the
506 +# * Neither the name of Intel Corporation nor the names of its
507 +# contributors may be used to endorse or promote products derived
508 +# from this software without specific prior written permission.
510 +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
511 +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
512 +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
513 +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
514 +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
515 +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
516 +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
517 +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
518 +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
519 +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
520 +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
523 +# version: Security.L.1.0.130
524 +############################################################################
527 +####################Common variables and definitions########################
529 +# Ensure The ENV_DIR environmental var is defined.
531 +$(error ICP_ENV_DIR is undefined. Please set the path to your environment makefile \
532 + "-> setenv ICP_ENV_DIR <path>")
535 +#Add your project environment Makefile
536 +include $(ICP_ENV_DIR)/environment.mk
538 +#include the makefile with all the default and common Make variable definitions
539 +include $(ICP_BUILDSYSTEM_PATH)/build_files/common.mk
541 +#Add the name for the executable, Library or Module output definitions
542 +OUTPUT_NAME= icp_ocf
544 +# List of Source Files to be compiled
545 +SOURCES= icp_common.c icp_sym.c icp_asym.c
547 +#common includes between all supported OSes
548 +INCLUDES= -I $(ICP_API_DIR) -I$(ICP_LAC_API) \
549 +-I$(ICP_OCF_SRC_DIR)
551 +# The location of the os level makefile needs to be changed.
552 +include $(ICP_ENV_DIR)/$(ICP_OS)_$(ICP_OS_LEVEL).mk
554 +# On the line directly below list the outputs you wish to build for,
555 +# e.g "lib_static lib_shared exe module" as shown below
558 +###################Include rules makefiles########################
559 +include $(ICP_BUILDSYSTEM_PATH)/build_files/rules.mk
560 +###################End of Rules inclusion#########################
564 +++ b/crypto/ocf/pasemi/Makefile
566 +# for SGlinux builds
567 +-include $(ROOTDIR)/modules/.config
569 +obj-$(CONFIG_OCF_PASEMI) += pasemi.o
572 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
575 +-include $(TOPDIR)/Rules.make
579 +++ b/crypto/ocf/Config.in
581 +#############################################################################
583 +mainmenu_option next_comment
584 +comment 'OCF Configuration'
585 +tristate 'OCF (Open Cryptographic Framework)' CONFIG_OCF_OCF
586 +dep_mbool ' enable fips RNG checks (fips check on RNG data before use)' \
587 + CONFIG_OCF_FIPS $CONFIG_OCF_OCF
588 +dep_mbool ' enable harvesting entropy for /dev/random' \
589 + CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
590 +dep_tristate ' cryptodev (user space support)' \
591 + CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
592 +dep_tristate ' cryptosoft (software crypto engine)' \
593 + CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
594 +dep_tristate ' safenet (HW crypto engine)' \
595 + CONFIG_OCF_SAFE $CONFIG_OCF_OCF
596 +dep_tristate ' IXP4xx (HW crypto engine)' \
597 + CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
598 +dep_mbool ' Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
599 + CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
600 +dep_tristate ' hifn (HW crypto engine)' \
601 + CONFIG_OCF_HIFN $CONFIG_OCF_OCF
602 +dep_tristate ' talitos (HW crypto engine)' \
603 + CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
604 +dep_tristate ' pasemi (HW crypto engine)' \
605 + CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
606 +dep_tristate ' ep80579 (HW crypto engine)' \
607 + CONFIG_OCF_EP80579 $CONFIG_OCF_OCF
608 +dep_tristate ' ocfnull (does no crypto)' \
609 + CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
610 +dep_tristate ' ocf-bench (HW crypto in-kernel benchmark)' \
611 + CONFIG_OCF_BENCH $CONFIG_OCF_OCF
614 +#############################################################################
616 +++ b/crypto/ocf/Kconfig
618 +menu "OCF Configuration"
621 +	tristate "OCF (Open Cryptographic Framework)"
623 + A linux port of the OpenBSD/FreeBSD crypto framework.
625 +config OCF_RANDOMHARVEST
626 + bool "crypto random --- harvest entropy for /dev/random"
629 + Includes code to harvest random numbers from devices that support it.
632 + bool "enable fips RNG checks"
633 + depends on OCF_OCF && OCF_RANDOMHARVEST
635 + Run all RNG provided data through a fips check before
636 +	  adding it to /dev/random's entropy pool.
638 +config OCF_CRYPTODEV
639 + tristate "cryptodev (user space support)"
642 + The user space API to access crypto hardware.
644 +config OCF_CRYPTOSOFT
645 + tristate "cryptosoft (software crypto engine)"
648 + A software driver for the OCF framework that uses
649 + the kernel CryptoAPI.
652 + tristate "safenet (HW crypto engine)"
655 + A driver for a number of the safenet Excel crypto accelerators.
656 + Currently tested and working on the 1141 and 1741.
659 + tristate "IXP4xx (HW crypto engine)"
662 + XScale IXP4xx crypto accelerator driver. Requires the
663 + Intel Access library.
665 +config OCF_IXP4XX_SHA1_MD5
666 + bool "IXP4xx SHA1 and MD5 Hashing"
667 + depends on OCF_IXP4XX
669 + Allows the IXP4xx crypto accelerator to perform SHA1 and MD5 hashing.
670 + Note: this is MUCH slower than using cryptosoft (software crypto engine).
673 + tristate "hifn (HW crypto engine)"
676 + OCF driver for various HIFN based crypto accelerators.
677 + (7951, 7955, 7956, 7751, 7811)
680 + tristate "Hifn HIPP (HW packet crypto engine)"
683 + OCF driver for various HIFN (HIPP) based crypto accelerators
687 + tristate "talitos (HW crypto engine)"
690 + OCF driver for Freescale's security engine (SEC/talitos).
693 + tristate "pasemi (HW crypto engine)"
694 + depends on OCF_OCF && PPC_PASEMI
696 + OCF driver for the PA Semi PWRficient DMA Engine
699 + tristate "ep80579 (HW crypto engine)"
702 + OCF driver for the Intel EP80579 Integrated Processor Product Line.
705 + tristate "ocfnull (fake crypto engine)"
708 + OCF driver for measuring ipsec overheads (does no crypto)
711 + tristate "ocf-bench (HW crypto in-kernel benchmark)"
714 + A very simple encryption test for the in-kernel interface
715 + of OCF. Also includes code to benchmark the IXP Access library
720 +++ b/crypto/ocf/README
722 +README - ocf-linux-20071215
723 +---------------------------
725 +This README provides instructions for getting ocf-linux compiled and
726 +operating in a generic linux environment. For other information you
727 +might like to visit the home page for this project:
729 + http://ocf-linux.sourceforge.net/
734 + Not much in this file for now, just some notes. I usually build
735 + the ocf support as modules but it can be built into the kernel as
738 + * mknod /dev/crypto c 10 70
740 + * to add OCF to your kernel source, you have two options. Apply
741 + the kernel specific patch:
743 + cd linux-2.4*; gunzip < ocf-linux-24-XXXXXXXX.patch.gz | patch -p1
744 + cd linux-2.6*; gunzip < ocf-linux-26-XXXXXXXX.patch.gz | patch -p1
746 + if you do one of the above, then you can proceed to the next step,
747 + or you can do the above process by hand with using the patches against
748 + linux-2.4.35 and 2.6.23 to include the ocf code under crypto/ocf.
749 + Here's how to add it:
751 + for 2.4.35 (and later)
753 + cd linux-2.4.35/crypto
754 + tar xvzf ocf-linux.tar.gz
756 + patch -p1 < crypto/ocf/patches/linux-2.4.35-ocf.patch
758 + for 2.6.23 (and later), find the kernel patch specific (or nearest)
759 + to your kernel versions and then:
761 + cd linux-2.6.NN/crypto
762 + tar xvzf ocf-linux.tar.gz
764 + patch -p1 < crypto/ocf/patches/linux-2.6.NN-ocf.patch
766 + It should be easy to take this patch and apply it to other more
767 + recent versions of the kernels. The same patches should also work
768 + relatively easily on kernels as old as 2.6.11 and 2.4.18.
770 + * under 2.4 if you are on a non-x86 platform, you may need to:
772 + cp linux-2.X.x/include/asm-i386/kmap_types.h linux-2.X.x/include/asm-YYY
774 + so that you can build the kernel crypto support needed for the cryptosoft
777 + * For simplicity you should enable all the crypto support in your kernel
778 + except for the test driver. Likewise for the OCF options. Do not
779 + enable OCF crypto drivers for HW that you do not have (for example
780 + ixp4xx will not compile on non-Xscale systems).
782 + * make sure that cryptodev.h (from ocf-linux.tar.gz) is installed as
783 + crypto/cryptodev.h in an include directory that is used for building
784 + applications for your platform. For example on a host system that
787 + /usr/include/crypto/cryptodev.h
789 + * patch your openssl-0.9.8i code with the openssl-0.9.8i.patch.
790 + (NOTE: there is no longer a need to patch ssh). The patch is against:
793 + If you need a patch for an older version of openssl, you should look
794 + to older OCF releases. This patch is unlikely to work on older
797 + openssl-0.9.8i.patch
798 + - enables --with-cryptodev for non BSD systems
799 + - adds -cpu option to openssl speed for calculating CPU load
801 + - fixes null pointer in openssl speed multi thread output.
802 + - fixes test keys to work with linux crypto's more stringent
804 + - adds MD5/SHA acceleration (Ronen Shitrit), only enabled
805 + with the --with-cryptodev-digests option
806 + - fixes bug in engine code caching.
808 + * build crypto-tools-XXXXXXXX.tar.gz if you want to try some of the BSD
809 + tools for testing OCF (ie., cryptotest).
811 +How to load the OCF drivers
812 +---------------------------
814 + First insert the base modules:
819 + You can then install the software OCF driver with:
823 + and one or more of the OCF HW drivers with:
830 + all the drivers take a debug option to enable verbose debug so that
831 + you can see what is going on. For debug you load them as:
833 + insmod ocf crypto_debug=1
834 + insmod cryptodev cryptodev_debug=1
835 + insmod cryptosoft swcr_debug=1
837 + You may load more than one OCF crypto driver but then there is no guarantee
838 + as to which will be used.
840 + You can also enable debug at run time on 2.6 systems with the following:
842 + echo 1 > /sys/module/ocf/parameters/crypto_debug
843 + echo 1 > /sys/module/cryptodev/parameters/cryptodev_debug
844 + echo 1 > /sys/module/cryptosoft/parameters/swcr_debug
845 + echo 1 > /sys/module/hifn7751/parameters/hifn_debug
846 + echo 1 > /sys/module/safe/parameters/safe_debug
847 + echo 1 > /sys/module/ixp4xx/parameters/ixp_debug
850 +Testing the OCF support
851 +-----------------------
853 + run "cryptotest", it should do a short test for a couple of
854 + des packets. If it does everything is working.
856 + If this works, then ssh will use the driver when invoked as:
858 + ssh -c 3des username@host
860 + to see for sure that it is operating, enable debug as defined above.
862 + To get a better idea of performance run:
864 + cryptotest 100 4096
866 + There are more options to cryptotest, see the help.
868 + It is also possible to use openssl to test the speed of the crypto
871 + openssl speed -evp des -engine cryptodev -elapsed
872 + openssl speed -evp des3 -engine cryptodev -elapsed
873 + openssl speed -evp aes128 -engine cryptodev -elapsed
875 + and multiple threads (10) with:
877 + openssl speed -evp des -engine cryptodev -elapsed -multi 10
878 + openssl speed -evp des3 -engine cryptodev -elapsed -multi 10
879 + openssl speed -evp aes128 -engine cryptodev -elapsed -multi 10
881 + for public key testing you can try:
884 + openssl speed -engine cryptodev rsa -elapsed
885 + openssl speed -engine cryptodev dsa -elapsed
888 +david_mccullough@securecomputing.com
890 +++ b/crypto/ocf/hifn/hifn7751reg.h
892 +/* $FreeBSD: src/sys/dev/hifn/hifn7751reg.h,v 1.7 2007/03/21 03:42:49 sam Exp $ */
893 +/* $OpenBSD: hifn7751reg.h,v 1.35 2002/04/08 17:49:42 jason Exp $ */
896 + * Invertex AEON / Hifn 7751 driver
897 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
898 + * Copyright (c) 1999 Theo de Raadt
899 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
900 + * http://www.netsec.net
902 + * Please send any comments, feedback, bug-fixes, or feature requests to
903 + * software@invertex.com.
905 + * Redistribution and use in source and binary forms, with or without
906 + * modification, are permitted provided that the following conditions
909 + * 1. Redistributions of source code must retain the above copyright
910 + * notice, this list of conditions and the following disclaimer.
911 + * 2. Redistributions in binary form must reproduce the above copyright
912 + * notice, this list of conditions and the following disclaimer in the
913 + * documentation and/or other materials provided with the distribution.
914 + * 3. The name of the author may not be used to endorse or promote products
915 + * derived from this software without specific prior written permission.
918 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
919 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
920 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
921 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
922 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
923 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
924 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
925 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
926 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
927 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
929 + * Effort sponsored in part by the Defense Advanced Research Projects
930 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
931 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
938 + * Some PCI configuration space offset defines. The names were made
939 + * identical to the names used by the Linux kernel.
941 +#define HIFN_BAR0 PCIR_BAR(0) /* PUC register map */
942 +#define HIFN_BAR1 PCIR_BAR(1) /* DMA register map */
943 +#define HIFN_TRDY_TIMEOUT 0x40
944 +#define HIFN_RETRY_TIMEOUT 0x41
947 + * PCI vendor and device identifiers
948 + * (the names are preserved from their OpenBSD source).
950 +#define PCI_VENDOR_HIFN 0x13a3 /* Hifn */
951 +#define PCI_PRODUCT_HIFN_7751 0x0005 /* 7751 */
952 +#define PCI_PRODUCT_HIFN_6500 0x0006 /* 6500 */
953 +#define PCI_PRODUCT_HIFN_7811 0x0007 /* 7811 */
954 +#define PCI_PRODUCT_HIFN_7855 0x001f /* 7855 */
955 +#define PCI_PRODUCT_HIFN_7951 0x0012 /* 7951 */
956 +#define PCI_PRODUCT_HIFN_7955 0x0020 /* 7954/7955 */
957 +#define PCI_PRODUCT_HIFN_7956 0x001d /* 7956 */
959 +#define PCI_VENDOR_INVERTEX 0x14e1 /* Invertex */
960 +#define PCI_PRODUCT_INVERTEX_AEON 0x0005 /* AEON */
962 +#define PCI_VENDOR_NETSEC 0x1660 /* NetSec */
963 +#define PCI_PRODUCT_NETSEC_7751 0x7751 /* 7751 */
966 + * The values below should be a multiple of 4 -- and be large enough to handle
967 + * any command the driver implements.
969 + * MAX_COMMAND = base command + mac command + encrypt command +
970 + * mac-key + rc4-key
971 + * MAX_RESULT = base result + mac result + mac + encrypt result
975 +#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
976 +#define HIFN_MAX_RESULT (8 + 4 + 20 + 4)
981 + * Holds an individual descriptor for any of the rings.
983 +typedef struct hifn_desc {
984 + volatile u_int32_t l; /* length and status bits */
985 + volatile u_int32_t p;
989 + * Masks for the "length" field of struct hifn_desc.
991 +#define HIFN_D_LENGTH 0x0000ffff /* length bit mask */
992 +#define HIFN_D_MASKDONEIRQ 0x02000000 /* mask the done interrupt */
993 +#define HIFN_D_DESTOVER 0x04000000 /* destination overflow */
994 +#define HIFN_D_OVER 0x08000000 /* overflow */
995 +#define HIFN_D_LAST 0x20000000 /* last descriptor in chain */
996 +#define HIFN_D_JUMP 0x40000000 /* jump descriptor */
997 +#define HIFN_D_VALID 0x80000000 /* valid bit */
1001 + * Processing Unit Registers (offset from BASEREG0)
1003 +#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */
1004 +#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */
1005 +#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */
1006 +#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */
1007 +#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */
1008 +#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */
1009 +#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */
1010 +#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */
1011 +#define HIFN_0_PUCTRL2 0x28 /* Processing Unit Control (2nd map) */
1012 +#define HIFN_0_MUTE1 0x80
1013 +#define HIFN_0_MUTE2 0x90
1014 +#define HIFN_0_SPACESIZE 0x100 /* Register space size */
1016 +/* Processing Unit Control Register (HIFN_0_PUCTRL) */
1017 +#define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */
1018 +#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */
1019 +#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */
1020 +#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */
1021 +#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */
1023 +/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
1024 +#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */
1025 +#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */
1026 +#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
1027 +#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
1028 +#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */
1029 +#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */
1030 +#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */
1031 +#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */
1032 +#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */
1033 +#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */
1035 +/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
1036 +#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */
1037 +#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */
1038 +#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */
1039 +#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */
1040 +#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */
1041 +#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */
1042 +#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */
1043 +#define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */
1044 +#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */
1045 +#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */
1046 +#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
1047 +#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */
1048 +#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */
1049 +#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */
1050 +#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */
1051 +#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */
1052 +#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */
1053 +#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */
1054 +#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */
1055 +#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */
1056 +#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */
1057 +#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */
1058 +#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */
1060 +/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
1061 +#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */
1062 +#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */
1063 +#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
1064 +#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
1065 +#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */
1066 +#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */
1067 +#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */
1068 +#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */
1069 +#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */
1070 +#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */
1072 +/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
1073 +#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */
1074 +#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */
1075 +#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
1076 +#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
1077 +#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */
1078 +#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */
1079 +#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */
1080 +#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */
1081 +#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */
1082 +#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */
1083 +#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */
1084 +#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */
1085 +#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */
1086 +#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */
1087 +#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */
1088 +#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */
1089 +#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */
1091 +/* FIFO Status Register (HIFN_0_FIFOSTAT) */
1092 +#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */
1093 +#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */
1095 +/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
1096 +#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as this value */
1099 + * DMA Interface Registers (offset from BASEREG1)
1101 +#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */
1102 +#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */
1103 +#define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */
1104 +#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */
1105 +#define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */
1106 +#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */
1107 +#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */
1108 +#define HIFN_1_PLL 0x4c /* 7955/7956: PLL config */
1109 +#define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */
1110 +#define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */
1111 +#define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */
1112 +#define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */
1113 +#define HIFN_1_DMA_CNFG2 0x6c /* 7955/7956: dma config #2 */
1114 +#define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */
1115 +#define HIFN_1_REVID 0x98 /* Revision ID */
1117 +#define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */
1118 +#define HIFN_1_PUB_BASE 0x300 /* Public Base Address */
1119 +#define HIFN_1_PUB_OPLEN 0x304 /* 7951-compat Public Operand Length */
1120 +#define HIFN_1_PUB_OP 0x308 /* 7951-compat Public Operand */
1121 +#define HIFN_1_PUB_STATUS 0x30c /* 7951-compat Public Status */
1122 +#define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */
1123 +#define HIFN_1_RNG_CONFIG 0x314 /* RNG config */
1124 +#define HIFN_1_RNG_DATA 0x318 /* RNG data */
1125 +#define HIFN_1_PUB_MODE 0x320 /* PK mode */
1126 +#define HIFN_1_PUB_FIFO_OPLEN 0x380 /* first element of oplen fifo */
1127 +#define HIFN_1_PUB_FIFO_OP 0x384 /* first element of op fifo */
1128 +#define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */
1129 +#define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */
1131 +/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
996 1132 +#define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destination Ring Control */
1141 +#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */
1142 +#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */
1143 +#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */
1144 +#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */
1145 +#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */
1146 +#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */
1147 +#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */
1148 +#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */
1149 +#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */
1150 +#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */
1151 +#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */
1152 +#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */
1153 +#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */
1154 +#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */
1155 +#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */
1156 +#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */
1157 +#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */
1158 +#define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */
1159 +#define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */
1160 +#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */
1161 +#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */
1162 +#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */
1163 +#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */
1164 +#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */
1165 +#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */
1166 +#define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */
1167 +#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */
1168 +#define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */
1169 +#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */
1171 +/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
1172 +#define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
1173 +#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */
1174 +#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */
1175 +#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */
1176 +#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */
1177 +#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */
1178 +#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */
1179 +#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */
1180 +#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */
1181 +#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */
1182 +#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */
1183 +#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */
1184 +#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */
1185 +#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */
1186 +#define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */
1187 +#define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */
1188 +#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */
1189 +#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */
1190 +#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */
1191 +#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */
1192 +#define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */
1193 +#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */
1195 +/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
1196 +#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */
1197 +#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */
1198 +#define HIFN_DMACNFG_UNLOCK 0x00000800
1199 +#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */
1200 +#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */
1201 +#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */
1202 +#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */
1203 +#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */
1205 +/* DMA Configuration Register (HIFN_1_DMA_CNFG2) */
1206 +#define HIFN_DMACNFG2_PKSWAP32 (1 << 19) /* swap the OPLEN/OP reg */
1207 +#define HIFN_DMACNFG2_PKSWAP8 (1 << 18) /* swap the bits of OPLEN/OP */
1208 +#define HIFN_DMACNFG2_BAR0_SWAP32 (1<<17) /* swap the bytes of BAR0 */
1209 +#define HIFN_DMACNFG2_BAR1_SWAP8 (1<<16) /* swap the bits of BAR0 */
1210 +#define HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT 12
1211 +#define HIFN_DMACNFG2_INIT_READ_BURST_SHIFT 8
1212 +#define HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT 4
1213 +#define HIFN_DMACNFG2_TGT_READ_BURST_SHIFT 0
1215 +/* 7811 RNG Enable Register (HIFN_1_7811_RNGENA) */
1216 +#define HIFN_7811_RNGENA_ENA 0x00000001 /* enable RNG */
1218 +/* 7811 RNG Config Register (HIFN_1_7811_RNGCFG) */
1219 +#define HIFN_7811_RNGCFG_PRE1 0x00000f00 /* first prescalar */
1220 +#define HIFN_7811_RNGCFG_OPRE 0x00000080 /* output prescalar */
1221 +#define HIFN_7811_RNGCFG_DEFL 0x00000f80 /* 2 words/ 1/100 sec */
1223 +/* 7811 RNG Status Register (HIFN_1_7811_RNGSTS) */
1224 +#define HIFN_7811_RNGSTS_RDY 0x00004000 /* two numbers in FIFO */
1225 +#define HIFN_7811_RNGSTS_UFL 0x00001000 /* rng underflow */
1227 +/* 7811 MIPS Reset Register (HIFN_1_7811_MIPSRST) */
1228 +#define HIFN_MIPSRST_BAR2SIZE 0xffff0000 /* sdram size */
1229 +#define HIFN_MIPSRST_GPRAMINIT 0x00008000 /* gpram can be accessed */
1230 +#define HIFN_MIPSRST_CRAMINIT 0x00004000 /* ctxram can be accessed */
1231 +#define HIFN_MIPSRST_LED2 0x00000400 /* external LED2 */
1232 +#define HIFN_MIPSRST_LED1 0x00000200 /* external LED1 */
1233 +#define HIFN_MIPSRST_LED0 0x00000100 /* external LED0 */
1234 +#define HIFN_MIPSRST_MIPSDIS 0x00000004 /* disable MIPS */
1235 +#define HIFN_MIPSRST_MIPSRST 0x00000002 /* warm reset MIPS */
1236 +#define HIFN_MIPSRST_MIPSCOLD 0x00000001 /* cold reset MIPS */
1238 +/* Public key reset register (HIFN_1_PUB_RESET) */
1239 +#define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */
1241 +/* Public operation register (HIFN_1_PUB_OP) */
1242 +#define HIFN_PUBOP_AOFFSET 0x0000003e /* A offset */
1243 +#define HIFN_PUBOP_BOFFSET 0x00000fc0 /* B offset */
1244 +#define HIFN_PUBOP_MOFFSET 0x0003f000 /* M offset */
1245 +#define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */
1246 +#define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */
1247 +#define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */
1248 +#define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */
1249 +#define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */
1250 +#define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */
1251 +#define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
1252 +#define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */
1253 +#define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */
1254 +#define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */
1255 +#define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */
1256 +#define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */
1257 +#define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular Red */
1258 +#define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular Exp */
1260 +/* Public operand length register (HIFN_1_PUB_OPLEN) */
1261 +#define HIFN_PUBOPLEN_MODLEN 0x0000007f
1262 +#define HIFN_PUBOPLEN_EXPLEN 0x0003ff80
1263 +#define HIFN_PUBOPLEN_REDLEN 0x003c0000
1265 +/* Public status register (HIFN_1_PUB_STATUS) */
1266 +#define HIFN_PUBSTS_DONE 0x00000001 /* operation done */
1267 +#define HIFN_PUBSTS_CARRY 0x00000002 /* carry */
1268 +#define HIFN_PUBSTS_FIFO_EMPTY 0x00000100 /* fifo empty */
1269 +#define HIFN_PUBSTS_FIFO_FULL 0x00000200 /* fifo full */
1270 +#define HIFN_PUBSTS_FIFO_OVFL 0x00000400 /* fifo overflow */
1271 +#define HIFN_PUBSTS_FIFO_WRITE 0x000f0000 /* fifo write */
1272 +#define HIFN_PUBSTS_FIFO_READ 0x0f000000 /* fifo read */
1274 +/* Public interrupt enable register (HIFN_1_PUB_IEN) */
1275 +#define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */
1277 +/* Random number generator config register (HIFN_1_RNG_CONFIG) */
1278 +#define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */
1281 + * Register offsets in register set 1
1284 +#define HIFN_UNLOCK_SECRET1 0xf4
1285 +#define HIFN_UNLOCK_SECRET2 0xfc
1288 + * PLL config register
1290 + * This register is present only on 7954/7955/7956 parts. It must be
1291 + * programmed according to the bus interface method used by the h/w.
1292 + * Note that the parts require a stable clock. Since the PCI clock
1293 + * may vary the reference clock must usually be used. To avoid
1294 + * overclocking the core logic, setup must be done carefully, refer
1295 + * to the driver for details. The exact multiplier required varies
1296 + * by part and system configuration; refer to the Hifn documentation.
1298 +#define HIFN_PLL_REF_SEL 0x00000001 /* REF/HBI clk selection */
1299 +#define HIFN_PLL_BP 0x00000002 /* bypass (used during setup) */
1300 +/* bit 2 reserved */
1301 +#define HIFN_PLL_PK_CLK_SEL 0x00000008 /* public key clk select */
1302 +#define HIFN_PLL_PE_CLK_SEL 0x00000010 /* packet engine clk select */
1303 +/* bits 5-9 reserved */
1304 +#define HIFN_PLL_MBSET 0x00000400 /* must be set to 1 */
1305 +#define HIFN_PLL_ND 0x00003800 /* Fpll_ref multiplier select */
1306 +#define HIFN_PLL_ND_SHIFT 11
1307 +#define HIFN_PLL_ND_2 0x00000000 /* 2x */
1308 +#define HIFN_PLL_ND_4 0x00000800 /* 4x */
1309 +#define HIFN_PLL_ND_6 0x00001000 /* 6x */
1310 +#define HIFN_PLL_ND_8 0x00001800 /* 8x */
1311 +#define HIFN_PLL_ND_10 0x00002000 /* 10x */
1312 +#define HIFN_PLL_ND_12 0x00002800 /* 12x */
1313 +/* bits 14-15 reserved */
1314 +#define HIFN_PLL_IS 0x00010000 /* charge pump current select */
1315 +/* bits 17-31 reserved */
1318 + * Board configuration specifies only these bits.
1320 +#define HIFN_PLL_CONFIG (HIFN_PLL_IS|HIFN_PLL_ND|HIFN_PLL_REF_SEL)
1323 + * Public Key Engine Mode Register
1325 +#define HIFN_PKMODE_HOSTINVERT (1 << 0) /* HOST INVERT */
1326 +#define HIFN_PKMODE_ENHANCED (1 << 1) /* Enable enhanced mode */
1329 +/*********************************************************************
1330 + * Structs for board commands
1332 + *********************************************************************/
1335 + * Structure to help build up the command data structure.
1337 +typedef struct hifn_base_command {
1338 + volatile u_int16_t masks;
1339 + volatile u_int16_t session_num;
1340 + volatile u_int16_t total_source_count;
1341 + volatile u_int16_t total_dest_count;
1342 +} hifn_base_command_t;
1344 +#define HIFN_BASE_CMD_MAC 0x0400
1345 +#define HIFN_BASE_CMD_CRYPT 0x0800
1346 +#define HIFN_BASE_CMD_DECODE 0x2000
1347 +#define HIFN_BASE_CMD_SRCLEN_M 0xc000
1348 +#define HIFN_BASE_CMD_SRCLEN_S 14
1349 +#define HIFN_BASE_CMD_DSTLEN_M 0x3000
1350 +#define HIFN_BASE_CMD_DSTLEN_S 12
1351 +#define HIFN_BASE_CMD_LENMASK_HI 0x30000
1352 +#define HIFN_BASE_CMD_LENMASK_LO 0x0ffff
1355 + * Structure to help build up the command data structure.
1357 +typedef struct hifn_crypt_command {
1358 + volatile u_int16_t masks;
1359 + volatile u_int16_t header_skip;
1360 + volatile u_int16_t source_count;
1361 + volatile u_int16_t reserved;
1362 +} hifn_crypt_command_t;
1364 +#define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */
1365 +#define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */
1366 +#define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */
1367 +#define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */
1368 +#define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */
1369 +#define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */
1370 +#define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */
1371 +#define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */
1372 +#define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */
1373 +#define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */
1374 +#define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */
1375 +#define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */
1376 +#define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */
1378 +#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
1379 +#define HIFN_CRYPT_CMD_SRCLEN_S 14
1381 +#define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */
1382 +#define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */
1383 +#define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */
1384 +#define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */
1387 + * Structure to help build up the command data structure.
1389 +typedef struct hifn_mac_command {
1390 + volatile u_int16_t masks;
1391 + volatile u_int16_t header_skip;
1392 + volatile u_int16_t source_count;
1393 + volatile u_int16_t reserved;
1394 +} hifn_mac_command_t;
1396 +#define HIFN_MAC_CMD_ALG_MASK 0x0001
1397 +#define HIFN_MAC_CMD_ALG_SHA1 0x0000
1398 +#define HIFN_MAC_CMD_ALG_MD5 0x0001
1399 +#define HIFN_MAC_CMD_MODE_MASK 0x000c
1400 +#define HIFN_MAC_CMD_MODE_HMAC 0x0000
1401 +#define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004
1402 +#define HIFN_MAC_CMD_MODE_HASH 0x0008
1403 +#define HIFN_MAC_CMD_MODE_FULL 0x0004
1404 +#define HIFN_MAC_CMD_TRUNC 0x0010
1405 +#define HIFN_MAC_CMD_RESULT 0x0020
1406 +#define HIFN_MAC_CMD_APPEND 0x0040
1407 +#define HIFN_MAC_CMD_SRCLEN_M 0xc000
1408 +#define HIFN_MAC_CMD_SRCLEN_S 14
1411 + * MAC POS IPsec initiates authentication after encryption on encodes
1412 + * and before decryption on decodes.
1414 +#define HIFN_MAC_CMD_POS_IPSEC 0x0200
1415 +#define HIFN_MAC_CMD_NEW_KEY 0x0800
1418 + * The poll frequency and poll scalar defines are unshifted values used
1419 + * to set fields in the DMA Configuration Register.
1421 +#ifndef HIFN_POLL_FREQUENCY
1422 +#define HIFN_POLL_FREQUENCY 0x1
1425 +#ifndef HIFN_POLL_SCALAR
1426 +#define HIFN_POLL_SCALAR 0x0
1429 +#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */
1430 +#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */
1431 +#endif /* __HIFN_H__ */
1433 +++ b/crypto/ocf/hifn/hifn7751var.h
1435 +/* $FreeBSD: src/sys/dev/hifn/hifn7751var.h,v 1.9 2007/03/21 03:42:49 sam Exp $ */
1436 +/* $OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $ */
1439 + * Invertex AEON / Hifn 7751 driver
1440 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
1441 + * Copyright (c) 1999 Theo de Raadt
1442 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
1443 + * http://www.netsec.net
1445 + * Please send any comments, feedback, bug-fixes, or feature requests to
1446 + * software@invertex.com.
1448 + * Redistribution and use in source and binary forms, with or without
1449 + * modification, are permitted provided that the following conditions
1452 + * 1. Redistributions of source code must retain the above copyright
1453 + * notice, this list of conditions and the following disclaimer.
1454 + * 2. Redistributions in binary form must reproduce the above copyright
1455 + * notice, this list of conditions and the following disclaimer in the
1456 + * documentation and/or other materials provided with the distribution.
1457 + * 3. The name of the author may not be used to endorse or promote products
1458 + * derived from this software without specific prior written permission.
1461 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1462 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1463 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1464 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1465 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
1466 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1467 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1468 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1469 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
1470 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1472 + * Effort sponsored in part by the Defense Advanced Research Projects
1473 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
1474 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
1478 +#ifndef __HIFN7751VAR_H__
1479 +#define __HIFN7751VAR_H__
1484 + * Some configurable values for the driver. By default command+result
1485 + * descriptor rings are the same size. The src+dst descriptor rings
1486 + * are sized at 3.5x the number of potential commands. Slower parts
1487 + * (e.g. 7951) tend to run out of src descriptors; faster parts (7811)
1488 + * run out of src+cmd/result descriptors. It's not clear that increasing the size
1489 + * of the descriptor rings helps performance significantly as other
1490 + * factors tend to come into play (e.g. copying misaligned packets).
1492 +#define HIFN_D_CMD_RSIZE 24 /* command descriptors */
1493 +#define HIFN_D_SRC_RSIZE ((HIFN_D_CMD_RSIZE * 7) / 2) /* source descriptors */
1494 +#define HIFN_D_RES_RSIZE HIFN_D_CMD_RSIZE /* result descriptors */
1495 +#define HIFN_D_DST_RSIZE HIFN_D_SRC_RSIZE /* destination descriptors */
1498 + * Length values for cryptography
1500 +#define HIFN_DES_KEY_LENGTH 8
1501 +#define HIFN_3DES_KEY_LENGTH 24
1502 +#define HIFN_MAX_CRYPT_KEY_LENGTH HIFN_3DES_KEY_LENGTH
1503 +#define HIFN_IV_LENGTH 8
1504 +#define HIFN_AES_IV_LENGTH 16
1505 +#define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH
1508 + * Length values for authentication
1510 +#define HIFN_MAC_KEY_LENGTH 64
1511 +#define HIFN_MD5_LENGTH 16
1512 +#define HIFN_SHA1_LENGTH 20
1513 +#define HIFN_MAC_TRUNC_LENGTH 12
1515 +#define MAX_SCATTER 64
1518 + * Data structure to hold all 4 rings and any other ring related data.
1522 + * Descriptor rings. We add +1 to the size to accommodate the
1523 + * jump descriptor.
1525 + struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1];
1526 + struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1];
1527 + struct hifn_desc dstr[HIFN_D_DST_RSIZE+1];
1528 + struct hifn_desc resr[HIFN_D_RES_RSIZE+1];
1530 + struct hifn_command *hifn_commands[HIFN_D_RES_RSIZE];
1532 + u_char command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
1533 + u_char result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
1534 + u_int32_t slop[HIFN_D_CMD_RSIZE];
1536 + u_int64_t test_src, test_dst;
1539 + * Our current positions for insertion and removal from the descriptor
1542 + int cmdi, srci, dsti, resi;
1543 + volatile int cmdu, srcu, dstu, resu;
1544 + int cmdk, srck, dstk, resk;
1547 +struct hifn_session {
1550 + u_int8_t hs_iv[HIFN_MAX_IV_LENGTH];
1553 +#define HIFN_RING_SYNC(sc, r, i, f) \
1554 + /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1556 +#define HIFN_CMDR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), cmdr, (i), (f))
1557 +#define HIFN_RESR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), resr, (i), (f))
1558 +#define HIFN_SRCR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), srcr, (i), (f))
1559 +#define HIFN_DSTR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), dstr, (i), (f))
1561 +#define HIFN_CMD_SYNC(sc, i, f) \
1562 + /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1564 +#define HIFN_RES_SYNC(sc, i, f) \
1565 + /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1567 +typedef int bus_size_t;
1570 + * Holds data specific to a single HIFN board.
1572 +struct hifn_softc {
1573 + softc_device_decl sc_dev;
1575 + struct pci_dev *sc_pcidev; /* PCI device pointer */
1576 + spinlock_t sc_mtx; /* per-instance lock */
1578 + int sc_num; /* for multiple devs */
1580 + ocf_iomem_t sc_bar0;
1581 + bus_size_t sc_bar0_lastreg;/* bar0 last reg written */
1582 + ocf_iomem_t sc_bar1;
1583 + bus_size_t sc_bar1_lastreg;/* bar1 last reg written */
1587 + u_int32_t sc_dmaier;
1588 + u_int32_t sc_drammodel; /* 1=dram, 0=sram */
1589 + u_int32_t sc_pllconfig; /* 7954/7955/7956 PLL config */
1591 + struct hifn_dma *sc_dma;
1592 + dma_addr_t sc_dma_physaddr;/* physical address of sc_dma */
1598 + struct hifn_session *sc_sessions;
1601 +#define HIFN_HAS_RNG 0x1 /* includes random number generator */
1602 +#define HIFN_HAS_PUBLIC 0x2 /* includes public key support */
1603 +#define HIFN_HAS_AES 0x4 /* includes AES support */
1604 +#define HIFN_IS_7811 0x8 /* Hifn 7811 part */
1605 +#define HIFN_IS_7956 0x10 /* Hifn 7956/7955 don't have SDRAM */
1607 + struct timer_list sc_tickto; /* for managing DMA */
1610 + int sc_rnghz; /* RNG polling frequency */
1612 + int sc_c_busy; /* command ring busy */
1613 + int sc_s_busy; /* source data ring busy */
1614 + int sc_d_busy; /* destination data ring busy */
1615 + int sc_r_busy; /* result ring busy */
1616 + int sc_active; /* for initial countdown */
1617 + int sc_needwakeup; /* ops q'd waiting on resources */
1618 + int sc_curbatch; /* # ops submitted w/o int */
1620 +#ifdef HIFN_VULCANDEV
1621 + struct cdev *sc_pkdev;
1625 +#define HIFN_LOCK(_sc) spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
1626 +#define HIFN_UNLOCK(_sc) spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
1631 + * This is the control structure used to pass commands to hifn_encrypt().
1635 + * Flags is the bitwise "or" values for command configuration. A single
1636 + * encrypt direction needs to be set:
1638 + * HIFN_ENCODE or HIFN_DECODE
1640 + * To use cryptography, a single crypto algorithm must be included:
1642 + * HIFN_CRYPT_3DES or HIFN_CRYPT_DES
1644 + * If authentication is used, a single MAC algorithm must be included:
1646 + * HIFN_MAC_MD5 or HIFN_MAC_SHA1
1648 + * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
1649 + * If the value below is set, hash values are truncated or assumed
1650 + * truncated to 12 bytes:
1654 + * Keys for encryption and authentication can be sent as part of a command,
1655 + * or the last key value used with a particular session can be retrieved
1656 + * and used again if either of these flags are not specified.
1658 + * HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY
1662 + * A number between 0 and 2048 (for DRAM models) or a number between
1663 + * 0 and 768 (for SRAM models). Those who don't want to use session
1664 + * numbers should leave value at zero and send a new crypt key and/or
1665 + * new MAC key on every command. If you use session numbers and
1666 + * don't send a key with a command, the last key sent for that same
1667 + * session number will be used.
1669 + * Warning: Using session numbers and multiboard at the same time
1670 + * is currently broken.
1674 + * Either fill in the mbuf pointer and npa=0 or
1675 + * fill packp[] and packl[] and set npa to > 0
1679 + * The number of bytes of the source_buf that are skipped over before
1680 + * authentication begins. This must be a number between 0 and 2^16-1
1681 + * and can be used by IPsec implementers to skip over IP headers.
1682 + * *** Value ignored if authentication not used ***
1684 + * crypt_header_skip
1685 + * -----------------
1686 + * The number of bytes of the source_buf that are skipped over before
1687 + * the cryptographic operation begins. This must be a number between 0
1688 + * and 2^16-1. For IPsec, this number will always be 8 bytes larger
1689 + * than the auth_header_skip (to skip over the ESP header).
1690 + * *** Value ignored if cryptography not used ***
1693 +struct hifn_operand {
1695 + struct sk_buff *skb;
1697 + unsigned char *buf;
1700 + bus_size_t mapsize;
1703 + dma_addr_t ds_addr;
1705 + } segs[MAX_SCATTER];
1708 +struct hifn_command {
1709 + u_int16_t session_num;
1710 + u_int16_t base_masks, cry_masks, mac_masks;
1711 + u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH];
1713 + int sloplen, slopidx;
1715 + struct hifn_operand src;
1716 + struct hifn_operand dst;
1718 + struct hifn_softc *softc;
1719 + struct cryptop *crp;
1720 + struct cryptodesc *enccrd, *maccrd;
1723 +#define src_skb src.u.skb
1724 +#define src_io src.u.io
1725 +#define src_map src.map
1726 +#define src_mapsize src.mapsize
1727 +#define src_segs src.segs
1728 +#define src_nsegs src.nsegs
1729 +#define src_buf src.u.buf
1731 +#define dst_skb dst.u.skb
1732 +#define dst_io dst.u.io
1733 +#define dst_map dst.map
1734 +#define dst_mapsize dst.mapsize
1735 +#define dst_segs dst.segs
1736 +#define dst_nsegs dst.nsegs
1737 +#define dst_buf dst.u.buf
1740 + * Return values for hifn_crypto()
1742 +#define HIFN_CRYPTO_SUCCESS 0
1743 +#define HIFN_CRYPTO_BAD_INPUT (-1)
1744 +#define HIFN_CRYPTO_RINGS_FULL (-2)
1746 +/**************************************************************************
1748 + * Function: hifn_crypto
1750 + * Purpose: Called by external drivers to begin an encryption on the
1753 + * Blocking/Non-blocking Issues
1754 + * ============================
1755 + * The driver cannot block in hifn_crypto (no calls to tsleep) currently.
1756 + * hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough
1757 + * room in any of the rings for the request to proceed.
1761 + * 0 for success, negative values on error
1763 + * Defines for negative error codes are:
1765 + * HIFN_CRYPTO_BAD_INPUT : The passed in command had invalid settings.
1766 + * HIFN_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking
1767 + * behaviour was requested.
1769 + *************************************************************************/
1772 + * Convert back and forth from 'sid' to 'card' and 'session'
1774 +#define HIFN_CARD(sid) (((sid) & 0xf0000000) >> 28)
1775 +#define HIFN_SESSION(sid) ((sid) & 0x000007ff)
1776 +#define HIFN_SID(crd,ses) (((crd) << 28) | ((ses) & 0x7ff))
1778 +#endif /* _KERNEL */
1780 +struct hifn_stats {
1781 + u_int64_t hst_ibytes;
1782 + u_int64_t hst_obytes;
1783 + u_int32_t hst_ipackets;
1784 + u_int32_t hst_opackets;
1785 + u_int32_t hst_invalid;
1786 + u_int32_t hst_nomem; /* malloc or one of hst_nomem_* */
1787 + u_int32_t hst_abort;
1788 + u_int32_t hst_noirq; /* IRQ for no reason */
1789 + u_int32_t hst_totbatch; /* ops submitted w/o interrupt */
1790 + u_int32_t hst_maxbatch; /* max ops submitted together */
1791 + u_int32_t hst_unaligned; /* unaligned src caused copy */
1793 + * The following divides hst_nomem into more specific buckets.
1795 + u_int32_t hst_nomem_map; /* bus_dmamap_create failed */
1796 + u_int32_t hst_nomem_load; /* bus_dmamap_load_* failed */
1797 + u_int32_t hst_nomem_mbuf; /* MGET* failed */
1798 + u_int32_t hst_nomem_mcl; /* MCLGET* failed */
1799 + u_int32_t hst_nomem_cr; /* out of command/result descriptor */
1800 + u_int32_t hst_nomem_sd; /* out of src/dst descriptors */
1803 +#endif /* __HIFN7751VAR_H__ */
1805 +++ b/crypto/ocf/hifn/hifn7751.c
1807 +/* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
1810 + * Invertex AEON / Hifn 7751 driver
1811 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
1812 + * Copyright (c) 1999 Theo de Raadt
1813 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
1814 + * http://www.netsec.net
1815 + * Copyright (c) 2003 Hifn Inc.
1817 + * This driver is based on a previous driver by Invertex, for which they
1818 + * requested: Please send any comments, feedback, bug-fixes, or feature
1819 + * requests to software@invertex.com.
1821 + * Redistribution and use in source and binary forms, with or without
1822 + * modification, are permitted provided that the following conditions
1825 + * 1. Redistributions of source code must retain the above copyright
1826 + * notice, this list of conditions and the following disclaimer.
1827 + * 2. Redistributions in binary form must reproduce the above copyright
1828 + * notice, this list of conditions and the following disclaimer in the
1829 + * documentation and/or other materials provided with the distribution.
1830 + * 3. The name of the author may not be used to endorse or promote products
1831 + * derived from this software without specific prior written permission.
1833 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1834 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1835 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1836 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1837 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
1838 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1839 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1840 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1841 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
1842 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1844 + * Effort sponsored in part by the Defense Advanced Research Projects
1845 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
1846 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
1849 +__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
1853 + * Driver for various Hifn encryption processors.
1855 +#ifndef AUTOCONF_INCLUDED
1856 +#include <linux/config.h>
1858 +#include <linux/module.h>
1859 +#include <linux/init.h>
1860 +#include <linux/list.h>
1861 +#include <linux/slab.h>
1862 +#include <linux/wait.h>
1863 +#include <linux/sched.h>
1864 +#include <linux/pci.h>
1865 +#include <linux/delay.h>
1866 +#include <linux/interrupt.h>
1867 +#include <linux/spinlock.h>
1868 +#include <linux/random.h>
1869 +#include <linux/version.h>
1870 +#include <linux/skbuff.h>
1871 +#include <asm/io.h>
1873 +#include <cryptodev.h>
1875 +#include <hifn/hifn7751reg.h>
1876 +#include <hifn/hifn7751var.h>
1879 +#define DPRINTF(a...) if (hifn_debug) { \
1880 + printk("%s: ", sc ? \
1881 + device_get_nameunit(sc->sc_dev) : "hifn"); \
1885 +#define DPRINTF(a...)
1889 +pci_get_revid(struct pci_dev *dev)
1892 + pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
1896 +static struct hifn_stats hifnstats;
1898 +#define debug hifn_debug
1899 +int hifn_debug = 0;
1900 +module_param(hifn_debug, int, 0644);
1901 +MODULE_PARM_DESC(hifn_debug, "Enable debug");
1903 +int hifn_maxbatch = 1;
1904 +module_param(hifn_maxbatch, int, 0644);
1905 +MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
1908 +char *hifn_pllconfig = NULL;
1909 +MODULE_PARM(hifn_pllconfig, "s");
1911 +char hifn_pllconfig[32]; /* This setting is RO after loading */
1912 +module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
1914 +MODULE_PARM_DESC(hifn_pllconfig, "PLL config, ie., pci66, ext33, ...");
1916 +#ifdef HIFN_VULCANDEV
1917 +#include <sys/conf.h>
1918 +#include <sys/uio.h>
1920 +static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
1924 + * Prototypes and count for the pci_device structure
1926 +static int hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
1927 +static void hifn_remove(struct pci_dev *dev);
1929 +static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
1930 +static int hifn_freesession(device_t, u_int64_t);
1931 +static int hifn_process(device_t, struct cryptop *, int);
1933 +static device_method_t hifn_methods = {
1934 + /* crypto device methods */
1935 + DEVMETHOD(cryptodev_newsession, hifn_newsession),
1936 + DEVMETHOD(cryptodev_freesession,hifn_freesession),
1937 + DEVMETHOD(cryptodev_process, hifn_process),
1940 +static void hifn_reset_board(struct hifn_softc *, int);
1941 +static void hifn_reset_puc(struct hifn_softc *);
1942 +static void hifn_puc_wait(struct hifn_softc *);
1943 +static int hifn_enable_crypto(struct hifn_softc *);
1944 +static void hifn_set_retry(struct hifn_softc *sc);
1945 +static void hifn_init_dma(struct hifn_softc *);
1946 +static void hifn_init_pci_registers(struct hifn_softc *);
1947 +static int hifn_sramsize(struct hifn_softc *);
1948 +static int hifn_dramsize(struct hifn_softc *);
1949 +static int hifn_ramtype(struct hifn_softc *);
1950 +static void hifn_sessions(struct hifn_softc *);
1951 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
1952 +static irqreturn_t hifn_intr(int irq, void *arg);
1954 +static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
1956 +static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
1957 +static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
1958 +static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
1959 +static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
1960 +static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
1961 +static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
1962 +static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
1963 +static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
1964 +static int hifn_init_pubrng(struct hifn_softc *);
1965 +static void hifn_tick(unsigned long arg);
1966 +static void hifn_abort(struct hifn_softc *);
1967 +static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
1969 +static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
1970 +static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
1972 +#ifdef CONFIG_OCF_RANDOMHARVEST
1973 +static int hifn_read_random(void *arg, u_int32_t *buf, int len);
1976 +#define HIFN_MAX_CHIPS 8
1977 +static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
1979 +static __inline u_int32_t
1980 +READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
1982 + u_int32_t v = readl(sc->sc_bar0 + reg);
1983 + sc->sc_bar0_lastreg = (bus_size_t) -1;
1986 +#define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
1988 +static __inline u_int32_t
1989 +READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
1991 + u_int32_t v = readl(sc->sc_bar1 + reg);
1992 + sc->sc_bar1_lastreg = (bus_size_t) -1;
1995 +#define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
1998 + * map in a given buffer (great on some arches :-)
2002 +pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
2004 + struct iovec *iov = uio->uio_iov;
2006 + DPRINTF("%s()\n", __FUNCTION__);
2009 + for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
2010 + buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
2011 + iov->iov_base, iov->iov_len,
2012 + PCI_DMA_BIDIRECTIONAL);
2013 + buf->segs[buf->nsegs].ds_len = iov->iov_len;
2014 + buf->mapsize += iov->iov_len;
2018 + /* identify this buffer by the first segment */
2019 + buf->map = (void *) buf->segs[0].ds_addr;
2024 + * map in a given sk_buff
2028 +pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
2032 + DPRINTF("%s()\n", __FUNCTION__);
2036 + buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
2037 + skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
2038 + buf->segs[0].ds_len = skb_headlen(skb);
2039 + buf->mapsize += buf->segs[0].ds_len;
2043 + for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
2044 + buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
2045 + buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
2046 + page_address(skb_shinfo(skb)->frags[i].page) +
2047 + skb_shinfo(skb)->frags[i].page_offset,
2048 + buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
2049 + buf->mapsize += buf->segs[buf->nsegs].ds_len;
2053 + /* identify this buffer by the first segment */
2054 + buf->map = (void *) buf->segs[0].ds_addr;
2059 + * map in a given contiguous buffer
2063 +pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
2065 + DPRINTF("%s()\n", __FUNCTION__);
2068 + buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
2069 + b, len, PCI_DMA_BIDIRECTIONAL);
2070 + buf->segs[0].ds_len = len;
2071 + buf->mapsize += buf->segs[0].ds_len;
2074 + /* identify this buffer by the first segment */
2075 + buf->map = (void *) buf->segs[0].ds_addr;
2079 +#if 0 /* not needed at this time */
2081 +pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
2085 + DPRINTF("%s()\n", __FUNCTION__);
2086 + for (i = 0; i < buf->nsegs; i++)
2087 + pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
2088 + buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
2093 +pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
2096 + DPRINTF("%s()\n", __FUNCTION__);
2097 + for (i = 0; i < buf->nsegs; i++) {
2098 + pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
2099 + buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
2100 + buf->segs[i].ds_addr = 0;
2101 + buf->segs[i].ds_len = 0;
2109 +hifn_partname(struct hifn_softc *sc)
2111 + /* XXX sprintf numbers when not decoded */
2112 + switch (pci_get_vendor(sc->sc_pcidev)) {
2113 + case PCI_VENDOR_HIFN:
2114 + switch (pci_get_device(sc->sc_pcidev)) {
2115 + case PCI_PRODUCT_HIFN_6500: return "Hifn 6500";
2116 + case PCI_PRODUCT_HIFN_7751: return "Hifn 7751";
2117 + case PCI_PRODUCT_HIFN_7811: return "Hifn 7811";
2118 + case PCI_PRODUCT_HIFN_7951: return "Hifn 7951";
2119 + case PCI_PRODUCT_HIFN_7955: return "Hifn 7955";
2120 + case PCI_PRODUCT_HIFN_7956: return "Hifn 7956";
2122 + return "Hifn unknown-part";
2123 + case PCI_VENDOR_INVERTEX:
2124 + switch (pci_get_device(sc->sc_pcidev)) {
2125 + case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
2127 + return "Invertex unknown-part";
2128 + case PCI_VENDOR_NETSEC:
2129 + switch (pci_get_device(sc->sc_pcidev)) {
2130 + case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751";
2132 + return "NetSec unknown-part";
2134 + return "Unknown-vendor unknown-part";
2138 +checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
2140 + struct hifn_softc *sc = pci_get_drvdata(dev);
2142 + device_printf(sc->sc_dev, "Warning, %s %u out of range, "
2143 + "using max %u\n", what, v, max);
2145 + } else if (v < min) {
2146 + device_printf(sc->sc_dev, "Warning, %s %u out of range, "
2147 + "using min %u\n", what, v, min);
2154 + * Select PLL configuration for 795x parts. This is complicated in
2155 + * that we cannot determine the optimal parameters without user input.
2156 + * The reference clock is derived from an external clock through a
2157 + * multiplier. The external clock is either the host bus (i.e. PCI)
2158 + * or an external clock generator. When using the PCI bus we assume
2159 + * the clock is either 33 or 66 MHz; for an external source we cannot
2162 + * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
2163 + * for an external source, followed by the frequency. We calculate
2164 + * the appropriate multiplier and PLL register contents accordingly.
2165 + * When no configuration is given we default to "pci66" since that
2166 + * always will allow the card to work. If a card is using the PCI
2167 + * bus clock and in a 33MHz slot then it will be operating at half
2168 + * speed until the correct information is provided.
2170 + * We use a default setting of "ext66" because according to Mike Ham
2171 + * of HiFn, almost every board in existence has an external crystal
2172 + * populated at 66 MHz. Using PCI can be a problem on modern motherboards,
2173 + * because PCI33 can have clocks from 0 to 33 MHz, and some have
2174 + * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
2177 +hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
2179 + const char *pllspec = hifn_pllconfig;
2180 + u_int freq, mul, fl, fh;
2181 + u_int32_t pllconfig;
2184 + if (pllspec == NULL)
2185 + pllspec = "ext66";
2188 + if (strncmp(pllspec, "ext", 3) == 0) {
2190 + pllconfig |= HIFN_PLL_REF_SEL;
2191 + switch (pci_get_device(dev)) {
2192 + case PCI_PRODUCT_HIFN_7955:
2193 + case PCI_PRODUCT_HIFN_7956:
2194 + fl = 20, fh = 100;
2197 + case PCI_PRODUCT_HIFN_7954:
2202 + } else if (strncmp(pllspec, "pci", 3) == 0)
2204 + freq = strtoul(pllspec, &nxt, 10);
2205 + if (nxt == pllspec)
2208 + freq = checkmaxmin(dev, "frequency", freq, fl, fh);
2210 + * Calculate multiplier. We target a Fck of 266 MHz,
2211 + * allowing only even values, possibly rounded down.
2212 + * Multipliers > 8 must set the charge pump current.
2214 + mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
2215 + pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
2217 + pllconfig |= HIFN_PLL_IS;
2222 + * Attach an interface that successfully probed.
2225 +hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2227 + struct hifn_softc *sc = NULL;
2229 + u_int16_t ena, rev;
2231 + unsigned long mem_start, mem_len;
2232 + static int num_chips = 0;
2234 + DPRINTF("%s()\n", __FUNCTION__);
2236 + if (pci_enable_device(dev) < 0)
2239 + if (pci_set_mwi(dev))
2243 + printk("hifn: found device with no IRQ assigned. check BIOS settings!");
2244 + pci_disable_device(dev);
2248 + sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
2251 + memset(sc, 0, sizeof(*sc));
2253 + softc_device_init(sc, "hifn", num_chips, hifn_methods);
2255 + sc->sc_pcidev = dev;
2258 + sc->sc_num = num_chips++;
2259 + if (sc->sc_num < HIFN_MAX_CHIPS)
2260 + hifn_chip_idx[sc->sc_num] = sc;
2262 + pci_set_drvdata(sc->sc_pcidev, sc);
2264 + spin_lock_init(&sc->sc_mtx);
2266 + /* XXX handle power management */
2269 + * The 7951 and 795x have a random number generator and
2270 + * public key support; note this.
2272 + if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2273 + (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
2274 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
2275 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
2276 + sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
2278 + * The 7811 has a random number generator and
2279 + * we also note its identity because of some quirks.
2281 + if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2282 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
2283 + sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
2286 + * The 795x parts support AES.
2288 + if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2289 + (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
2290 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
2291 + sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
2293 + * Select PLL configuration. This depends on the
2294 + * bus and board design and must be manually configured
2295 + * if the default setting is unacceptable.
2297 + hifn_getpllconfig(dev, &sc->sc_pllconfig);
2301 + * Setup PCI resources. Note that we record the bus
2302 + * tag and handle for each register mapping, this is
2303 + * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
2304 + * and WRITE_REG_1 macros throughout the driver.
2306 + mem_start = pci_resource_start(sc->sc_pcidev, 0);
2307 + mem_len = pci_resource_len(sc->sc_pcidev, 0);
2308 + sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
2309 + if (!sc->sc_bar0) {
2310 + device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
2313 + sc->sc_bar0_lastreg = (bus_size_t) -1;
2315 + mem_start = pci_resource_start(sc->sc_pcidev, 1);
2316 + mem_len = pci_resource_len(sc->sc_pcidev, 1);
2317 + sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
2318 + if (!sc->sc_bar1) {
2319 + device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
2322 + sc->sc_bar1_lastreg = (bus_size_t) -1;
2324 + /* fix up the bus size */
2325 + if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
2326 + device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
2329 + if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
2330 + device_printf(sc->sc_dev,
2331 + "No usable consistent DMA configuration, aborting.\n");
2335 + hifn_set_retry(sc);
2338 + * Setup the area where the Hifn DMA's descriptors
2339 + * and associated data structures.
2341 + sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
2342 + sizeof(*sc->sc_dma),
2343 + &sc->sc_dma_physaddr);
2344 + if (!sc->sc_dma) {
2345 + device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
2348 + bzero(sc->sc_dma, sizeof(*sc->sc_dma));
2351 + * Reset the board and do the ``secret handshake''
2352 + * to enable the crypto support. Then complete the
2353 + * initialization procedure by setting up the interrupt
2354 + * and hooking in to the system crypto support so we'll
2355 + * get used for system services like the crypto device,
2356 + * IPsec, RNG device, etc.
2358 + hifn_reset_board(sc, 0);
2360 + if (hifn_enable_crypto(sc) != 0) {
2361 + device_printf(sc->sc_dev, "crypto enabling failed\n");
2364 + hifn_reset_puc(sc);
2366 + hifn_init_dma(sc);
2367 + hifn_init_pci_registers(sc);
2369 + pci_set_master(sc->sc_pcidev);
2371 + /* XXX can't dynamically determine ram type for 795x; force dram */
2372 + if (sc->sc_flags & HIFN_IS_7956)
2373 + sc->sc_drammodel = 1;
2374 + else if (hifn_ramtype(sc))
2377 + if (sc->sc_drammodel == 0)
2378 + hifn_sramsize(sc);
2380 + hifn_dramsize(sc);
2383 + * Workaround for NetSec 7751 rev A: half ram size because two
2384 + * of the address lines were left floating
2386 + if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
2387 + pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
2388 + pci_get_revid(dev) == 0x61) /*XXX???*/
2389 + sc->sc_ramsize >>= 1;
2392 + * Arrange the interrupt line.
2394 + rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
2396 + device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
2399 + sc->sc_irq = dev->irq;
2401 + hifn_sessions(sc);
2404 + * NB: Keep only the low 16 bits; this masks the chip id
2407 + rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
2409 + rseg = sc->sc_ramsize / 1024;
2411 + if (sc->sc_ramsize >= (1024 * 1024)) {
2415 + device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
2416 + hifn_partname(sc), rev,
2417 + rseg, rbase, sc->sc_drammodel ? 'd' : 's');
2418 + if (sc->sc_flags & HIFN_IS_7956)
2419 + printf(", pll=0x%x<%s clk, %ux mult>",
2421 + sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
2422 + 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
2425 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
2426 + if (sc->sc_cid < 0) {
2427 + device_printf(sc->sc_dev, "could not get crypto driver id\n");
2431 + WRITE_REG_0(sc, HIFN_0_PUCNFG,
2432 + READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
2433 + ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2436 + case HIFN_PUSTAT_ENA_2:
2437 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
2438 + crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
2439 + if (sc->sc_flags & HIFN_HAS_AES)
2440 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
2442 + case HIFN_PUSTAT_ENA_1:
2443 + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
2444 + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
2445 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
2446 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
2447 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
2451 + if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
2452 + hifn_init_pubrng(sc);
2454 + init_timer(&sc->sc_tickto);
2455 + sc->sc_tickto.function = hifn_tick;
2456 + sc->sc_tickto.data = (unsigned long) sc->sc_num;
2457 + mod_timer(&sc->sc_tickto, jiffies + HZ);
2462 + if (sc->sc_cid >= 0)
2463 + crypto_unregister_all(sc->sc_cid);
2464 + if (sc->sc_irq != -1)
2465 + free_irq(sc->sc_irq, sc);
2467 + /* Turn off DMA polling */
2468 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2469 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2471 + pci_free_consistent(sc->sc_pcidev,
2472 + sizeof(*sc->sc_dma),
2473 + sc->sc_dma, sc->sc_dma_physaddr);
2480 + * Detach an interface that successfully probed.
2483 +hifn_remove(struct pci_dev *dev)
2485 + struct hifn_softc *sc = pci_get_drvdata(dev);
2486 + unsigned long l_flags;
2488 + DPRINTF("%s()\n", __FUNCTION__);
2490 + KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
2492 + /* disable interrupts */
2494 + WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
2497 + /*XXX other resources */
2498 + del_timer_sync(&sc->sc_tickto);
2500 + /* Turn off DMA polling */
2501 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2502 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2504 + crypto_unregister_all(sc->sc_cid);
2506 + free_irq(sc->sc_irq, sc);
2508 + pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
2509 + sc->sc_dma, sc->sc_dma_physaddr);
2514 +hifn_init_pubrng(struct hifn_softc *sc)
2518 + DPRINTF("%s()\n", __FUNCTION__);
2520 + if ((sc->sc_flags & HIFN_IS_7811) == 0) {
2521 + /* Reset 7951 public key/rng engine */
2522 + WRITE_REG_1(sc, HIFN_1_PUB_RESET,
2523 + READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
2525 + for (i = 0; i < 100; i++) {
2527 + if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
2528 + HIFN_PUBRST_RESET) == 0)
2533 + device_printf(sc->sc_dev, "public key init failed\n");
2538 + /* Enable the rng, if available */
2539 +#ifdef CONFIG_OCF_RANDOMHARVEST
2540 + if (sc->sc_flags & HIFN_HAS_RNG) {
2541 + if (sc->sc_flags & HIFN_IS_7811) {
2543 + r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
2544 + if (r & HIFN_7811_RNGENA_ENA) {
2545 + r &= ~HIFN_7811_RNGENA_ENA;
2546 + WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
2548 + WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
2549 + HIFN_7811_RNGCFG_DEFL);
2550 + r |= HIFN_7811_RNGENA_ENA;
2551 + WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
2553 + WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
2554 + READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
2557 + sc->sc_rngfirst = 1;
2558 + crypto_rregister(sc->sc_cid, hifn_read_random, sc);
2562 + /* Enable public key engine, if available */
2563 + if (sc->sc_flags & HIFN_HAS_PUBLIC) {
2564 + WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
2565 + sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
2566 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2567 +#ifdef HIFN_VULCANDEV
2568 + sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
2569 + UID_ROOT, GID_WHEEL, 0666,
2571 + sc->sc_pkdev->si_drv1 = sc;
2578 +#ifdef CONFIG_OCF_RANDOMHARVEST
2580 +hifn_read_random(void *arg, u_int32_t *buf, int len)
2582 + struct hifn_softc *sc = (struct hifn_softc *) arg;
2589 + if (sc->sc_flags & HIFN_IS_7811) {
2590 + /* ONLY VALID ON 7811!!!! */
2591 + for (i = 0; i < 5; i++) {
2592 + sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
2593 + if (sts & HIFN_7811_RNGSTS_UFL) {
2594 + device_printf(sc->sc_dev,
2595 + "RNG underflow: disabling\n");
2596 + /* DAVIDM perhaps return -1 */
2599 + if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
2603 + * There are at least two words in the RNG FIFO
2607 + buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
2609 + buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
2612 + buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);
2614 + /* NB: discard first data read */
2615 + if (sc->sc_rngfirst) {
2616 + sc->sc_rngfirst = 0;
2622 +#endif /* CONFIG_OCF_RANDOMHARVEST */
2625 +hifn_puc_wait(struct hifn_softc *sc)
2628 + int reg = HIFN_0_PUCTRL;
2630 + if (sc->sc_flags & HIFN_IS_7956) {
2631 + reg = HIFN_0_PUCTRL2;
2634 + for (i = 5000; i > 0; i--) {
2636 + if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
2640 + device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
2641 + READ_REG_0(sc, HIFN_0_PUCTRL));
2645 + * Reset the processing unit.
2648 +hifn_reset_puc(struct hifn_softc *sc)
2650 + /* Reset processing unit */
2651 + int reg = HIFN_0_PUCTRL;
2653 + if (sc->sc_flags & HIFN_IS_7956) {
2654 + reg = HIFN_0_PUCTRL2;
2656 + WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
2658 + hifn_puc_wait(sc);
2662 + * Set the Retry and TRDY registers; note that we set them to
2663 + * zero because the 7811 locks up when forced to retry (section
2664 + * 3.6 of "Specification Update SU-0014-04". Not clear if we
2665 + * should do this for all Hifn parts, but it doesn't seem to hurt.
2668 +hifn_set_retry(struct hifn_softc *sc)
2670 + DPRINTF("%s()\n", __FUNCTION__);
2671 + /* NB: RETRY only responds to 8-bit reads/writes */
2672 + pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
2673 + pci_write_config_dword(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
2677 + * Resets the board. Values in the registers are left as is
2678 + * from the reset (i.e. initial values are assigned elsewhere).
2681 +hifn_reset_board(struct hifn_softc *sc, int full)
2685 + DPRINTF("%s()\n", __FUNCTION__);
2687 + * Set polling in the DMA configuration register to zero. 0x7 avoids
2688 + * resetting the board and zeros out the other fields.
2690 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2691 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2694 + * Now that polling has been disabled, we have to wait 1 ms
2695 + * before resetting the board.
2699 + /* Reset the DMA unit */
2701 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
2704 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
2705 + HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
2706 + hifn_reset_puc(sc);
2709 + KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
2710 + bzero(sc->sc_dma, sizeof(*sc->sc_dma));
2712 + /* Bring dma unit out of reset */
2713 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2714 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2716 + hifn_puc_wait(sc);
2717 + hifn_set_retry(sc);
2719 + if (sc->sc_flags & HIFN_IS_7811) {
2720 + for (reg = 0; reg < 1000; reg++) {
2721 + if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
2722 + HIFN_MIPSRST_CRAMINIT)
2727 + device_printf(sc->sc_dev, ": cram init timeout\n");
2729 + /* set up DMA configuration register #2 */
2730 + /* turn off all PK and BAR0 swaps */
2731 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
2732 + (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
2733 + (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
2734 + (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
2735 + (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
2740 +hifn_next_signature(u_int32_t a, u_int cnt)
2745 + for (i = 0; i < cnt; i++) {
2747 + /* get the parity */
2748 + v = a & 0x80080125;
2755 + a = (v & 1) ^ (a << 1);
2763 + * Checks to see if crypto is already enabled. If crypto isn't enable,
2764 + * "hifn_enable_crypto" is called to enable it. The check is important,
2765 + * as enabling crypto twice will lock the board.
2768 +hifn_enable_crypto(struct hifn_softc *sc)
2770 + u_int32_t dmacfg, ramcfg, encl, addr, i;
2771 + char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2772 + 0x00, 0x00, 0x00, 0x00 };
2774 + DPRINTF("%s()\n", __FUNCTION__);
2776 + ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
2777 + dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
2780 + * The RAM config register's encrypt level bit needs to be set before
2781 + * every read performed on the encryption level register.
2783 + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
2785 + encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2788 + * Make sure we don't re-unlock. Two unlocks kills chip until the
2791 + if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
2794 + device_printf(sc->sc_dev,
2795 + "Strong crypto already enabled!\n");
2800 + if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
2803 + device_printf(sc->sc_dev,
2804 + "Unknown encryption level 0x%x\n", encl);
2809 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
2810 + HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2812 + addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
2814 + WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
2817 + for (i = 0; i <= 12; i++) {
2818 + addr = hifn_next_signature(addr, offtbl[i] + 0x101);
2819 + WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
2824 + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
2825 + encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2829 + if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
2830 + device_printf(sc->sc_dev, "Engine is permanently "
2831 + "locked until next system reset!\n");
2833 + device_printf(sc->sc_dev, "Engine enabled "
2834 + "successfully!\n");
2839 + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
2840 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
2843 + case HIFN_PUSTAT_ENA_1:
2844 + case HIFN_PUSTAT_ENA_2:
2846 + case HIFN_PUSTAT_ENA_0:
2848 + device_printf(sc->sc_dev, "disabled\n");
2856 + * Give initial values to the registers listed in the "Register Space"
2857 + * section of the HIFN Software Development reference manual.
2860 +hifn_init_pci_registers(struct hifn_softc *sc)
2862 + DPRINTF("%s()\n", __FUNCTION__);
2864 + /* write fixed values needed by the Initialization registers */
2865 + WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
2866 + WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
2867 + WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
2869 + /* write all 4 ring address registers */
2870 + WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
2871 + offsetof(struct hifn_dma, cmdr[0]));
2872 + WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
2873 + offsetof(struct hifn_dma, srcr[0]));
2874 + WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
2875 + offsetof(struct hifn_dma, dstr[0]));
2876 + WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
2877 + offsetof(struct hifn_dma, resr[0]));
2881 + /* write status register */
2882 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
2883 + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
2884 + HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
2885 + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
2886 + HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
2887 + HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
2888 + HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
2889 + HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
2890 + HIFN_DMACSR_S_WAIT |
2891 + HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
2892 + HIFN_DMACSR_C_WAIT |
2893 + HIFN_DMACSR_ENGINE |
2894 + ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
2895 + HIFN_DMACSR_PUBDONE : 0) |
2896 + ((sc->sc_flags & HIFN_IS_7811) ?
2897 + HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
2899 + sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
2900 + sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
2901 + HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
2902 + HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
2903 + ((sc->sc_flags & HIFN_IS_7811) ?
2904 + HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
2905 + sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2906 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2909 + if (sc->sc_flags & HIFN_IS_7956) {
2912 + WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
2913 + HIFN_PUCNFG_TCALLPHASES |
2914 + HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
2916 +		/* turn off the clocks and ensure bypass is set */
2917 + pll = READ_REG_1(sc, HIFN_1_PLL);
2918 + pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
2919 + | HIFN_PLL_BP | HIFN_PLL_MBSET;
2920 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
2921 + DELAY(10*1000); /* 10ms */
2923 + /* change configuration */
2924 + pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
2925 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
2926 + DELAY(10*1000); /* 10ms */
2928 + /* disable bypass */
2929 + pll &= ~HIFN_PLL_BP;
2930 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
2931 + /* enable clocks with new configuration */
2932 + pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
2933 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
2935 + WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
2936 + HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
2937 + HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
2938 + (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
2941 + WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
2942 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2943 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
2944 + ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
2945 + ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
2949 + * The maximum number of sessions supported by the card
2950 + * is dependent on the amount of context ram, which
2951 + * encryption algorithms are enabled, and how compression
2952 + * is configured. This should be configured before this
2953 + * routine is called.
2956 +hifn_sessions(struct hifn_softc *sc)
2961 + DPRINTF("%s()\n", __FUNCTION__);
2963 + pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
2965 + if (pucnfg & HIFN_PUCNFG_COMPSING) {
2966 + if (pucnfg & HIFN_PUCNFG_ENCCNFG)
2971 + * 7955/7956 has internal context memory of 32K
2973 + if (sc->sc_flags & HIFN_IS_7956)
2974 + sc->sc_maxses = 32768 / ctxsize;
2976 + sc->sc_maxses = 1 +
2977 + ((sc->sc_ramsize - 32768) / ctxsize);
2979 + sc->sc_maxses = sc->sc_ramsize / 16384;
2981 + if (sc->sc_maxses > 2048)
2982 + sc->sc_maxses = 2048;
2986 + * Determine ram type (sram or dram). Board should be just out of a reset
2987 + * state when this is called.
2990 +hifn_ramtype(struct hifn_softc *sc)
2992 + u_int8_t data[8], dataexpect[8];
2995 + for (i = 0; i < sizeof(data); i++)
2996 + data[i] = dataexpect[i] = 0x55;
2997 + if (hifn_writeramaddr(sc, 0, data))
2999 + if (hifn_readramaddr(sc, 0, data))
3001 + if (bcmp(data, dataexpect, sizeof(data)) != 0) {
3002 + sc->sc_drammodel = 1;
3006 + for (i = 0; i < sizeof(data); i++)
3007 + data[i] = dataexpect[i] = 0xaa;
3008 + if (hifn_writeramaddr(sc, 0, data))
3010 + if (hifn_readramaddr(sc, 0, data))
3012 + if (bcmp(data, dataexpect, sizeof(data)) != 0) {
3013 + sc->sc_drammodel = 1;
3020 +#define HIFN_SRAM_MAX (32 << 20)
3021 +#define HIFN_SRAM_STEP_SIZE 16384
3022 +#define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
3025 +hifn_sramsize(struct hifn_softc *sc)
3029 + u_int8_t dataexpect[sizeof(data)];
3032 + for (i = 0; i < sizeof(data); i++)
3033 + data[i] = dataexpect[i] = i ^ 0x5a;
3035 + for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
3036 + a = i * HIFN_SRAM_STEP_SIZE;
3037 + bcopy(&i, data, sizeof(i));
3038 + hifn_writeramaddr(sc, a, data);
3041 + for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
3042 + a = i * HIFN_SRAM_STEP_SIZE;
3043 + bcopy(&i, dataexpect, sizeof(i));
3044 + if (hifn_readramaddr(sc, a, data) < 0)
3046 + if (bcmp(data, dataexpect, sizeof(data)) != 0)
3048 + sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
3055 + * XXX For dram boards, one should really try all of the
3056 + * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
3057 + * is already set up correctly.
3060 +hifn_dramsize(struct hifn_softc *sc)
3064 + if (sc->sc_flags & HIFN_IS_7956) {
3066 + * 7955/7956 have a fixed internal ram of only 32K.
3068 + sc->sc_ramsize = 32768;
3070 + cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
3071 + HIFN_PUCNFG_DRAMMASK;
3072 + sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
3078 +hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
3080 + struct hifn_dma *dma = sc->sc_dma;
3082 + DPRINTF("%s()\n", __FUNCTION__);
3084 + if (dma->cmdi == HIFN_D_CMD_RSIZE) {
3086 + dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3088 + dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
3089 + HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
3090 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3092 + *cmdp = dma->cmdi++;
3093 + dma->cmdk = dma->cmdi;
3095 + if (dma->srci == HIFN_D_SRC_RSIZE) {
3097 + dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3099 + dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
3100 + HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
3101 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3103 + *srcp = dma->srci++;
3104 + dma->srck = dma->srci;
3106 + if (dma->dsti == HIFN_D_DST_RSIZE) {
3108 + dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3110 + dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
3111 + HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
3112 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3114 + *dstp = dma->dsti++;
3115 + dma->dstk = dma->dsti;
3117 + if (dma->resi == HIFN_D_RES_RSIZE) {
3119 + dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3121 + dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
3122 + HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
3123 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3125 + *resp = dma->resi++;
3126 + dma->resk = dma->resi;
3130 +hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
3132 + struct hifn_dma *dma = sc->sc_dma;
3133 + hifn_base_command_t wc;
3134 + const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
3135 + int r, cmdi, resi, srci, dsti;
3137 + DPRINTF("%s()\n", __FUNCTION__);
3139 + wc.masks = htole16(3 << 13);
3140 + wc.session_num = htole16(addr >> 14);
3141 + wc.total_source_count = htole16(8);
3142 + wc.total_dest_count = htole16(addr & 0x3fff);
3144 + hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
3146 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3147 + HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
3148 + HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
3150 + /* build write command */
3151 + bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
3152 + *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
3153 + bcopy(data, &dma->test_src, sizeof(dma->test_src));
3155 + dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
3156 + + offsetof(struct hifn_dma, test_src));
3157 + dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
3158 + + offsetof(struct hifn_dma, test_dst));
3160 + dma->cmdr[cmdi].l = htole32(16 | masks);
3161 + dma->srcr[srci].l = htole32(8 | masks);
3162 + dma->dstr[dsti].l = htole32(4 | masks);
3163 + dma->resr[resi].l = htole32(4 | masks);
3165 + for (r = 10000; r >= 0; r--) {
3167 + if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
3171 + device_printf(sc->sc_dev, "writeramaddr -- "
3172 + "result[%d](addr %d) still valid\n", resi, addr);
3178 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3179 + HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
3180 + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
3186 +hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
3188 + struct hifn_dma *dma = sc->sc_dma;
3189 + hifn_base_command_t rc;
3190 + const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
3191 + int r, cmdi, srci, dsti, resi;
3193 + DPRINTF("%s()\n", __FUNCTION__);
3195 + rc.masks = htole16(2 << 13);
3196 + rc.session_num = htole16(addr >> 14);
3197 + rc.total_source_count = htole16(addr & 0x3fff);
3198 + rc.total_dest_count = htole16(8);
3200 + hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
3202 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3203 + HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
3204 + HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
3206 + bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
3207 + *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
3209 + dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
3210 + offsetof(struct hifn_dma, test_src));
3211 + dma->test_src = 0;
3212 + dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
3213 + offsetof(struct hifn_dma, test_dst));
3214 + dma->test_dst = 0;
3215 + dma->cmdr[cmdi].l = htole32(8 | masks);
3216 + dma->srcr[srci].l = htole32(8 | masks);
3217 + dma->dstr[dsti].l = htole32(8 | masks);
3218 + dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
3220 + for (r = 10000; r >= 0; r--) {
3222 + if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
3226 + device_printf(sc->sc_dev, "readramaddr -- "
3227 + "result[%d](addr %d) still valid\n", resi, addr);
3231 + bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
3234 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3235 + HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
3236 + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
3242 + * Initialize the descriptor rings.
3245 +hifn_init_dma(struct hifn_softc *sc)
3247 + struct hifn_dma *dma = sc->sc_dma;
3250 + DPRINTF("%s()\n", __FUNCTION__);
3252 + hifn_set_retry(sc);
3254 + /* initialize static pointer values */
3255 + for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
3256 + dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
3257 + offsetof(struct hifn_dma, command_bufs[i][0]));
3258 + for (i = 0; i < HIFN_D_RES_RSIZE; i++)
3259 + dma->resr[i].p = htole32(sc->sc_dma_physaddr +
3260 + offsetof(struct hifn_dma, result_bufs[i][0]));
3262 + dma->cmdr[HIFN_D_CMD_RSIZE].p =
3263 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
3264 + dma->srcr[HIFN_D_SRC_RSIZE].p =
3265 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
3266 + dma->dstr[HIFN_D_DST_RSIZE].p =
3267 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
3268 + dma->resr[HIFN_D_RES_RSIZE].p =
3269 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
3271 + dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
3272 + dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
3273 + dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
3277 + * Writes out the raw command buffer space. Returns the
3278 + * command buffer size.
3281 +hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
3283 + struct hifn_softc *sc = NULL;
3284 + u_int8_t *buf_pos;
3285 + hifn_base_command_t *base_cmd;
3286 + hifn_mac_command_t *mac_cmd;
3287 + hifn_crypt_command_t *cry_cmd;
3288 + int using_mac, using_crypt, len, ivlen;
3289 + u_int32_t dlen, slen;
3291 + DPRINTF("%s()\n", __FUNCTION__);
3294 + using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
3295 + using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
3297 + base_cmd = (hifn_base_command_t *)buf_pos;
3298 + base_cmd->masks = htole16(cmd->base_masks);
3299 + slen = cmd->src_mapsize;
3301 + dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
3303 + dlen = cmd->dst_mapsize;
3304 + base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
3305 + base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
3308 + base_cmd->session_num = htole16(
3309 + ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
3310 + ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
3311 + buf_pos += sizeof(hifn_base_command_t);
3314 + mac_cmd = (hifn_mac_command_t *)buf_pos;
3315 + dlen = cmd->maccrd->crd_len;
3316 + mac_cmd->source_count = htole16(dlen & 0xffff);
3318 + mac_cmd->masks = htole16(cmd->mac_masks |
3319 + ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
3320 + mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
3321 + mac_cmd->reserved = 0;
3322 + buf_pos += sizeof(hifn_mac_command_t);
3325 + if (using_crypt) {
3326 + cry_cmd = (hifn_crypt_command_t *)buf_pos;
3327 + dlen = cmd->enccrd->crd_len;
3328 + cry_cmd->source_count = htole16(dlen & 0xffff);
3330 + cry_cmd->masks = htole16(cmd->cry_masks |
3331 + ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
3332 + cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
3333 + cry_cmd->reserved = 0;
3334 + buf_pos += sizeof(hifn_crypt_command_t);
3337 + if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
3338 + bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
3339 + buf_pos += HIFN_MAC_KEY_LENGTH;
3342 + if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
3343 + switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
3344 + case HIFN_CRYPT_CMD_ALG_3DES:
3345 + bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
3346 + buf_pos += HIFN_3DES_KEY_LENGTH;
3348 + case HIFN_CRYPT_CMD_ALG_DES:
3349 + bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
3350 + buf_pos += HIFN_DES_KEY_LENGTH;
3352 + case HIFN_CRYPT_CMD_ALG_RC4:
3357 + clen = MIN(cmd->cklen, len);
3358 + bcopy(cmd->ck, buf_pos, clen);
3361 + } while (len > 0);
3362 + bzero(buf_pos, 4);
3365 + case HIFN_CRYPT_CMD_ALG_AES:
3367 + * AES keys are variable 128, 192 and
3368 + * 256 bits (16, 24 and 32 bytes).
3370 + bcopy(cmd->ck, buf_pos, cmd->cklen);
3371 + buf_pos += cmd->cklen;
3376 + if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
3377 + switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
3378 + case HIFN_CRYPT_CMD_ALG_AES:
3379 + ivlen = HIFN_AES_IV_LENGTH;
3382 + ivlen = HIFN_IV_LENGTH;
3385 + bcopy(cmd->iv, buf_pos, ivlen);
3389 + if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
3390 + bzero(buf_pos, 8);
3394 + return (buf_pos - buf);
3398 +hifn_dmamap_aligned(struct hifn_operand *op)
3400 + struct hifn_softc *sc = NULL;
3403 + DPRINTF("%s()\n", __FUNCTION__);
3405 + for (i = 0; i < op->nsegs; i++) {
3406 + if (op->segs[i].ds_addr & 3)
3408 + if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
3414 +static __inline int
3415 +hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
3417 + struct hifn_dma *dma = sc->sc_dma;
3419 + if (++idx == HIFN_D_DST_RSIZE) {
3420 + dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
3421 + HIFN_D_MASKDONEIRQ);
3422 + HIFN_DSTR_SYNC(sc, idx,
3423 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3430 +hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
3432 + struct hifn_dma *dma = sc->sc_dma;
3433 + struct hifn_operand *dst = &cmd->dst;
3435 + int idx, used = 0, i;
3437 + DPRINTF("%s()\n", __FUNCTION__);
3440 + for (i = 0; i < dst->nsegs - 1; i++) {
3441 + dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
3442 + dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
3444 + dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3445 + HIFN_DSTR_SYNC(sc, idx,
3446 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3449 + idx = hifn_dmamap_dstwrap(sc, idx);
3452 + if (cmd->sloplen == 0) {
3453 + p = dst->segs[i].ds_addr;
3454 + l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
3455 + dst->segs[i].ds_len;
3457 + p = sc->sc_dma_physaddr +
3458 + offsetof(struct hifn_dma, slop[cmd->slopidx]);
3459 + l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
3460 + sizeof(u_int32_t);
3462 + if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
3463 + dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
3464 + dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
3465 + (dst->segs[i].ds_len - cmd->sloplen));
3467 + dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3468 + HIFN_DSTR_SYNC(sc, idx,
3469 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3472 + idx = hifn_dmamap_dstwrap(sc, idx);
3475 + dma->dstr[idx].p = htole32(p);
3476 + dma->dstr[idx].l = htole32(l);
3478 + dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3479 + HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3482 + idx = hifn_dmamap_dstwrap(sc, idx);
3485 + dma->dstu += used;
3489 +static __inline int
3490 +hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
3492 + struct hifn_dma *dma = sc->sc_dma;
3494 + if (++idx == HIFN_D_SRC_RSIZE) {
3495 + dma->srcr[idx].l = htole32(HIFN_D_VALID |
3496 + HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
3497 + HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
3498 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3505 +hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
3507 + struct hifn_dma *dma = sc->sc_dma;
3508 + struct hifn_operand *src = &cmd->src;
3510 + u_int32_t last = 0;
3512 + DPRINTF("%s()\n", __FUNCTION__);
3515 + for (i = 0; i < src->nsegs; i++) {
3516 + if (i == src->nsegs - 1)
3517 + last = HIFN_D_LAST;
3519 + dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
3520 + dma->srcr[idx].l = htole32(src->segs[i].ds_len |
3521 + HIFN_D_MASKDONEIRQ | last);
3523 + dma->srcr[idx].l |= htole32(HIFN_D_VALID);
3524 + HIFN_SRCR_SYNC(sc, idx,
3525 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3527 + idx = hifn_dmamap_srcwrap(sc, idx);
3530 + dma->srcu += src->nsegs;
3537 + struct hifn_softc *sc,
3538 + struct hifn_command *cmd,
3539 + struct cryptop *crp,
3542 + struct hifn_dma *dma = sc->sc_dma;
3543 + u_int32_t cmdlen, csr;
3544 + int cmdi, resi, err = 0;
3545 + unsigned long l_flags;
3547 + DPRINTF("%s()\n", __FUNCTION__);
3550 + * need 1 cmd, and 1 res
3552 + * NB: check this first since it's easy.
3555 + if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
3556 + (dma->resu + 1) > HIFN_D_RES_RSIZE) {
3559 + device_printf(sc->sc_dev,
3560 + "cmd/result exhaustion, cmdu %u resu %u\n",
3561 + dma->cmdu, dma->resu);
3564 + hifnstats.hst_nomem_cr++;
3565 + sc->sc_needwakeup |= CRYPTO_SYMQ;
3567 + return (ERESTART);
3570 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
3571 + if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
3572 + hifnstats.hst_nomem_load++;
3576 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
3577 + if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
3578 + hifnstats.hst_nomem_load++;
3583 + if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
3584 + hifnstats.hst_nomem_load++;
3590 + if (hifn_dmamap_aligned(&cmd->src)) {
3591 + cmd->sloplen = cmd->src_mapsize & 3;
3592 + cmd->dst = cmd->src;
3594 + if (crp->crp_flags & CRYPTO_F_IOV) {
3595 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
3598 + } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
3601 + struct mbuf *m, *m0, *mlast;
3603 + KASSERT(cmd->dst_m == cmd->src_m,
3604 + ("hifn_crypto: dst_m initialized improperly"));
3605 + hifnstats.hst_unaligned++;
3607 +			 * Source is not aligned on a longword boundary.
3608 +			 * Copy the data to ensure alignment. If we fail
3609 + * to allocate mbufs or clusters while doing this
3610 + * we return ERESTART so the operation is requeued
3611 + * at the crypto later, but only if there are
3612 + * ops already posted to the hardware; otherwise we
3613 + * have no guarantee that we'll be re-entered.
3615 + totlen = cmd->src_mapsize;
3616 + if (cmd->src_m->m_flags & M_PKTHDR) {
3618 + MGETHDR(m0, M_DONTWAIT, MT_DATA);
3619 + if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
3625 + MGET(m0, M_DONTWAIT, MT_DATA);
3628 + hifnstats.hst_nomem_mbuf++;
3629 + err = dma->cmdu ? ERESTART : ENOMEM;
3632 + if (totlen >= MINCLSIZE) {
3633 + MCLGET(m0, M_DONTWAIT);
3634 + if ((m0->m_flags & M_EXT) == 0) {
3635 + hifnstats.hst_nomem_mcl++;
3636 + err = dma->cmdu ? ERESTART : ENOMEM;
3643 + m0->m_pkthdr.len = m0->m_len = len;
3646 + while (totlen > 0) {
3647 + MGET(m, M_DONTWAIT, MT_DATA);
3649 + hifnstats.hst_nomem_mbuf++;
3650 + err = dma->cmdu ? ERESTART : ENOMEM;
3655 + if (totlen >= MINCLSIZE) {
3656 + MCLGET(m, M_DONTWAIT);
3657 + if ((m->m_flags & M_EXT) == 0) {
3658 + hifnstats.hst_nomem_mcl++;
3659 + err = dma->cmdu ? ERESTART : ENOMEM;
3660 + mlast->m_next = m;
3668 + m0->m_pkthdr.len += len;
3671 + mlast->m_next = m;
3676 + device_printf(sc->sc_dev,
3677 + "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
3678 + __FILE__, __LINE__);
3683 + device_printf(sc->sc_dev,
3684 + "%s,%d: unaligned contig buffers not implemented\n",
3685 + __FILE__, __LINE__);
3691 + if (cmd->dst_map == NULL) {
3692 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
3693 + if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
3694 + hifnstats.hst_nomem_map++;
3698 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
3699 + if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
3700 + hifnstats.hst_nomem_load++;
3705 + if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
3706 + hifnstats.hst_nomem_load++;
3715 + device_printf(sc->sc_dev,
3716 + "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
3717 + READ_REG_1(sc, HIFN_1_DMA_CSR),
3718 + READ_REG_1(sc, HIFN_1_DMA_IER),
3719 + dma->cmdu, dma->srcu, dma->dstu, dma->resu,
3720 + cmd->src_nsegs, cmd->dst_nsegs);
3725 + if (cmd->src_map == cmd->dst_map) {
3726 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3727 + BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
3729 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3730 + BUS_DMASYNC_PREWRITE);
3731 + bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
3732 + BUS_DMASYNC_PREREAD);
3737 + * need N src, and N dst
3739 + if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
3740 + (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
3743 + device_printf(sc->sc_dev,
3744 + "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
3745 + dma->srcu, cmd->src_nsegs,
3746 + dma->dstu, cmd->dst_nsegs);
3749 + hifnstats.hst_nomem_sd++;
3754 + if (dma->cmdi == HIFN_D_CMD_RSIZE) {
3756 + dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3758 + dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
3759 + HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
3760 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3762 + cmdi = dma->cmdi++;
3763 + cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
3764 + HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
3766 + /* .p for command/result already set */
3767 + dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
3768 + HIFN_D_MASKDONEIRQ);
3770 + dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
3771 + HIFN_CMDR_SYNC(sc, cmdi,
3772 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3776 + * We don't worry about missing an interrupt (which a "command wait"
3777 + * interrupt salvages us from), unless there is more than one command
3780 + if (dma->cmdu > 1) {
3781 + sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
3782 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
3785 + hifnstats.hst_ipackets++;
3786 + hifnstats.hst_ibytes += cmd->src_mapsize;
3788 + hifn_dmamap_load_src(sc, cmd);
3791 + * Unlike other descriptors, we don't mask done interrupt from
3792 + * result descriptor.
3796 + device_printf(sc->sc_dev, "load res\n");
3798 + if (dma->resi == HIFN_D_RES_RSIZE) {
3800 + dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3802 + dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
3803 + HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
3804 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3806 + resi = dma->resi++;
3807 + KASSERT(dma->hifn_commands[resi] == NULL,
3808 + ("hifn_crypto: command slot %u busy", resi));
3809 + dma->hifn_commands[resi] = cmd;
3810 + HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
3811 + if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
3812 + dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
3813 + HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
3815 + dma->resr[resi].l |= htole32(HIFN_D_VALID);
3816 + sc->sc_curbatch++;
3817 + if (sc->sc_curbatch > hifnstats.hst_maxbatch)
3818 + hifnstats.hst_maxbatch = sc->sc_curbatch;
3819 + hifnstats.hst_totbatch++;
3821 + dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
3823 + dma->resr[resi].l |= htole32(HIFN_D_VALID);
3824 + sc->sc_curbatch = 0;
3826 + HIFN_RESR_SYNC(sc, resi,
3827 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3831 + cmd->slopidx = resi;
3833 + hifn_dmamap_load_dst(sc, cmd);
3836 + if (sc->sc_c_busy == 0) {
3837 + csr |= HIFN_DMACSR_C_CTRL_ENA;
3838 + sc->sc_c_busy = 1;
3840 + if (sc->sc_s_busy == 0) {
3841 + csr |= HIFN_DMACSR_S_CTRL_ENA;
3842 + sc->sc_s_busy = 1;
3844 + if (sc->sc_r_busy == 0) {
3845 + csr |= HIFN_DMACSR_R_CTRL_ENA;
3846 + sc->sc_r_busy = 1;
3848 + if (sc->sc_d_busy == 0) {
3849 + csr |= HIFN_DMACSR_D_CTRL_ENA;
3850 + sc->sc_d_busy = 1;
3853 + WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
3857 + device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
3858 + READ_REG_1(sc, HIFN_1_DMA_CSR),
3859 + READ_REG_1(sc, HIFN_1_DMA_IER));
3863 + sc->sc_active = 5;
3865 + KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
3866 + return (err); /* success */
3869 + if (cmd->src_map != cmd->dst_map)
3870 + pci_unmap_buf(sc, &cmd->dst);
3873 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
3874 + if (cmd->src_skb != cmd->dst_skb)
3876 + m_freem(cmd->dst_m);
3878 + device_printf(sc->sc_dev,
3879 + "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
3880 + __FILE__, __LINE__);
3883 + pci_unmap_buf(sc, &cmd->src);
3890 +hifn_tick(unsigned long arg)
3892 + struct hifn_softc *sc;
3893 + unsigned long l_flags;
3895 + if (arg >= HIFN_MAX_CHIPS)
3897 + sc = hifn_chip_idx[arg];
3902 + if (sc->sc_active == 0) {
3903 + struct hifn_dma *dma = sc->sc_dma;
3906 + if (dma->cmdu == 0 && sc->sc_c_busy) {
3907 + sc->sc_c_busy = 0;
3908 + r |= HIFN_DMACSR_C_CTRL_DIS;
3910 + if (dma->srcu == 0 && sc->sc_s_busy) {
3911 + sc->sc_s_busy = 0;
3912 + r |= HIFN_DMACSR_S_CTRL_DIS;
3914 + if (dma->dstu == 0 && sc->sc_d_busy) {
3915 + sc->sc_d_busy = 0;
3916 + r |= HIFN_DMACSR_D_CTRL_DIS;
3918 + if (dma->resu == 0 && sc->sc_r_busy) {
3919 + sc->sc_r_busy = 0;
3920 + r |= HIFN_DMACSR_R_CTRL_DIS;
3923 + WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
3927 + mod_timer(&sc->sc_tickto, jiffies + HZ);
3931 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
3932 +hifn_intr(int irq, void *arg)
3934 +hifn_intr(int irq, void *arg, struct pt_regs *regs)
3937 + struct hifn_softc *sc = arg;
3938 + struct hifn_dma *dma;
3939 + u_int32_t dmacsr, restart;
3941 + unsigned long l_flags;
3943 + dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
3945 + /* Nothing in the DMA unit interrupted */
3946 + if ((dmacsr & sc->sc_dmaier) == 0)
3955 + device_printf(sc->sc_dev,
3956 + "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
3957 + dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
3958 + dma->cmdi, dma->srci, dma->dsti, dma->resi,
3959 + dma->cmdk, dma->srck, dma->dstk, dma->resk,
3960 + dma->cmdu, dma->srcu, dma->dstu, dma->resu);
3964 + WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
3966 + if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
3967 + (dmacsr & HIFN_DMACSR_PUBDONE))
3968 + WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
3969 + READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
3971 + restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
3973 + device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
3975 + if (sc->sc_flags & HIFN_IS_7811) {
3976 + if (dmacsr & HIFN_DMACSR_ILLR)
3977 + device_printf(sc->sc_dev, "illegal read\n");
3978 + if (dmacsr & HIFN_DMACSR_ILLW)
3979 + device_printf(sc->sc_dev, "illegal write\n");
3982 + restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
3983 + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
3985 + device_printf(sc->sc_dev, "abort, resetting.\n");
3986 + hifnstats.hst_abort++;
3989 + return IRQ_HANDLED;
3992 + if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
3994 + * If no slots to process and we receive a "waiting on
3995 + * command" interrupt, we disable the "waiting on command"
3996 + * (by clearing it).
3998 + sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
3999 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
4002 + /* clear the rings */
4003 + i = dma->resk; u = dma->resu;
4005 + HIFN_RESR_SYNC(sc, i,
4006 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4007 + if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
4008 + HIFN_RESR_SYNC(sc, i,
4009 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4013 + if (i != HIFN_D_RES_RSIZE) {
4014 + struct hifn_command *cmd;
4015 + u_int8_t *macbuf = NULL;
4017 + HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
4018 + cmd = dma->hifn_commands[i];
4019 + KASSERT(cmd != NULL,
4020 + ("hifn_intr: null command slot %u", i));
4021 + dma->hifn_commands[i] = NULL;
4023 + if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
4024 + macbuf = dma->result_bufs[i];
4028 + hifn_callback(sc, cmd, macbuf);
4029 + hifnstats.hst_opackets++;
4033 + if (++i == (HIFN_D_RES_RSIZE + 1))
4036 + dma->resk = i; dma->resu = u;
4038 + i = dma->srck; u = dma->srcu;
4040 + if (i == HIFN_D_SRC_RSIZE)
4042 + HIFN_SRCR_SYNC(sc, i,
4043 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4044 + if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
4045 + HIFN_SRCR_SYNC(sc, i,
4046 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4051 + dma->srck = i; dma->srcu = u;
4053 + i = dma->cmdk; u = dma->cmdu;
4055 + HIFN_CMDR_SYNC(sc, i,
4056 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4057 + if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
4058 + HIFN_CMDR_SYNC(sc, i,
4059 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4062 + if (i != HIFN_D_CMD_RSIZE) {
4064 + HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
4066 + if (++i == (HIFN_D_CMD_RSIZE + 1))
4069 + dma->cmdk = i; dma->cmdu = u;
4073 + if (sc->sc_needwakeup) { /* XXX check high watermark */
4074 + int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
4077 + device_printf(sc->sc_dev,
4078 + "wakeup crypto (%x) u %d/%d/%d/%d\n",
4079 + sc->sc_needwakeup,
4080 + dma->cmdu, dma->srcu, dma->dstu, dma->resu);
4082 + sc->sc_needwakeup &= ~wakeup;
4083 + crypto_unblock(sc->sc_cid, wakeup);
4086 + return IRQ_HANDLED;
4090 + * Allocate a new 'session' and return an encoded session id. 'sidp'
4091 + * contains our registration id, and should contain an encoded session
4092 + * id on successful allocation.
4095 +hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
4097 + struct hifn_softc *sc = device_get_softc(dev);
4098 + struct cryptoini *c;
4099 + int mac = 0, cry = 0, sesn;
4100 + struct hifn_session *ses = NULL;
4101 + unsigned long l_flags;
4103 + DPRINTF("%s()\n", __FUNCTION__);
4105 + KASSERT(sc != NULL, ("hifn_newsession: null softc"));
4106 + if (sidp == NULL || cri == NULL || sc == NULL) {
4107 + DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
4112 + if (sc->sc_sessions == NULL) {
4113 + ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
4115 + if (ses == NULL) {
4120 + sc->sc_nsessions = 1;
4122 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
4123 + if (!sc->sc_sessions[sesn].hs_used) {
4124 + ses = &sc->sc_sessions[sesn];
4129 + if (ses == NULL) {
4130 + sesn = sc->sc_nsessions;
4131 + ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
4133 + if (ses == NULL) {
4137 + bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
4138 + bzero(sc->sc_sessions, sesn * sizeof(*ses));
4139 + kfree(sc->sc_sessions);
4140 + sc->sc_sessions = ses;
4141 + ses = &sc->sc_sessions[sesn];
4142 + sc->sc_nsessions++;
4147 + bzero(ses, sizeof(*ses));
4150 + for (c = cri; c != NULL; c = c->cri_next) {
4151 + switch (c->cri_alg) {
4154 + case CRYPTO_MD5_HMAC:
4155 + case CRYPTO_SHA1_HMAC:
4157 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4161 + ses->hs_mlen = c->cri_mlen;
4162 + if (ses->hs_mlen == 0) {
4163 + switch (c->cri_alg) {
4165 + case CRYPTO_MD5_HMAC:
4166 + ses->hs_mlen = 16;
4169 + case CRYPTO_SHA1_HMAC:
4170 + ses->hs_mlen = 20;
4175 + case CRYPTO_DES_CBC:
4176 + case CRYPTO_3DES_CBC:
4177 + case CRYPTO_AES_CBC:
4178 + /* XXX this may read fewer, does it matter? */
4179 + read_random(ses->hs_iv,
4180 + c->cri_alg == CRYPTO_AES_CBC ?
4181 + HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4185 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4191 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4195 + if (mac == 0 && cry == 0) {
4196 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4200 + *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
4206 + * Deallocate a session.
4207 + * XXX this routine should run a zero'd mac/encrypt key into context ram.
4208 + * XXX to blow away any keys already stored there.
4211 +hifn_freesession(device_t dev, u_int64_t tid)
4213 + struct hifn_softc *sc = device_get_softc(dev);
4214 + int session, error;
4215 + u_int32_t sid = CRYPTO_SESID2LID(tid);
4216 + unsigned long l_flags;
4218 + DPRINTF("%s()\n", __FUNCTION__);
4220 + KASSERT(sc != NULL, ("hifn_freesession: null softc"));
4222 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4227 + session = HIFN_SESSION(sid);
4228 + if (session < sc->sc_nsessions) {
4229 + bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
4232 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4241 +hifn_process(device_t dev, struct cryptop *crp, int hint)
4243 + struct hifn_softc *sc = device_get_softc(dev);
4244 + struct hifn_command *cmd = NULL;
4245 + int session, err, ivlen;
4246 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
4248 + DPRINTF("%s()\n", __FUNCTION__);
4250 + if (crp == NULL || crp->crp_callback == NULL) {
4251 + hifnstats.hst_invalid++;
4252 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4255 + session = HIFN_SESSION(crp->crp_sid);
4257 + if (sc == NULL || session >= sc->sc_nsessions) {
4258 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4263 + cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
4264 + if (cmd == NULL) {
4265 + hifnstats.hst_nomem++;
4269 + memset(cmd, 0, sizeof(*cmd));
4271 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
4272 + cmd->src_skb = (struct sk_buff *)crp->crp_buf;
4273 + cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
4274 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
4275 + cmd->src_io = (struct uio *)crp->crp_buf;
4276 + cmd->dst_io = (struct uio *)crp->crp_buf;
4278 + cmd->src_buf = crp->crp_buf;
4279 + cmd->dst_buf = crp->crp_buf;
4282 + crd1 = crp->crp_desc;
4283 + if (crd1 == NULL) {
4284 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4288 + crd2 = crd1->crd_next;
4290 + if (crd2 == NULL) {
4291 + if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
4292 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
4293 + crd1->crd_alg == CRYPTO_SHA1 ||
4294 + crd1->crd_alg == CRYPTO_MD5) {
4297 + } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
4298 + crd1->crd_alg == CRYPTO_3DES_CBC ||
4299 + crd1->crd_alg == CRYPTO_AES_CBC ||
4300 + crd1->crd_alg == CRYPTO_ARC4) {
4301 + if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
4302 + cmd->base_masks |= HIFN_BASE_CMD_DECODE;
4306 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4311 + if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
4312 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
4313 + crd1->crd_alg == CRYPTO_MD5 ||
4314 + crd1->crd_alg == CRYPTO_SHA1) &&
4315 + (crd2->crd_alg == CRYPTO_DES_CBC ||
4316 + crd2->crd_alg == CRYPTO_3DES_CBC ||
4317 + crd2->crd_alg == CRYPTO_AES_CBC ||
4318 + crd2->crd_alg == CRYPTO_ARC4) &&
4319 + ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
4320 + cmd->base_masks = HIFN_BASE_CMD_DECODE;
4323 + } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
4324 + crd1->crd_alg == CRYPTO_ARC4 ||
4325 + crd1->crd_alg == CRYPTO_3DES_CBC ||
4326 + crd1->crd_alg == CRYPTO_AES_CBC) &&
4327 + (crd2->crd_alg == CRYPTO_MD5_HMAC ||
4328 + crd2->crd_alg == CRYPTO_SHA1_HMAC ||
4329 + crd2->crd_alg == CRYPTO_MD5 ||
4330 + crd2->crd_alg == CRYPTO_SHA1) &&
4331 + (crd1->crd_flags & CRD_F_ENCRYPT)) {
4336 + * We cannot order the 7751 as requested
4338 + DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
4345 + cmd->enccrd = enccrd;
4346 + cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
4347 + switch (enccrd->crd_alg) {
4349 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
4351 + case CRYPTO_DES_CBC:
4352 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
4353 + HIFN_CRYPT_CMD_MODE_CBC |
4354 + HIFN_CRYPT_CMD_NEW_IV;
4356 + case CRYPTO_3DES_CBC:
4357 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
4358 + HIFN_CRYPT_CMD_MODE_CBC |
4359 + HIFN_CRYPT_CMD_NEW_IV;
4361 + case CRYPTO_AES_CBC:
4362 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
4363 + HIFN_CRYPT_CMD_MODE_CBC |
4364 + HIFN_CRYPT_CMD_NEW_IV;
4367 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4371 + if (enccrd->crd_alg != CRYPTO_ARC4) {
4372 + ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
4373 + HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4374 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
4375 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
4376 + bcopy(enccrd->crd_iv, cmd->iv, ivlen);
4378 + bcopy(sc->sc_sessions[session].hs_iv,
4381 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
4383 + crypto_copyback(crp->crp_flags,
4384 + crp->crp_buf, enccrd->crd_inject,
4388 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
4389 + bcopy(enccrd->crd_iv, cmd->iv, ivlen);
4391 + crypto_copydata(crp->crp_flags,
4392 + crp->crp_buf, enccrd->crd_inject,
4398 + if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
4399 + cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
4400 + cmd->ck = enccrd->crd_key;
4401 + cmd->cklen = enccrd->crd_klen >> 3;
4402 + cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
4405 + * Need to specify the size for the AES key in the masks.
4407 + if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
4408 + HIFN_CRYPT_CMD_ALG_AES) {
4409 + switch (cmd->cklen) {
4411 + cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
4414 + cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
4417 + cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
4420 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4428 + cmd->maccrd = maccrd;
4429 + cmd->base_masks |= HIFN_BASE_CMD_MAC;
4431 + switch (maccrd->crd_alg) {
4433 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
4434 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
4435 + HIFN_MAC_CMD_POS_IPSEC;
4437 + case CRYPTO_MD5_HMAC:
4438 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
4439 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
4440 + HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
4443 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
4444 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
4445 + HIFN_MAC_CMD_POS_IPSEC;
4447 + case CRYPTO_SHA1_HMAC:
4448 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
4449 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
4450 + HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
4454 + if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
4455 + maccrd->crd_alg == CRYPTO_MD5_HMAC) {
4456 + cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
4457 + bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
4458 + bzero(cmd->mac + (maccrd->crd_klen >> 3),
4459 + HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
4464 + cmd->session_num = session;
4467 + err = hifn_crypto(sc, cmd, crp, hint);
4470 + } else if (err == ERESTART) {
4472 + * There weren't enough resources to dispatch the request
4473 + * to the part. Notify the caller so they'll requeue this
4474 + * request and resubmit it again soon.
4478 + device_printf(sc->sc_dev, "requeue request\n");
4481 + sc->sc_needwakeup |= CRYPTO_SYMQ;
4488 + if (err == EINVAL)
4489 + hifnstats.hst_invalid++;
4491 + hifnstats.hst_nomem++;
4492 + crp->crp_etype = err;
4498 +hifn_abort(struct hifn_softc *sc)
4500 + struct hifn_dma *dma = sc->sc_dma;
4501 + struct hifn_command *cmd;
4502 + struct cryptop *crp;
4505 + DPRINTF("%s()\n", __FUNCTION__);
4507 + i = dma->resk; u = dma->resu;
4509 + cmd = dma->hifn_commands[i];
4510 + KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
4511 + dma->hifn_commands[i] = NULL;
4514 + if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
4515 + /* Salvage what we can. */
4518 + if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
4519 + macbuf = dma->result_bufs[i];
4523 + hifnstats.hst_opackets++;
4524 + hifn_callback(sc, cmd, macbuf);
4527 + if (cmd->src_map == cmd->dst_map) {
4528 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4529 + BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4531 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4532 + BUS_DMASYNC_POSTWRITE);
4533 + bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
4534 + BUS_DMASYNC_POSTREAD);
4538 + if (cmd->src_skb != cmd->dst_skb) {
4540 + m_freem(cmd->src_m);
4541 + crp->crp_buf = (caddr_t)cmd->dst_m;
4543 + device_printf(sc->sc_dev,
4544 + "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
4545 + __FILE__, __LINE__);
4549 + /* non-shared buffers cannot be restarted */
4550 + if (cmd->src_map != cmd->dst_map) {
4552 + * XXX should be EAGAIN, delayed until
4553 + * after the reset.
4555 + crp->crp_etype = ENOMEM;
4556 + pci_unmap_buf(sc, &cmd->dst);
4558 + crp->crp_etype = ENOMEM;
4560 + pci_unmap_buf(sc, &cmd->src);
4563 + if (crp->crp_etype != EAGAIN)
4567 + if (++i == HIFN_D_RES_RSIZE)
4571 + dma->resk = i; dma->resu = u;
4573 + hifn_reset_board(sc, 1);
4574 + hifn_init_dma(sc);
4575 + hifn_init_pci_registers(sc);
4579 +hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
4581 + struct hifn_dma *dma = sc->sc_dma;
4582 + struct cryptop *crp = cmd->crp;
4583 + struct cryptodesc *crd;
4586 + DPRINTF("%s()\n", __FUNCTION__);
4589 + if (cmd->src_map == cmd->dst_map) {
4590 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4591 + BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
4593 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4594 + BUS_DMASYNC_POSTWRITE);
4595 + bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
4596 + BUS_DMASYNC_POSTREAD);
4600 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
4601 + if (cmd->src_skb != cmd->dst_skb) {
4603 + crp->crp_buf = (caddr_t)cmd->dst_m;
4604 + totlen = cmd->src_mapsize;
4605 + for (m = cmd->dst_m; m != NULL; m = m->m_next) {
4606 + if (totlen < m->m_len) {
4607 + m->m_len = totlen;
4610 + totlen -= m->m_len;
4612 + cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
4613 + m_freem(cmd->src_m);
4615 + device_printf(sc->sc_dev,
4616 + "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
4617 + __FILE__, __LINE__);
4622 + if (cmd->sloplen != 0) {
4623 + crypto_copyback(crp->crp_flags, crp->crp_buf,
4624 + cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
4625 + (caddr_t)&dma->slop[cmd->slopidx]);
4628 + i = dma->dstk; u = dma->dstu;
4630 + if (i == HIFN_D_DST_RSIZE)
4633 + bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
4634 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4636 + if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
4638 + bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
4639 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4645 + dma->dstk = i; dma->dstu = u;
4647 + hifnstats.hst_obytes += cmd->dst_mapsize;
4649 + if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
4650 + HIFN_BASE_CMD_CRYPT) {
4651 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
4652 + if (crd->crd_alg != CRYPTO_DES_CBC &&
4653 + crd->crd_alg != CRYPTO_3DES_CBC &&
4654 + crd->crd_alg != CRYPTO_AES_CBC)
4656 + ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
4657 + HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4658 + crypto_copydata(crp->crp_flags, crp->crp_buf,
4659 + crd->crd_skip + crd->crd_len - ivlen, ivlen,
4660 + cmd->softc->sc_sessions[cmd->session_num].hs_iv);
4665 + if (macbuf != NULL) {
4666 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
4669 + if (crd->crd_alg != CRYPTO_MD5 &&
4670 + crd->crd_alg != CRYPTO_SHA1 &&
4671 + crd->crd_alg != CRYPTO_MD5_HMAC &&
4672 + crd->crd_alg != CRYPTO_SHA1_HMAC) {
4675 + len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
4676 + crypto_copyback(crp->crp_flags, crp->crp_buf,
4677 + crd->crd_inject, len, macbuf);
4682 + if (cmd->src_map != cmd->dst_map)
4683 + pci_unmap_buf(sc, &cmd->dst);
4684 + pci_unmap_buf(sc, &cmd->src);
4690 + * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
4691 + * and Group 1 registers; avoid conditions that could create
4692 + * burst writes by doing a read in between the writes.
4694 + * NB: The read we interpose is always to the same register;
4695 + * we do this because reading from an arbitrary (e.g. last)
4696 + * register may not always work.
4699 +hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
4701 + if (sc->sc_flags & HIFN_IS_7811) {
4702 + if (sc->sc_bar0_lastreg == reg - 4)
4703 + readl(sc->sc_bar0 + HIFN_0_PUCNFG);
4704 + sc->sc_bar0_lastreg = reg;
4706 + writel(val, sc->sc_bar0 + reg);
4710 +hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
4712 + if (sc->sc_flags & HIFN_IS_7811) {
4713 + if (sc->sc_bar1_lastreg == reg - 4)
4714 + readl(sc->sc_bar1 + HIFN_1_REVID);
4715 + sc->sc_bar1_lastreg = reg;
4717 + writel(val, sc->sc_bar1 + reg);
4721 +static struct pci_device_id hifn_pci_tbl[] = {
4722 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
4723 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4724 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
4725 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4726 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
4727 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4728 + { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
4729 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4730 + { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
4731 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4732 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
4733 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4735 + * Other vendors share this PCI ID as well, such as
4736 + * http://www.powercrypt.com, and obviously they also
4737 + * use the same key.
4739 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
4740 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4741 + { 0, 0, 0, 0, 0, 0, }
4743 +MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
4745 +static struct pci_driver hifn_driver = {
4747 + .id_table = hifn_pci_tbl,
4748 + .probe = hifn_probe,
4749 + .remove = hifn_remove,
4750 + /* add PM stuff here one day */
4753 +static int __init hifn_init (void)
4755 + struct hifn_softc *sc = NULL;
4758 + DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
4760 + rc = pci_register_driver(&hifn_driver);
4761 + pci_register_driver_compat(&hifn_driver, rc);
4766 +static void __exit hifn_exit (void)
4768 + pci_unregister_driver(&hifn_driver);
4771 +module_init(hifn_init);
4772 +module_exit(hifn_exit);
4774 +MODULE_LICENSE("BSD");
4775 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
4776 +MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
4778 +++ b/crypto/ocf/hifn/hifnHIPP.c
4781 + * Driver for Hifn HIPP-I/II chipset
4782 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
4784 + * Redistribution and use in source and binary forms, with or without
4785 + * modification, are permitted provided that the following conditions
4788 + * 1. Redistributions of source code must retain the above copyright
4789 + * notice, this list of conditions and the following disclaimer.
4790 + * 2. Redistributions in binary form must reproduce the above copyright
4791 + * notice, this list of conditions and the following disclaimer in the
4792 + * documentation and/or other materials provided with the distribution.
4793 + * 3. The name of the author may not be used to endorse or promote products
4794 + * derived from this software without specific prior written permission.
4796 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
4797 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
4798 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
4799 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
4800 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
4801 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
4802 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
4803 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4804 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
4805 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4807 + * Effort sponsored by Hifn Inc.
4812 + * Driver for various Hifn encryption processors.
4814 +#ifndef AUTOCONF_INCLUDED
4815 +#include <linux/config.h>
4817 +#include <linux/module.h>
4818 +#include <linux/init.h>
4819 +#include <linux/list.h>
4820 +#include <linux/slab.h>
4821 +#include <linux/wait.h>
4822 +#include <linux/sched.h>
4823 +#include <linux/pci.h>
4824 +#include <linux/delay.h>
4825 +#include <linux/interrupt.h>
4826 +#include <linux/spinlock.h>
4827 +#include <linux/random.h>
4828 +#include <linux/version.h>
4829 +#include <linux/skbuff.h>
4830 +#include <linux/uio.h>
4831 +#include <linux/sysfs.h>
4832 +#include <linux/miscdevice.h>
4833 +#include <asm/io.h>
4835 +#include <cryptodev.h>
4837 +#include "hifnHIPPreg.h"
4838 +#include "hifnHIPPvar.h"
4841 +#define DPRINTF(a...) if (hipp_debug) { \
4842 + printk("%s: ", sc ? \
4843 + device_get_nameunit(sc->sc_dev) : "hifn"); \
4847 +#define DPRINTF(a...)
4850 +typedef int bus_size_t;
4853 +pci_get_revid(struct pci_dev *dev)
4856 + pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
4860 +#define debug hipp_debug
4861 +int hipp_debug = 0;
4862 +module_param(hipp_debug, int, 0644);
4863 +MODULE_PARM_DESC(hipp_debug, "Enable debug");
4865 +int hipp_maxbatch = 1;
4866 +module_param(hipp_maxbatch, int, 0644);
4867 +MODULE_PARM_DESC(hipp_maxbatch, "max ops to batch w/o interrupt");
4869 +static int hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent);
4870 +static void hipp_remove(struct pci_dev *dev);
4871 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
4872 +static irqreturn_t hipp_intr(int irq, void *arg);
4874 +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs);
4877 +static int hipp_num_chips = 0;
4878 +static struct hipp_softc *hipp_chip_idx[HIPP_MAX_CHIPS];
4880 +static int hipp_newsession(device_t, u_int32_t *, struct cryptoini *);
4881 +static int hipp_freesession(device_t, u_int64_t);
4882 +static int hipp_process(device_t, struct cryptop *, int);
4884 +static device_method_t hipp_methods = {
4885 + /* crypto device methods */
4886 + DEVMETHOD(cryptodev_newsession, hipp_newsession),
4887 + DEVMETHOD(cryptodev_freesession,hipp_freesession),
4888 + DEVMETHOD(cryptodev_process, hipp_process),
4891 +static __inline u_int32_t
4892 +READ_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg)
4894 + u_int32_t v = readl(sc->sc_bar[barno] + reg);
4895 + //sc->sc_bar0_lastreg = (bus_size_t) -1;
4898 +static __inline void
4899 +WRITE_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg, u_int32_t val)
4901 + writel(val, sc->sc_bar[barno] + reg);
4904 +#define READ_REG_0(sc, reg) READ_REG(sc, 0, reg)
4905 +#define WRITE_REG_0(sc, reg, val) WRITE_REG(sc,0, reg, val)
4906 +#define READ_REG_1(sc, reg) READ_REG(sc, 1, reg)
4907 +#define WRITE_REG_1(sc, reg, val) WRITE_REG(sc,1, reg, val)
4910 +hipp_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
4916 +hipp_freesession(device_t dev, u_int64_t tid)
4922 +hipp_process(device_t dev, struct cryptop *crp, int hint)
4928 +hipp_partname(struct hipp_softc *sc, char buf[128], size_t blen)
4932 + switch (pci_get_vendor(sc->sc_pcidev)) {
4933 + case PCI_VENDOR_HIFN:
4934 + switch (pci_get_device(sc->sc_pcidev)) {
4935 + case PCI_PRODUCT_HIFN_7855: n = "Hifn 7855";
4936 + case PCI_PRODUCT_HIFN_8155: n = "Hifn 8155";
4937 + case PCI_PRODUCT_HIFN_6500: n = "Hifn 6500";
4942 + snprintf(buf, blen, "VID=%02x,PID=%02x",
4943 + pci_get_vendor(sc->sc_pcidev),
4944 + pci_get_device(sc->sc_pcidev));
4947 + strncat(buf, n, blen);
4952 +struct hipp_fs_entry {
4953 + struct attribute attr;
4959 +cryptoid_show(struct device *dev,
4960 + struct device_attribute *attr,
4963 + struct hipp_softc *sc;
4965 + sc = pci_get_drvdata(to_pci_dev (dev));
4966 + return sprintf (buf, "%d\n", sc->sc_cid);
4969 +struct device_attribute hipp_dev_cryptoid = __ATTR_RO(cryptoid);
4972 + * Attach an interface that successfully probed.
4975 +hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent)
4977 + struct hipp_softc *sc = NULL;
4985 + DPRINTF("%s()\n", __FUNCTION__);
4987 + if (pci_enable_device(dev) < 0)
4990 + if (pci_set_mwi(dev))
4994 + printk("hifn: found device with no IRQ assigned. check BIOS settings!");
4995 + pci_disable_device(dev);
4999 + sc = (struct hipp_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
5002 + memset(sc, 0, sizeof(*sc));
5004 + softc_device_init(sc, "hifn-hipp", hipp_num_chips, hipp_methods);
5006 + sc->sc_pcidev = dev;
5009 + sc->sc_num = hipp_num_chips++;
5011 + if (sc->sc_num < HIPP_MAX_CHIPS)
5012 + hipp_chip_idx[sc->sc_num] = sc;
5014 + pci_set_drvdata(sc->sc_pcidev, sc);
5016 + spin_lock_init(&sc->sc_mtx);
5019 + * Setup PCI resources.
5020 + * The READ_REG_0, WRITE_REG_0, READ_REG_1,
5021 + * and WRITE_REG_1 macros throughout the driver are used
5022 + * to permit better debugging.
5024 + for(i=0; i<4; i++) {
5025 + unsigned long mem_start, mem_len;
5026 + mem_start = pci_resource_start(sc->sc_pcidev, i);
5027 + mem_len = pci_resource_len(sc->sc_pcidev, i);
5028 + sc->sc_barphy[i] = (caddr_t)mem_start;
5029 + sc->sc_bar[i] = (ocf_iomem_t) ioremap(mem_start, mem_len);
5030 + if (!sc->sc_bar[i]) {
5031 + device_printf(sc->sc_dev, "cannot map bar%d register space\n", i);
5036 + //hipp_reset_board(sc, 0);
5037 + pci_set_master(sc->sc_pcidev);
5040 + * Arrange the interrupt line.
5042 + rc = request_irq(dev->irq, hipp_intr, IRQF_SHARED, "hifn", sc);
5044 + device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
5047 + sc->sc_irq = dev->irq;
5049 + rev = READ_REG_1(sc, HIPP_1_REVID) & 0xffff;
5053 + device_printf(sc->sc_dev, "%s, rev %u",
5054 + hipp_partname(sc, b, sizeof(b)), rev);
5058 + if (sc->sc_flags & HIFN_IS_7956)
5059 + printf(", pll=0x%x<%s clk, %ux mult>",
5061 + sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
5062 + 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
5066 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
5067 + if (sc->sc_cid < 0) {
5068 + device_printf(sc->sc_dev, "could not get crypto driver id\n");
5072 +#if 0 /* cannot work with a non-GPL module */
5073 + /* make a sysfs entry to let the world know what entry we got */
5074 + sysfs_create_file(&sc->sc_pcidev->dev.kobj, &hipp_dev_cryptoid.attr);
5078 + init_timer(&sc->sc_tickto);
5079 + sc->sc_tickto.function = hifn_tick;
5080 + sc->sc_tickto.data = (unsigned long) sc->sc_num;
5081 + mod_timer(&sc->sc_tickto, jiffies + HZ);
5084 +#if 0 /* no code here yet ?? */
5085 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
5091 + if (sc->sc_cid >= 0)
5092 + crypto_unregister_all(sc->sc_cid);
5093 + if (sc->sc_irq != -1)
5094 + free_irq(sc->sc_irq, sc);
5098 + /* Turn off DMA polling */
5099 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
5100 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
5102 + pci_free_consistent(sc->sc_pcidev,
5103 + sizeof(*sc->sc_dma),
5104 + sc->sc_dma, sc->sc_dma_physaddr);
5112 + * Detach an interface that successfully probed.
5115 +hipp_remove(struct pci_dev *dev)
5117 + struct hipp_softc *sc = pci_get_drvdata(dev);
5118 + unsigned long l_flags;
5120 + DPRINTF("%s()\n", __FUNCTION__);
5122 + /* disable interrupts */
5126 + WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
5129 + /*XXX other resources */
5130 + del_timer_sync(&sc->sc_tickto);
5132 + /* Turn off DMA polling */
5133 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
5134 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
5137 + crypto_unregister_all(sc->sc_cid);
5139 + free_irq(sc->sc_irq, sc);
5142 + pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
5143 + sc->sc_dma, sc->sc_dma_physaddr);
5147 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
5148 +static irqreturn_t hipp_intr(int irq, void *arg)
5150 +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs)
5153 + struct hipp_softc *sc = arg;
5155 + sc = sc; /* shut up compiler */
5157 + return IRQ_HANDLED;
5160 +static struct pci_device_id hipp_pci_tbl[] = {
5161 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7855,
5162 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
5163 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_8155,
5164 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
5166 +MODULE_DEVICE_TABLE(pci, hipp_pci_tbl);
5168 +static struct pci_driver hipp_driver = {
5170 + .id_table = hipp_pci_tbl,
5171 + .probe = hipp_probe,
5172 + .remove = hipp_remove,
5173 + /* add PM stuff here one day */
5176 +static int __init hipp_init (void)
5178 + struct hipp_softc *sc = NULL;
5181 + DPRINTF("%s(%p)\n", __FUNCTION__, hipp_init);
5183 + rc = pci_register_driver(&hipp_driver);
5184 + pci_register_driver_compat(&hipp_driver, rc);
5189 +static void __exit hipp_exit (void)
5191 + pci_unregister_driver(&hipp_driver);
5194 +module_init(hipp_init);
5195 +module_exit(hipp_exit);
5197 +MODULE_LICENSE("BSD");
5198 +MODULE_AUTHOR("Michael Richardson <mcr@xelerance.com>");
5199 +MODULE_DESCRIPTION("OCF driver for hifn HIPP-I/II PCI crypto devices");
5201 +++ b/crypto/ocf/hifn/hifnHIPPreg.h
5204 + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
5205 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
5207 + * Redistribution and use in source and binary forms, with or without
5208 + * modification, are permitted provided that the following conditions
5211 + * 1. Redistributions of source code must retain the above copyright
5212 + * notice, this list of conditions and the following disclaimer.
5213 + * 2. Redistributions in binary form must reproduce the above copyright
5214 + * notice, this list of conditions and the following disclaimer in the
5215 + * documentation and/or other materials provided with the distribution.
5216 + * 3. The name of the author may not be used to endorse or promote products
5217 + * derived from this software without specific prior written permission.
5220 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
5221 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
5222 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
5223 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
5224 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
5225 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5226 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5227 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5228 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
5229 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5231 + * Effort sponsored by Hifn inc.
5235 +#ifndef __HIFNHIPP_H__
5236 +#define __HIFNHIPP_H__
5239 + * PCI vendor and device identifiers
5241 +#define PCI_VENDOR_HIFN 0x13a3 /* Hifn */
5242 +#define PCI_PRODUCT_HIFN_6500 0x0006 /* 6500 */
5243 +#define PCI_PRODUCT_HIFN_7855 0x001f /* 7855 */
5244 +#define PCI_PRODUCT_HIFN_8155 0x999 /* XXX 8155 */
5246 +#define HIPP_1_REVID 0x01 /* BOGUS */
5248 +#endif /* __HIFNHIPP_H__ */
5250 +++ b/crypto/ocf/hifn/hifnHIPPvar.h
5253 + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
5254 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
5256 + * Redistribution and use in source and binary forms, with or without
5257 + * modification, are permitted provided that the following conditions
5260 + * 1. Redistributions of source code must retain the above copyright
5261 + * notice, this list of conditions and the following disclaimer.
5262 + * 2. Redistributions in binary form must reproduce the above copyright
5263 + * notice, this list of conditions and the following disclaimer in the
5264 + * documentation and/or other materials provided with the distribution.
5265 + * 3. The name of the author may not be used to endorse or promote products
5266 + * derived from this software without specific prior written permission.
5269 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
5270 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
5271 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
5272 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
5273 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
5274 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5275 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5276 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5277 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
5278 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5280 + * Effort sponsored by Hifn inc.
5284 +#ifndef __HIFNHIPPVAR_H__
5285 +#define __HIFNHIPPVAR_H__
5287 +#define HIPP_MAX_CHIPS 8
5290 + * Holds data specific to a single Hifn HIPP-I board.
5292 +struct hipp_softc {
5293 + softc_device_decl sc_dev;
5295 + struct pci_dev *sc_pcidev; /* device backpointer */
5296 + ocf_iomem_t sc_bar[5];
5297 + caddr_t sc_barphy[5]; /* physical address */
5298 + int sc_num; /* for multiple devs */
5299 + spinlock_t sc_mtx; /* per-instance lock */
5305 + u_int32_t sc_dmaier;
5306 + u_int32_t sc_drammodel; /* 1=dram, 0=sram */
5307 + u_int32_t sc_pllconfig; /* 7954/7955/7956 PLL config */
5309 + struct hifn_dma *sc_dma;
5310 + dma_addr_t sc_dma_physaddr;/* physical address of sc_dma */
5315 + struct hifn_session *sc_sessions;
5318 +#define HIFN_HAS_RNG 0x1 /* includes random number generator */
5319 +#define HIFN_HAS_PUBLIC 0x2 /* includes public key support */
5320 +#define HIFN_HAS_AES 0x4 /* includes AES support */
5321 +#define HIFN_IS_7811 0x8 /* Hifn 7811 part */
5322 +#define HIFN_IS_7956 0x10 /* Hifn 7956/7955 don't have SDRAM */
5324 + struct timer_list sc_tickto; /* for managing DMA */
5327 + int sc_rnghz; /* RNG polling frequency */
5329 + int sc_c_busy; /* command ring busy */
5330 + int sc_s_busy; /* source data ring busy */
5331 + int sc_d_busy; /* destination data ring busy */
5332 + int sc_r_busy; /* result ring busy */
5333 + int sc_active; /* for initial countdown */
5334 +	int			sc_needwakeup;	/* ops q'd waiting on resources */
5335 + int sc_curbatch; /* # ops submitted w/o int */
5337 + struct miscdevice sc_miscdev;
5341 +#define HIPP_LOCK(_sc) spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
5342 +#define HIPP_UNLOCK(_sc) spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
5344 +#endif /* __HIFNHIPPVAR_H__ */
5346 +++ b/crypto/ocf/safe/md5.c
5348 +/* $KAME: md5.c,v 1.5 2000/11/08 06:13:08 itojun Exp $ */
5350 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5351 + * All rights reserved.
5353 + * Redistribution and use in source and binary forms, with or without
5354 + * modification, are permitted provided that the following conditions
5356 + * 1. Redistributions of source code must retain the above copyright
5357 + * notice, this list of conditions and the following disclaimer.
5358 + * 2. Redistributions in binary form must reproduce the above copyright
5359 + * notice, this list of conditions and the following disclaimer in the
5360 + * documentation and/or other materials provided with the distribution.
5361 + * 3. Neither the name of the project nor the names of its contributors
5362 + * may be used to endorse or promote products derived from this software
5363 + * without specific prior written permission.
5365 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
5366 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5367 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5368 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
5369 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5370 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5371 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5372 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5373 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5374 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5379 +#include <sys/cdefs.h>
5380 +__FBSDID("$FreeBSD: src/sys/crypto/md5.c,v 1.9 2004/01/27 19:49:19 des Exp $");
5382 +#include <sys/types.h>
5383 +#include <sys/cdefs.h>
5384 +#include <sys/time.h>
5385 +#include <sys/systm.h>
5386 +#include <crypto/md5.h>
5389 +#define SHIFT(X, s) (((X) << (s)) | ((X) >> (32 - (s))))
5391 +#define F(X, Y, Z) (((X) & (Y)) | ((~X) & (Z)))
5392 +#define G(X, Y, Z) (((X) & (Z)) | ((Y) & (~Z)))
5393 +#define H(X, Y, Z) ((X) ^ (Y) ^ (Z))
5394 +#define I(X, Y, Z) ((Y) ^ ((X) | (~Z)))
5396 +#define ROUND1(a, b, c, d, k, s, i) { \
5397 + (a) = (a) + F((b), (c), (d)) + X[(k)] + T[(i)]; \
5398 + (a) = SHIFT((a), (s)); \
5399 + (a) = (b) + (a); \
5402 +#define ROUND2(a, b, c, d, k, s, i) { \
5403 + (a) = (a) + G((b), (c), (d)) + X[(k)] + T[(i)]; \
5404 + (a) = SHIFT((a), (s)); \
5405 + (a) = (b) + (a); \
5408 +#define ROUND3(a, b, c, d, k, s, i) { \
5409 + (a) = (a) + H((b), (c), (d)) + X[(k)] + T[(i)]; \
5410 + (a) = SHIFT((a), (s)); \
5411 + (a) = (b) + (a); \
5414 +#define ROUND4(a, b, c, d, k, s, i) { \
5415 + (a) = (a) + I((b), (c), (d)) + X[(k)] + T[(i)]; \
5416 + (a) = SHIFT((a), (s)); \
5417 + (a) = (b) + (a); \
5440 +#define MD5_A0 0x67452301
5441 +#define MD5_B0 0xefcdab89
5442 +#define MD5_C0 0x98badcfe
5443 +#define MD5_D0 0x10325476
5445 +/* Integer part of 4294967296 times abs(sin(i)), where i is in radians. */
5446 +static const u_int32_t T[65] = {
5448 + 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
5449 + 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
5450 + 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
5451 + 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
5453 + 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
5454 + 0xd62f105d, 0x2441453, 0xd8a1e681, 0xe7d3fbc8,
5455 + 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
5456 + 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
5458 + 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
5459 + 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
5460 + 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x4881d05,
5461 + 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
5463 + 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
5464 + 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
5465 + 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
5466 + 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
5469 +static const u_int8_t md5_paddat[MD5_BUFLEN] = {
5470 + 0x80, 0, 0, 0, 0, 0, 0, 0,
5471 + 0, 0, 0, 0, 0, 0, 0, 0,
5472 + 0, 0, 0, 0, 0, 0, 0, 0,
5473 + 0, 0, 0, 0, 0, 0, 0, 0,
5474 + 0, 0, 0, 0, 0, 0, 0, 0,
5475 + 0, 0, 0, 0, 0, 0, 0, 0,
5476 + 0, 0, 0, 0, 0, 0, 0, 0,
5477 + 0, 0, 0, 0, 0, 0, 0, 0,
5480 +static void md5_calc(u_int8_t *, md5_ctxt *);
5482 +void md5_init(ctxt)
5487 + ctxt->md5_sta = MD5_A0;
5488 + ctxt->md5_stb = MD5_B0;
5489 + ctxt->md5_stc = MD5_C0;
5490 + ctxt->md5_std = MD5_D0;
5491 + bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
5494 +void md5_loop(ctxt, input, len)
5497 + u_int len; /* number of bytes */
5501 + ctxt->md5_n += len * 8; /* byte to bit */
5502 + gap = MD5_BUFLEN - ctxt->md5_i;
5505 + bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
5507 + md5_calc(ctxt->md5_buf, ctxt);
5509 + for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
5510 + md5_calc((u_int8_t *)(input + i), ctxt);
5513 + ctxt->md5_i = len - i;
5514 + bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
5516 + bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
5518 + ctxt->md5_i += len;
5527 + /* Don't count up padding. Keep md5_n. */
5528 + gap = MD5_BUFLEN - ctxt->md5_i;
5531 + (void *)(ctxt->md5_buf + ctxt->md5_i),
5532 + gap - sizeof(ctxt->md5_n));
5534 + /* including gap == 8 */
5535 + bcopy(md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i),
5537 + md5_calc(ctxt->md5_buf, ctxt);
5538 + bcopy((md5_paddat + gap),
5539 + (void *)ctxt->md5_buf,
5540 + MD5_BUFLEN - sizeof(ctxt->md5_n));
5544 +#if BYTE_ORDER == LITTLE_ENDIAN
5545 + bcopy(&ctxt->md5_n8[0], &ctxt->md5_buf[56], 8);
5547 +#if BYTE_ORDER == BIG_ENDIAN
5548 + ctxt->md5_buf[56] = ctxt->md5_n8[7];
5549 + ctxt->md5_buf[57] = ctxt->md5_n8[6];
5550 + ctxt->md5_buf[58] = ctxt->md5_n8[5];
5551 + ctxt->md5_buf[59] = ctxt->md5_n8[4];
5552 + ctxt->md5_buf[60] = ctxt->md5_n8[3];
5553 + ctxt->md5_buf[61] = ctxt->md5_n8[2];
5554 + ctxt->md5_buf[62] = ctxt->md5_n8[1];
5555 + ctxt->md5_buf[63] = ctxt->md5_n8[0];
5558 + md5_calc(ctxt->md5_buf, ctxt);
5561 +void md5_result(digest, ctxt)
5565 + /* 4 byte words */
5566 +#if BYTE_ORDER == LITTLE_ENDIAN
5567 + bcopy(&ctxt->md5_st8[0], digest, 16);
5569 +#if BYTE_ORDER == BIG_ENDIAN
5570 + digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
5571 + digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
5572 + digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
5573 + digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
5574 + digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
5575 + digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
5576 + digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
5577 + digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
5581 +static void md5_calc(b64, ctxt)
5585 + u_int32_t A = ctxt->md5_sta;
5586 + u_int32_t B = ctxt->md5_stb;
5587 + u_int32_t C = ctxt->md5_stc;
5588 + u_int32_t D = ctxt->md5_std;
5589 +#if BYTE_ORDER == LITTLE_ENDIAN
5590 + u_int32_t *X = (u_int32_t *)b64;
5592 +#if BYTE_ORDER == BIG_ENDIAN
5593 + /* 4 byte words */
5594 + /* what a brute force but fast! */
5596 + u_int8_t *y = (u_int8_t *)X;
5597 + y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
5598 + y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
5599 + y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
5600 + y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
5601 + y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
5602 + y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
5603 + y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
5604 + y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
5605 + y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
5606 + y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
5607 + y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
5608 + y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
5609 + y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
5610 + y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
5611 + y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
5612 + y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
5615 + ROUND1(A, B, C, D, 0, Sa, 1); ROUND1(D, A, B, C, 1, Sb, 2);
5616 + ROUND1(C, D, A, B, 2, Sc, 3); ROUND1(B, C, D, A, 3, Sd, 4);
5617 + ROUND1(A, B, C, D, 4, Sa, 5); ROUND1(D, A, B, C, 5, Sb, 6);
5618 + ROUND1(C, D, A, B, 6, Sc, 7); ROUND1(B, C, D, A, 7, Sd, 8);
5619 + ROUND1(A, B, C, D, 8, Sa, 9); ROUND1(D, A, B, C, 9, Sb, 10);
5620 + ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
5621 + ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
5622 + ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
5624 + ROUND2(A, B, C, D, 1, Se, 17); ROUND2(D, A, B, C, 6, Sf, 18);
5625 + ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A, 0, Sh, 20);
5626 + ROUND2(A, B, C, D, 5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
5627 + ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A, 4, Sh, 24);
5628 + ROUND2(A, B, C, D, 9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
5629 + ROUND2(C, D, A, B, 3, Sg, 27); ROUND2(B, C, D, A, 8, Sh, 28);
5630 + ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C, 2, Sf, 30);
5631 + ROUND2(C, D, A, B, 7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
5633 + ROUND3(A, B, C, D, 5, Si, 33); ROUND3(D, A, B, C, 8, Sj, 34);
5634 + ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
5635 + ROUND3(A, B, C, D, 1, Si, 37); ROUND3(D, A, B, C, 4, Sj, 38);
5636 + ROUND3(C, D, A, B, 7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
5637 + ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C, 0, Sj, 42);
5638 + ROUND3(C, D, A, B, 3, Sk, 43); ROUND3(B, C, D, A, 6, Sl, 44);
5639 + ROUND3(A, B, C, D, 9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
5640 + ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A, 2, Sl, 48);
5642 + ROUND4(A, B, C, D, 0, Sm, 49); ROUND4(D, A, B, C, 7, Sn, 50);
5643 + ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A, 5, Sp, 52);
5644 + ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C, 3, Sn, 54);
5645 + ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A, 1, Sp, 56);
5646 + ROUND4(A, B, C, D, 8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);
5647 + ROUND4(C, D, A, B, 6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);
5648 + ROUND4(A, B, C, D, 4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);
5649 + ROUND4(C, D, A, B, 2, So, 63); ROUND4(B, C, D, A, 9, Sp, 64);
5651 + ctxt->md5_sta += A;
5652 + ctxt->md5_stb += B;
5653 + ctxt->md5_stc += C;
5654 + ctxt->md5_std += D;
5657 +++ b/crypto/ocf/safe/md5.h
5659 +/* $FreeBSD: src/sys/crypto/md5.h,v 1.4 2002/03/20 05:13:50 alfred Exp $ */
5660 +/* $KAME: md5.h,v 1.4 2000/03/27 04:36:22 sumikawa Exp $ */
5663 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5664 + * All rights reserved.
5666 + * Redistribution and use in source and binary forms, with or without
5667 + * modification, are permitted provided that the following conditions
5669 + * 1. Redistributions of source code must retain the above copyright
5670 + * notice, this list of conditions and the following disclaimer.
5671 + * 2. Redistributions in binary form must reproduce the above copyright
5672 + * notice, this list of conditions and the following disclaimer in the
5673 + * documentation and/or other materials provided with the distribution.
5674 + * 3. Neither the name of the project nor the names of its contributors
5675 + * may be used to endorse or promote products derived from this software
5676 + * without specific prior written permission.
5678 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
5679 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5680 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5681 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
5682 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5683 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5684 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5685 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5686 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5687 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5691 +#ifndef _NETINET6_MD5_H_
5692 +#define _NETINET6_MD5_H_
5694 +#define MD5_BUFLEN 64
5698 + u_int32_t md5_state32[4];
5699 + u_int8_t md5_state8[16];
5702 +#define md5_sta md5_st.md5_state32[0]
5703 +#define md5_stb md5_st.md5_state32[1]
5704 +#define md5_stc md5_st.md5_state32[2]
5705 +#define md5_std md5_st.md5_state32[3]
5706 +#define md5_st8 md5_st.md5_state8
5709 + u_int64_t md5_count64;
5710 + u_int8_t md5_count8[8];
5712 +#define md5_n md5_count.md5_count64
5713 +#define md5_n8 md5_count.md5_count8
5716 + u_int8_t md5_buf[MD5_BUFLEN];
5719 +extern void md5_init(md5_ctxt *);
5720 +extern void md5_loop(md5_ctxt *, u_int8_t *, u_int);
5721 +extern void md5_pad(md5_ctxt *);
5722 +extern void md5_result(u_int8_t *, md5_ctxt *);
5724 +/* compatibility */
5725 +#define MD5_CTX md5_ctxt
5726 +#define MD5Init(x) md5_init((x))
5727 +#define MD5Update(x, y, z) md5_loop((x), (y), (z))
5728 +#define MD5Final(x, y) \
5731 + md5_result((x), (y)); \
5734 +#endif /* ! _NETINET6_MD5_H_*/
5736 +++ b/crypto/ocf/safe/safe.c
5739 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
5740 + * Copyright (C) 2004-2007 David McCullough
5741 + * The license and original author are listed below.
5743 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
5744 + * Copyright (c) 2003 Global Technology Associates, Inc.
5745 + * All rights reserved.
5747 + * Redistribution and use in source and binary forms, with or without
5748 + * modification, are permitted provided that the following conditions
5750 + * 1. Redistributions of source code must retain the above copyright
5751 + * notice, this list of conditions and the following disclaimer.
5752 + * 2. Redistributions in binary form must reproduce the above copyright
5753 + * notice, this list of conditions and the following disclaimer in the
5754 + * documentation and/or other materials provided with the distribution.
5756 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
5757 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5758 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5759 + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
5760 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5761 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5762 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5763 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5764 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5765 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5768 +__FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
5771 +#ifndef AUTOCONF_INCLUDED
5772 +#include <linux/config.h>
5774 +#include <linux/module.h>
5775 +#include <linux/kernel.h>
5776 +#include <linux/init.h>
5777 +#include <linux/list.h>
5778 +#include <linux/slab.h>
5779 +#include <linux/wait.h>
5780 +#include <linux/sched.h>
5781 +#include <linux/pci.h>
5782 +#include <linux/delay.h>
5783 +#include <linux/interrupt.h>
5784 +#include <linux/spinlock.h>
5785 +#include <linux/random.h>
5786 +#include <linux/version.h>
5787 +#include <linux/skbuff.h>
5788 +#include <asm/io.h>
5791 + * SafeNet SafeXcel-1141 hardware crypto accelerator
5794 +#include <cryptodev.h>
5796 +#include <safe/safereg.h>
5797 +#include <safe/safevar.h>
5800 +#define DPRINTF(a) do { \
5802 + printk("%s: ", sc ? \
5803 + device_get_nameunit(sc->sc_dev) : "safe"); \
5812 + * until we find a cleaner way, include the BSD md5/sha1 code
5815 +#define HMAC_HACK 1
5817 +#define LITTLE_ENDIAN 1234
5818 +#define BIG_ENDIAN 4321
5819 +#ifdef __LITTLE_ENDIAN
5820 +#define BYTE_ORDER LITTLE_ENDIAN
5822 +#ifdef __BIG_ENDIAN
5823 +#define BYTE_ORDER BIG_ENDIAN
5825 +#include <safe/md5.h>
5826 +#include <safe/md5.c>
5827 +#include <safe/sha1.h>
5828 +#include <safe/sha1.c>
5830 +u_int8_t hmac_ipad_buffer[64] = {
5831 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5832 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5833 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5834 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5835 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5836 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5837 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5838 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
5841 +u_int8_t hmac_opad_buffer[64] = {
5842 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5843 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5844 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5845 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5846 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5847 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5848 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5849 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
5851 +#endif /* HMAC_HACK */
5853 +/* add proc entry for this */
5854 +struct safe_stats safestats;
5856 +#define debug safe_debug
5857 +int safe_debug = 0;
5858 +module_param(safe_debug, int, 0644);
5859 +MODULE_PARM_DESC(safe_debug, "Enable debug");
5861 +static void safe_callback(struct safe_softc *, struct safe_ringentry *);
5862 +static void safe_feed(struct safe_softc *, struct safe_ringentry *);
5863 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
5864 +static void safe_rng_init(struct safe_softc *);
5865 +int safe_rngbufsize = 8; /* 32 bytes each read */
5866 +module_param(safe_rngbufsize, int, 0644);
5867 +MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
5868 +int safe_rngmaxalarm = 8; /* max alarms before reset */
5869 +module_param(safe_rngmaxalarm, int, 0644);
5870 +MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
5871 +#endif /* SAFE_NO_RNG */
5873 +static void safe_totalreset(struct safe_softc *sc);
5874 +static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
5875 +static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
5876 +static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
5877 +static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
5878 +static int safe_kstart(struct safe_softc *sc);
5879 +static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
5880 +static void safe_kfeed(struct safe_softc *sc);
5881 +static void safe_kpoll(unsigned long arg);
5882 +static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
5883 + u_int32_t len, struct crparam *n);
5885 +static int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
5886 +static int safe_freesession(device_t, u_int64_t);
5887 +static int safe_process(device_t, struct cryptop *, int);
5889 +static device_method_t safe_methods = {
5890 + /* crypto device methods */
5891 + DEVMETHOD(cryptodev_newsession, safe_newsession),
5892 + DEVMETHOD(cryptodev_freesession,safe_freesession),
5893 + DEVMETHOD(cryptodev_process, safe_process),
5894 + DEVMETHOD(cryptodev_kprocess, safe_kprocess),
5897 +#define READ_REG(sc,r) readl((sc)->sc_base_addr + (r))
5898 +#define WRITE_REG(sc,r,val) writel((val), (sc)->sc_base_addr + (r))
5900 +#define SAFE_MAX_CHIPS 8
5901 +static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
5904 + * split our buffers up into safe DMAable byte fragments to avoid lockup
5905 + * bug in 1141 HW on rev 1.0.
5910 + struct safe_softc *sc,
5911 + struct safe_operand *buf,
5916 + int chunk, tlen = len;
5918 + tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);
5920 + buf->mapsize += len;
5922 + chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
5923 + buf->segs[buf->nsegs].ds_addr = tmp;
5924 + buf->segs[buf->nsegs].ds_len = chunk;
5925 + buf->segs[buf->nsegs].ds_tlen = tlen;
5935 + * map in a given uio buffer (great on some arches :-)
5939 +pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
5941 + struct iovec *iov = uio->uio_iov;
5944 + DPRINTF(("%s()\n", __FUNCTION__));
5949 + for (n = 0; n < uio->uio_iovcnt; n++) {
5950 + pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
5954 + /* identify this buffer by the first segment */
5955 + buf->map = (void *) buf->segs[0].ds_addr;
5960 + * map in a given sk_buff
5964 +pci_map_skb(struct safe_softc *sc,struct safe_operand *buf,struct sk_buff *skb)
5968 + DPRINTF(("%s()\n", __FUNCTION__));
5973 + pci_map_linear(sc, buf, skb->data, skb_headlen(skb));
5975 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5976 + pci_map_linear(sc, buf,
5977 + page_address(skb_shinfo(skb)->frags[i].page) +
5978 + skb_shinfo(skb)->frags[i].page_offset,
5979 + skb_shinfo(skb)->frags[i].size);
5982 + /* identify this buffer by the first segment */
5983 + buf->map = (void *) buf->segs[0].ds_addr;
5988 +#if 0 /* not needed at this time */
5990 +pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
5994 + DPRINTF(("%s()\n", __FUNCTION__));
5995 + for (i = 0; i < buf->nsegs; i++)
5996 + pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
5997 + buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
6002 +pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
6005 + DPRINTF(("%s()\n", __FUNCTION__));
6006 + for (i = 0; i < buf->nsegs; i++) {
6007 + if (buf->segs[i].ds_tlen) {
6008 + DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
6009 + pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
6010 + buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
6011 + DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
6013 + buf->segs[i].ds_addr = 0;
6014 + buf->segs[i].ds_len = 0;
6015 + buf->segs[i].ds_tlen = 0;
6024 + * SafeXcel Interrupt routine
6027 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
6028 +safe_intr(int irq, void *arg)
6030 +safe_intr(int irq, void *arg, struct pt_regs *regs)
6033 + struct safe_softc *sc = arg;
6035 + unsigned long flags;
6037 + stat = READ_REG(sc, SAFE_HM_STAT);
6039 + DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));
6041 + if (stat == 0) /* shared irq, not for us */
6044 + WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */
6046 + if ((stat & SAFE_INT_PE_DDONE)) {
6048 + * Descriptor(s) done; scan the ring and
6049 + * process completed operations.
6051 + spin_lock_irqsave(&sc->sc_ringmtx, flags);
6052 + while (sc->sc_back != sc->sc_front) {
6053 + struct safe_ringentry *re = sc->sc_back;
6057 + safe_dump_ringstate(sc, __func__);
6058 + safe_dump_request(sc, __func__, re);
6062 + * safe_process marks ring entries that were allocated
6063 + * but not used with a csr of zero. This insures the
6064 + * ring front pointer never needs to be set backwards
6065 + * in the event that an entry is allocated but not used
6066 + * because of a setup error.
6068 + DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
6069 + if (re->re_desc.d_csr != 0) {
6070 + if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
6071 + DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
6074 + if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
6075 + DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
6079 + safe_callback(sc, re);
6081 + if (++(sc->sc_back) == sc->sc_ringtop)
6082 + sc->sc_back = sc->sc_ring;
6084 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6088 + * Check to see if we got any DMA Error
6090 + if (stat & SAFE_INT_PE_ERROR) {
6091 + printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
6092 + (int)READ_REG(sc, SAFE_PE_DMASTAT));
6093 + safestats.st_dmaerr++;
6094 + safe_totalreset(sc);
6100 + if (sc->sc_needwakeup) { /* XXX check high watermark */
6101 + int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
6102 + DPRINTF(("%s: wakeup crypto %x\n", __func__,
6103 + sc->sc_needwakeup));
6104 + sc->sc_needwakeup &= ~wakeup;
6105 + crypto_unblock(sc->sc_cid, wakeup);
6108 + return IRQ_HANDLED;
6112 + * safe_feed() - post a request to chip
6115 +safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
6117 + DPRINTF(("%s()\n", __FUNCTION__));
6120 + safe_dump_ringstate(sc, __func__);
6121 + safe_dump_request(sc, __func__, re);
6125 + if (sc->sc_nqchip > safestats.st_maxqchip)
6126 + safestats.st_maxqchip = sc->sc_nqchip;
6127 + /* poke h/w to check descriptor ring, any value can be written */
6128 + WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
6131 +#define N(a) (sizeof(a) / sizeof (a[0]))
6133 +safe_setup_enckey(struct safe_session *ses, caddr_t key)
6137 + bcopy(key, ses->ses_key, ses->ses_klen / 8);
6139 + /* PE is little-endian, insure proper byte order */
6140 + for (i = 0; i < N(ses->ses_key); i++)
6141 + ses->ses_key[i] = htole32(ses->ses_key[i]);
6145 +safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
6153 + for (i = 0; i < klen; i++)
6154 + key[i] ^= HMAC_IPAD_VAL;
6156 + if (algo == CRYPTO_MD5_HMAC) {
6158 + MD5Update(&md5ctx, key, klen);
6159 + MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
6160 + bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
6162 + SHA1Init(&sha1ctx);
6163 + SHA1Update(&sha1ctx, key, klen);
6164 + SHA1Update(&sha1ctx, hmac_ipad_buffer,
6165 + SHA1_HMAC_BLOCK_LEN - klen);
6166 + bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
6169 + for (i = 0; i < klen; i++)
6170 + key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
6172 + if (algo == CRYPTO_MD5_HMAC) {
6174 + MD5Update(&md5ctx, key, klen);
6175 + MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
6176 + bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
6178 + SHA1Init(&sha1ctx);
6179 + SHA1Update(&sha1ctx, key, klen);
6180 + SHA1Update(&sha1ctx, hmac_opad_buffer,
6181 + SHA1_HMAC_BLOCK_LEN - klen);
6182 + bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
6185 + for (i = 0; i < klen; i++)
6186 + key[i] ^= HMAC_OPAD_VAL;
6190 + * this code prevents SHA working on a BE host,
6191 + * so it is obviously wrong. I think the byte
6192 + * swap setup we do with the chip fixes this for us
6195 + /* PE is little-endian, insure proper byte order */
6196 + for (i = 0; i < N(ses->ses_hminner); i++) {
6197 + ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
6198 + ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
6201 +#else /* HMAC_HACK */
6202 + printk("safe: md5/sha not implemented\n");
6203 +#endif /* HMAC_HACK */
6208 + * Allocate a new 'session' and return an encoded session id. 'sidp'
6209 + * contains our registration id, and should contain an encoded session
6210 + * id on successful allocation.
6213 +safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
6215 + struct safe_softc *sc = device_get_softc(dev);
6216 + struct cryptoini *c, *encini = NULL, *macini = NULL;
6217 + struct safe_session *ses = NULL;
6220 + DPRINTF(("%s()\n", __FUNCTION__));
6222 + if (sidp == NULL || cri == NULL || sc == NULL)
6225 + for (c = cri; c != NULL; c = c->cri_next) {
6226 + if (c->cri_alg == CRYPTO_MD5_HMAC ||
6227 + c->cri_alg == CRYPTO_SHA1_HMAC ||
6228 + c->cri_alg == CRYPTO_NULL_HMAC) {
6232 + } else if (c->cri_alg == CRYPTO_DES_CBC ||
6233 + c->cri_alg == CRYPTO_3DES_CBC ||
6234 + c->cri_alg == CRYPTO_AES_CBC ||
6235 + c->cri_alg == CRYPTO_NULL_CBC) {
6242 + if (encini == NULL && macini == NULL)
6244 + if (encini) { /* validate key length */
6245 + switch (encini->cri_alg) {
6246 + case CRYPTO_DES_CBC:
6247 + if (encini->cri_klen != 64)
6250 + case CRYPTO_3DES_CBC:
6251 + if (encini->cri_klen != 192)
6254 + case CRYPTO_AES_CBC:
6255 + if (encini->cri_klen != 128 &&
6256 + encini->cri_klen != 192 &&
6257 + encini->cri_klen != 256)
6263 + if (sc->sc_sessions == NULL) {
6264 + ses = sc->sc_sessions = (struct safe_session *)
6265 + kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
6268 + memset(ses, 0, sizeof(struct safe_session));
6270 + sc->sc_nsessions = 1;
6272 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
6273 + if (sc->sc_sessions[sesn].ses_used == 0) {
6274 + ses = &sc->sc_sessions[sesn];
6279 + if (ses == NULL) {
6280 + sesn = sc->sc_nsessions;
6281 + ses = (struct safe_session *)
6282 + kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
6285 + memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
6286 + bcopy(sc->sc_sessions, ses, sesn *
6287 + sizeof(struct safe_session));
6288 + bzero(sc->sc_sessions, sesn *
6289 + sizeof(struct safe_session));
6290 + kfree(sc->sc_sessions);
6291 + sc->sc_sessions = ses;
6292 + ses = &sc->sc_sessions[sesn];
6293 + sc->sc_nsessions++;
6297 + bzero(ses, sizeof(struct safe_session));
6298 + ses->ses_used = 1;
6302 + /* XXX may read fewer than requested */
6303 + read_random(ses->ses_iv, sizeof(ses->ses_iv));
6305 + ses->ses_klen = encini->cri_klen;
6306 + if (encini->cri_key != NULL)
6307 + safe_setup_enckey(ses, encini->cri_key);
6311 + ses->ses_mlen = macini->cri_mlen;
6312 + if (ses->ses_mlen == 0) {
6313 + if (macini->cri_alg == CRYPTO_MD5_HMAC)
6314 + ses->ses_mlen = MD5_HASH_LEN;
6316 + ses->ses_mlen = SHA1_HASH_LEN;
6319 + if (macini->cri_key != NULL) {
6320 + safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
6321 + macini->cri_klen / 8);
6325 + *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
6330 + * Deallocate a session.
6333 +safe_freesession(device_t dev, u_int64_t tid)
6335 + struct safe_softc *sc = device_get_softc(dev);
6337 + u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
6339 + DPRINTF(("%s()\n", __FUNCTION__));
6344 + session = SAFE_SESSION(sid);
6345 + if (session < sc->sc_nsessions) {
6346 + bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
6355 +safe_process(device_t dev, struct cryptop *crp, int hint)
6357 + struct safe_softc *sc = device_get_softc(dev);
6358 + int err = 0, i, nicealign, uniform;
6359 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
6360 + int bypass, oplen, ivsize;
6363 + struct safe_session *ses;
6364 + struct safe_ringentry *re;
6365 + struct safe_sarec *sa;
6366 + struct safe_pdesc *pd;
6367 + u_int32_t cmd0, cmd1, staterec;
6368 + unsigned long flags;
6370 + DPRINTF(("%s()\n", __FUNCTION__));
6372 + if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
6373 + safestats.st_invalid++;
6376 + if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
6377 + safestats.st_badsession++;
6381 + spin_lock_irqsave(&sc->sc_ringmtx, flags);
6382 + if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
6383 + safestats.st_ringfull++;
6384 + sc->sc_needwakeup |= CRYPTO_SYMQ;
6385 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6386 + return (ERESTART);
6388 + re = sc->sc_front;
6390 + staterec = re->re_sa.sa_staterec; /* save */
6391 + /* NB: zero everything but the PE descriptor */
6392 + bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
6393 + re->re_sa.sa_staterec = staterec; /* restore */
6396 + re->re_sesn = SAFE_SESSION(crp->crp_sid);
6398 + re->re_src.nsegs = 0;
6399 + re->re_dst.nsegs = 0;
6401 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
6402 + re->re_src_skb = (struct sk_buff *)crp->crp_buf;
6403 + re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
6404 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
6405 + re->re_src_io = (struct uio *)crp->crp_buf;
6406 + re->re_dst_io = (struct uio *)crp->crp_buf;
6408 + safestats.st_badflags++;
6410 + goto errout; /* XXX we don't handle contiguous blocks! */
6414 + ses = &sc->sc_sessions[re->re_sesn];
6416 + crd1 = crp->crp_desc;
6417 + if (crd1 == NULL) {
6418 + safestats.st_nodesc++;
6422 + crd2 = crd1->crd_next;
6424 + cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */
6426 + if (crd2 == NULL) {
6427 + if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
6428 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
6429 + crd1->crd_alg == CRYPTO_NULL_HMAC) {
6432 + cmd0 |= SAFE_SA_CMD0_OP_HASH;
6433 + } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
6434 + crd1->crd_alg == CRYPTO_3DES_CBC ||
6435 + crd1->crd_alg == CRYPTO_AES_CBC ||
6436 + crd1->crd_alg == CRYPTO_NULL_CBC) {
6439 + cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
6441 + safestats.st_badalg++;
6446 + if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
6447 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
6448 + crd1->crd_alg == CRYPTO_NULL_HMAC) &&
6449 + (crd2->crd_alg == CRYPTO_DES_CBC ||
6450 + crd2->crd_alg == CRYPTO_3DES_CBC ||
6451 + crd2->crd_alg == CRYPTO_AES_CBC ||
6452 + crd2->crd_alg == CRYPTO_NULL_CBC) &&
6453 + ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
6456 + } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
6457 + crd1->crd_alg == CRYPTO_3DES_CBC ||
6458 + crd1->crd_alg == CRYPTO_AES_CBC ||
6459 + crd1->crd_alg == CRYPTO_NULL_CBC) &&
6460 + (crd2->crd_alg == CRYPTO_MD5_HMAC ||
6461 + crd2->crd_alg == CRYPTO_SHA1_HMAC ||
6462 + crd2->crd_alg == CRYPTO_NULL_HMAC) &&
6463 + (crd1->crd_flags & CRD_F_ENCRYPT)) {
6467 + safestats.st_badalg++;
6471 + cmd0 |= SAFE_SA_CMD0_OP_BOTH;
6475 + if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
6476 + safe_setup_enckey(ses, enccrd->crd_key);
6478 + if (enccrd->crd_alg == CRYPTO_DES_CBC) {
6479 + cmd0 |= SAFE_SA_CMD0_DES;
6480 + cmd1 |= SAFE_SA_CMD1_CBC;
6481 + ivsize = 2*sizeof(u_int32_t);
6482 + } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
6483 + cmd0 |= SAFE_SA_CMD0_3DES;
6484 + cmd1 |= SAFE_SA_CMD1_CBC;
6485 + ivsize = 2*sizeof(u_int32_t);
6486 + } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
6487 + cmd0 |= SAFE_SA_CMD0_AES;
6488 + cmd1 |= SAFE_SA_CMD1_CBC;
6489 + if (ses->ses_klen == 128)
6490 + cmd1 |= SAFE_SA_CMD1_AES128;
6491 + else if (ses->ses_klen == 192)
6492 + cmd1 |= SAFE_SA_CMD1_AES192;
6494 + cmd1 |= SAFE_SA_CMD1_AES256;
6495 + ivsize = 4*sizeof(u_int32_t);
6497 + cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
6502 + * Setup encrypt/decrypt state. When using basic ops
6503 + * we can't use an inline IV because hash/crypt offset
6504 + * must be from the end of the IV to the start of the
6505 + * crypt data and this leaves out the preceding header
6506 + * from the hash calculation. Instead we place the IV
6507 + * in the state record and set the hash/crypt offset to
6508 + * copy both the header+IV.
6510 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
6511 + cmd0 |= SAFE_SA_CMD0_OUTBOUND;
6513 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
6514 + iv = enccrd->crd_iv;
6516 + iv = (caddr_t) ses->ses_iv;
6517 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
6518 + crypto_copyback(crp->crp_flags, crp->crp_buf,
6519 + enccrd->crd_inject, ivsize, iv);
6521 + bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
6523 + for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
6524 + re->re_sastate.sa_saved_iv[i] =
6525 + cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
6526 + cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
6527 + re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
6529 + cmd0 |= SAFE_SA_CMD0_INBOUND;
6531 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
6532 + bcopy(enccrd->crd_iv,
6533 + re->re_sastate.sa_saved_iv, ivsize);
6535 + crypto_copydata(crp->crp_flags, crp->crp_buf,
6536 + enccrd->crd_inject, ivsize,
6537 + (caddr_t)re->re_sastate.sa_saved_iv);
6540 + for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
6541 + re->re_sastate.sa_saved_iv[i] =
6542 + cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
6543 + cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
6546 + * For basic encryption use the zero pad algorithm.
6547 + * This pads results to an 8-byte boundary and
6548 + * suppresses padding verification for inbound (i.e.
6549 + * decrypt) operations.
6551 + * NB: Not sure if the 8-byte pad boundary is a problem.
6553 + cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
6555 + /* XXX assert key bufs have the same size */
6556 + bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
6560 + if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
6561 + safe_setup_mackey(ses, maccrd->crd_alg,
6562 + maccrd->crd_key, maccrd->crd_klen / 8);
6565 + if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
6566 + cmd0 |= SAFE_SA_CMD0_MD5;
6567 + cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
6568 + } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
6569 + cmd0 |= SAFE_SA_CMD0_SHA1;
6570 + cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
6572 + cmd0 |= SAFE_SA_CMD0_HASH_NULL;
6575 + * Digest data is loaded from the SA and the hash
6576 + * result is saved to the state block where we
6577 + * retrieve it for return to the caller.
6579 + /* XXX assert digest bufs have the same size */
6580 + bcopy(ses->ses_hminner, sa->sa_indigest,
6581 + sizeof(sa->sa_indigest));
6582 + bcopy(ses->ses_hmouter, sa->sa_outdigest,
6583 + sizeof(sa->sa_outdigest));
6585 + cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
6586 + re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
6589 + if (enccrd && maccrd) {
6591 + * The offset from hash data to the start of
6592 + * crypt data is the difference in the skips.
6594 + bypass = maccrd->crd_skip;
6595 + coffset = enccrd->crd_skip - maccrd->crd_skip;
6596 + if (coffset < 0) {
6597 + DPRINTF(("%s: hash does not precede crypt; "
6598 + "mac skip %u enc skip %u\n",
6599 + __func__, maccrd->crd_skip, enccrd->crd_skip));
6600 + safestats.st_skipmismatch++;
6604 + oplen = enccrd->crd_skip + enccrd->crd_len;
6605 + if (maccrd->crd_skip + maccrd->crd_len != oplen) {
6606 + DPRINTF(("%s: hash amount %u != crypt amount %u\n",
6607 + __func__, maccrd->crd_skip + maccrd->crd_len,
6609 + safestats.st_lenmismatch++;
6615 + printf("mac: skip %d, len %d, inject %d\n",
6616 + maccrd->crd_skip, maccrd->crd_len,
6617 + maccrd->crd_inject);
6618 + printf("enc: skip %d, len %d, inject %d\n",
6619 + enccrd->crd_skip, enccrd->crd_len,
6620 + enccrd->crd_inject);
6621 + printf("bypass %d coffset %d oplen %d\n",
6622 + bypass, coffset, oplen);
6625 + if (coffset & 3) { /* offset must be 32-bit aligned */
6626 + DPRINTF(("%s: coffset %u misaligned\n",
6627 + __func__, coffset));
6628 + safestats.st_coffmisaligned++;
6633 + if (coffset > 255) { /* offset must be <256 dwords */
6634 + DPRINTF(("%s: coffset %u too big\n",
6635 + __func__, coffset));
6636 + safestats.st_cofftoobig++;
6641 + * Tell the hardware to copy the header to the output.
6642 + * The header is defined as the data from the end of
6643 + * the bypass to the start of data to be encrypted.
6644 + * Typically this is the inline IV. Note that you need
6645 + * to do this even if src+dst are the same; it appears
6646 + * that w/o this bit the crypted data is written
6647 + * immediately after the bypass data.
6649 + cmd1 |= SAFE_SA_CMD1_HDRCOPY;
6651 + * Disable IP header mutable bit handling. This is
6652 + * needed to get correct HMAC calculations.
6654 + cmd1 |= SAFE_SA_CMD1_MUTABLE;
6657 + bypass = enccrd->crd_skip;
6658 + oplen = bypass + enccrd->crd_len;
6660 + bypass = maccrd->crd_skip;
6661 + oplen = bypass + maccrd->crd_len;
6665 + /* XXX verify multiple of 4 when using s/g */
6666 + if (bypass > 96) { /* bypass offset must be <= 96 bytes */
6667 + DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
6668 + safestats.st_bypasstoobig++;
6673 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
6674 + if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
6675 + safestats.st_noload++;
6679 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
6680 + if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
6681 + safestats.st_noload++;
6686 + nicealign = safe_dmamap_aligned(sc, &re->re_src);
6687 + uniform = safe_dmamap_uniform(sc, &re->re_src);
6689 + DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
6690 + nicealign, uniform, re->re_src.nsegs));
6691 + if (re->re_src.nsegs > 1) {
6692 + re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
6693 + ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
6694 + for (i = 0; i < re->re_src_nsegs; i++) {
6695 + /* NB: no need to check if there's space */
6696 + pd = sc->sc_spfree;
6697 + if (++(sc->sc_spfree) == sc->sc_springtop)
6698 + sc->sc_spfree = sc->sc_spring;
6700 + KASSERT((pd->pd_flags&3) == 0 ||
6701 + (pd->pd_flags&3) == SAFE_PD_DONE,
6702 + ("bogus source particle descriptor; flags %x",
6704 + pd->pd_addr = re->re_src_segs[i].ds_addr;
6705 + pd->pd_size = re->re_src_segs[i].ds_len;
6706 + pd->pd_flags = SAFE_PD_READY;
6708 + cmd0 |= SAFE_SA_CMD0_IGATHER;
6711 + * No need for gather, reference the operand directly.
6713 + re->re_desc.d_src = re->re_src_segs[0].ds_addr;
6716 + if (enccrd == NULL && maccrd != NULL) {
6718 + * Hash op; no destination needed.
6721 + if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
6723 + safestats.st_iovmisaligned++;
6727 + if (uniform != 1) {
6728 + device_printf(sc->sc_dev, "!uniform source\n");
6731 + * There's no way to handle the DMA
6732 + * requirements with this uio. We
6733 + * could create a separate DMA area for
6734 + * the result and then copy it back,
6735 + * but for now we just bail and return
6736 + * an error. Note that uio requests
6737 + * > SAFE_MAX_DSIZE are handled because
6738 + * the DMA map and segment list for the
6739 + * destination wil result in a
6740 + * destination particle list that does
6741 + * the necessary scatter DMA.
6743 + safestats.st_iovnotuniform++;
6748 + re->re_dst = re->re_src;
6750 + safestats.st_badflags++;
6755 + if (re->re_dst.nsegs > 1) {
6756 + re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
6757 + ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
6758 + for (i = 0; i < re->re_dst_nsegs; i++) {
6759 + pd = sc->sc_dpfree;
6760 + KASSERT((pd->pd_flags&3) == 0 ||
6761 + (pd->pd_flags&3) == SAFE_PD_DONE,
6762 + ("bogus dest particle descriptor; flags %x",
6764 + if (++(sc->sc_dpfree) == sc->sc_dpringtop)
6765 + sc->sc_dpfree = sc->sc_dpring;
6766 + pd->pd_addr = re->re_dst_segs[i].ds_addr;
6767 + pd->pd_flags = SAFE_PD_READY;
6769 + cmd0 |= SAFE_SA_CMD0_OSCATTER;
6772 + * No need for scatter, reference the operand directly.
6774 + re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
6779 + * All done with setup; fillin the SA command words
6780 + * and the packet engine descriptor. The operation
6781 + * is now ready for submission to the hardware.
6783 + sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
6784 + sa->sa_cmd1 = cmd1
6785 + | (coffset << SAFE_SA_CMD1_OFFSET_S)
6786 + | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */
6787 + | SAFE_SA_CMD1_SRPCI
6790 + * NB: the order of writes is important here. In case the
6791 + * chip is scanning the ring because of an outstanding request
6792 + * it might nab this one too. In that case we need to make
6793 + * sure the setup is complete before we write the length
6794 + * field of the descriptor as it signals the descriptor is
6795 + * ready for processing.
6797 + re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
6799 + re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
6801 + re->re_desc.d_len = oplen
6802 + | SAFE_PE_LEN_READY
6803 + | (bypass << SAFE_PE_LEN_BYPASS_S)
6806 + safestats.st_ipackets++;
6807 + safestats.st_ibytes += oplen;
6809 + if (++(sc->sc_front) == sc->sc_ringtop)
6810 + sc->sc_front = sc->sc_ring;
6812 + /* XXX honor batching */
6813 + safe_feed(sc, re);
6814 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6818 + if (re->re_src.map != re->re_dst.map)
6819 + pci_unmap_operand(sc, &re->re_dst);
6820 + if (re->re_src.map)
6821 + pci_unmap_operand(sc, &re->re_src);
6822 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6823 + if (err != ERESTART) {
6824 + crp->crp_etype = err;
6827 + sc->sc_needwakeup |= CRYPTO_SYMQ;
6833 +safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
6835 + struct cryptop *crp = (struct cryptop *)re->re_crp;
6836 + struct cryptodesc *crd;
6838 + DPRINTF(("%s()\n", __FUNCTION__));
6840 + safestats.st_opackets++;
6841 + safestats.st_obytes += re->re_dst.mapsize;
6843 + if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
6844 + device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
6845 + re->re_desc.d_csr,
6846 + re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
6847 + safestats.st_peoperr++;
6848 + crp->crp_etype = EIO; /* something more meaningful? */
6851 + if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
6852 + pci_unmap_operand(sc, &re->re_dst);
6853 + pci_unmap_operand(sc, &re->re_src);
6856 + * If result was written to a differet mbuf chain, swap
6857 + * it in as the return value and reclaim the original.
6859 + if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
6860 + device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
6861 + /* kfree_skb(skb) */
6862 + /* crp->crp_buf = (caddr_t)re->re_dst_skb */
6866 + if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
6867 + /* copy out IV for future use */
6868 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
6872 + if (crd->crd_alg == CRYPTO_DES_CBC ||
6873 + crd->crd_alg == CRYPTO_3DES_CBC) {
6874 + ivsize = 2*sizeof(u_int32_t);
6875 + } else if (crd->crd_alg == CRYPTO_AES_CBC) {
6876 + ivsize = 4*sizeof(u_int32_t);
6879 + crypto_copydata(crp->crp_flags, crp->crp_buf,
6880 + crd->crd_skip + crd->crd_len - ivsize, ivsize,
6881 + (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
6883 + i < ivsize/sizeof(sc->sc_sessions[re->re_sesn].ses_iv[0]);
6885 + sc->sc_sessions[re->re_sesn].ses_iv[i] =
6886 + cpu_to_le32(sc->sc_sessions[re->re_sesn].ses_iv[i]);
6891 + if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
6892 + /* copy out ICV result */
6893 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
6894 + if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
6895 + crd->crd_alg == CRYPTO_SHA1_HMAC ||
6896 + crd->crd_alg == CRYPTO_NULL_HMAC))
6898 + if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
6900 + * SHA-1 ICV's are byte-swapped; fix 'em up
6901 + * before copy them to their destination.
6903 + re->re_sastate.sa_saved_indigest[0] =
6904 + cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
6905 + re->re_sastate.sa_saved_indigest[1] =
6906 + cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
6907 + re->re_sastate.sa_saved_indigest[2] =
6908 + cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
6910 + re->re_sastate.sa_saved_indigest[0] =
6911 + cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
6912 + re->re_sastate.sa_saved_indigest[1] =
6913 + cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
6914 + re->re_sastate.sa_saved_indigest[2] =
6915 + cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
6917 + crypto_copyback(crp->crp_flags, crp->crp_buf,
6919 + sc->sc_sessions[re->re_sesn].ses_mlen,
6920 + (caddr_t)re->re_sastate.sa_saved_indigest);
6928 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
6929 +#define SAFE_RNG_MAXWAIT 1000
6932 +safe_rng_init(struct safe_softc *sc)
6937 + DPRINTF(("%s()\n", __FUNCTION__));
6939 + WRITE_REG(sc, SAFE_RNG_CTRL, 0);
6940 + /* use default value according to the manual */
6941 + WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */
6942 + WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
6945 + * There is a bug in rev 1.0 of the 1140 that when the RNG
6946 + * is brought out of reset the ready status flag does not
6947 + * work until the RNG has finished its internal initialization.
6949 + * So in order to determine the device is through its
6950 + * initialization we must read the data register, using the
6951 + * status reg in the read in case it is initialized. Then read
6952 + * the data register until it changes from the first read.
6953 + * Once it changes read the data register until it changes
6954 + * again. At this time the RNG is considered initialized.
6955 + * This could take between 750ms - 1000ms in time.
6958 + w = READ_REG(sc, SAFE_RNG_OUT);
6960 + v = READ_REG(sc, SAFE_RNG_OUT);
6966 + } while (++i < SAFE_RNG_MAXWAIT);
6968 + /* Wait Until data changes again */
6971 + v = READ_REG(sc, SAFE_RNG_OUT);
6975 + } while (++i < SAFE_RNG_MAXWAIT);
6978 +static __inline void
6979 +safe_rng_disable_short_cycle(struct safe_softc *sc)
6981 + DPRINTF(("%s()\n", __FUNCTION__));
6983 + WRITE_REG(sc, SAFE_RNG_CTRL,
6984 + READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
6987 +static __inline void
6988 +safe_rng_enable_short_cycle(struct safe_softc *sc)
6990 + DPRINTF(("%s()\n", __FUNCTION__));
6992 + WRITE_REG(sc, SAFE_RNG_CTRL,
6993 + READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
6996 +static __inline u_int32_t
6997 +safe_rng_read(struct safe_softc *sc)
7002 + while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
7004 + return READ_REG(sc, SAFE_RNG_OUT);
7008 +safe_read_random(void *arg, u_int32_t *buf, int maxwords)
7010 + struct safe_softc *sc = (struct safe_softc *) arg;
7013 + DPRINTF(("%s()\n", __FUNCTION__));
7015 + safestats.st_rng++;
7017 + * Fetch the next block of data.
7019 + if (maxwords > safe_rngbufsize)
7020 + maxwords = safe_rngbufsize;
7021 + if (maxwords > SAFE_RNG_MAXBUFSIZ)
7022 + maxwords = SAFE_RNG_MAXBUFSIZ;
7024 + /* read as much as we can */
7025 + for (rc = 0; rc < maxwords; rc++) {
7026 + if (READ_REG(sc, SAFE_RNG_STAT) != 0)
7028 + buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
7033 + * Check the comparator alarm count and reset the h/w if
7034 + * it exceeds our threshold. This guards against the
7035 + * hardware oscillators resonating with external signals.
7037 + if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
7038 + u_int32_t freq_inc, w;
7040 + DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
7041 + (unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
7042 + safestats.st_rngalarm++;
7043 + safe_rng_enable_short_cycle(sc);
7045 + for (i = 0; i < 64; i++) {
7046 + w = READ_REG(sc, SAFE_RNG_CNFG);
7047 + freq_inc = ((w + freq_inc) & 0x3fL);
7048 + w = ((w & ~0x3fL) | freq_inc);
7049 + WRITE_REG(sc, SAFE_RNG_CNFG, w);
7051 + WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
7053 + (void) safe_rng_read(sc);
7056 + if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
7057 + safe_rng_disable_short_cycle(sc);
7062 + safe_rng_disable_short_cycle(sc);
7064 + WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
7068 +#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
7072 + * Resets the board. Values in the regesters are left as is
7073 + * from the reset (i.e. initial values are assigned elsewhere).
7076 +safe_reset_board(struct safe_softc *sc)
7080 + * Reset the device. The manual says no delay
7081 + * is needed between marking and clearing reset.
7083 + DPRINTF(("%s()\n", __FUNCTION__));
7085 + v = READ_REG(sc, SAFE_PE_DMACFG) &~
7086 + (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
7087 + SAFE_PE_DMACFG_SGRESET);
7088 + WRITE_REG(sc, SAFE_PE_DMACFG, v
7089 + | SAFE_PE_DMACFG_PERESET
7090 + | SAFE_PE_DMACFG_PDRRESET
7091 + | SAFE_PE_DMACFG_SGRESET);
7092 + WRITE_REG(sc, SAFE_PE_DMACFG, v);
7096 + * Initialize registers we need to touch only once.
7099 +safe_init_board(struct safe_softc *sc)
7101 + u_int32_t v, dwords;
7103 + DPRINTF(("%s()\n", __FUNCTION__));
7105 + v = READ_REG(sc, SAFE_PE_DMACFG);
7106 + v &=~ ( SAFE_PE_DMACFG_PEMODE
7107 + | SAFE_PE_DMACFG_FSENA /* failsafe enable */
7108 + | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
7109 + | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
7110 + | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
7111 + | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
7112 + | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
7113 + | SAFE_PE_DMACFG_ESPACKET /* swap the packet data */
7115 + v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
7116 + | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
7117 + | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
7118 + | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
7119 + | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
7120 + | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
7122 + | SAFE_PE_DMACFG_ESPACKET /* swap the packet data */
7125 + WRITE_REG(sc, SAFE_PE_DMACFG, v);
7127 +#ifdef __BIG_ENDIAN
7128 + /* tell the safenet that we are 4321 and not 1234 */
7129 + WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
7132 + if (sc->sc_chiprev == SAFE_REV(1,0)) {
7134 + * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
7135 + * "target mode transfers" done while the chip is DMA'ing
7136 + * >1020 bytes cause the hardware to lockup. To avoid this
7137 + * we reduce the max PCI transfer size and use small source
7138 + * particle descriptors (<= 256 bytes).
7140 + WRITE_REG(sc, SAFE_DMA_CFG, 256);
7141 + device_printf(sc->sc_dev,
7142 + "Reduce max DMA size to %u words for rev %u.%u WAR\n",
7143 + (unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
7144 + (unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
7145 + (unsigned) SAFE_REV_MIN(sc->sc_chiprev));
7146 + sc->sc_max_dsize = 256;
7148 + sc->sc_max_dsize = SAFE_MAX_DSIZE;
7151 + /* NB: operands+results are overlaid */
7152 + WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
7153 + WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
7155 + * Configure ring entry size and number of items in the ring.
7157 + KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
7158 + ("PE ring entry not 32-bit aligned!"));
7159 + dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
7160 + WRITE_REG(sc, SAFE_PE_RINGCFG,
7161 + (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
7162 + WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */
7164 + WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
7165 + WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
7166 + WRITE_REG(sc, SAFE_PE_PARTSIZE,
7167 + (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
7169 + * NB: destination particles are fixed size. We use
7170 + * an mbuf cluster and require all results go to
7171 + * clusters or smaller.
7173 + WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);
7175 + /* it's now safe to enable PE mode, do it */
7176 + WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
7179 + * Configure hardware to use level-triggered interrupts and
7180 + * to interrupt after each descriptor is processed.
7182 + WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
7183 + WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
7184 + WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
7185 + WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
7190 + * Clean up after a chip crash.
7191 + * It is assumed that the caller in splimp()
7194 +safe_cleanchip(struct safe_softc *sc)
7196 + DPRINTF(("%s()\n", __FUNCTION__));
7198 + if (sc->sc_nqchip != 0) {
7199 + struct safe_ringentry *re = sc->sc_back;
7201 + while (re != sc->sc_front) {
7202 + if (re->re_desc.d_csr != 0)
7203 + safe_free_entry(sc, re);
7204 + if (++re == sc->sc_ringtop)
7208 + sc->sc_nqchip = 0;
7214 + * It is assumed that the caller is within splimp().
7217 +safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
7219 + struct cryptop *crp;
7221 + DPRINTF(("%s()\n", __FUNCTION__));
7226 + if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
7228 + m_freem(re->re_dst_m);
7230 + printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
7233 + crp = (struct cryptop *)re->re_crp;
7235 + re->re_desc.d_csr = 0;
7237 + crp->crp_etype = EFAULT;
7243 + * Routine to reset the chip and clean up.
7244 + * It is assumed that the caller is in splimp()
7247 +safe_totalreset(struct safe_softc *sc)
7249 + DPRINTF(("%s()\n", __FUNCTION__));
7251 + safe_reset_board(sc);
7252 + safe_init_board(sc);
7253 + safe_cleanchip(sc);
7257 + * Is the operand suitable aligned for direct DMA. Each
7258 + * segment must be aligned on a 32-bit boundary and all
7259 + * but the last segment must be a multiple of 4 bytes.
7262 +safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
7266 + DPRINTF(("%s()\n", __FUNCTION__));
7268 + for (i = 0; i < op->nsegs; i++) {
7269 + if (op->segs[i].ds_addr & 3)
7271 + if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
7278 + * Is the operand suitable for direct DMA as the destination
7279 + * of an operation. The hardware requires that each ``particle''
7280 + * but the last in an operation result have the same size. We
7281 + * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
7282 + * 0 if some segment is not a multiple of of this size, 1 if all
7283 + * segments are exactly this size, or 2 if segments are at worst
7284 + * a multple of this size.
7287 +safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
7291 + DPRINTF(("%s()\n", __FUNCTION__));
7293 + if (op->nsegs > 0) {
7296 + for (i = 0; i < op->nsegs-1; i++) {
7297 + if (op->segs[i].ds_len % sc->sc_max_dsize)
7299 + if (op->segs[i].ds_len != sc->sc_max_dsize)
7307 +safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
7309 + struct safe_softc *sc = device_get_softc(dev);
7310 + struct safe_pkq *q;
7311 + unsigned long flags;
7313 + DPRINTF(("%s()\n", __FUNCTION__));
7316 + krp->krp_status = EINVAL;
7320 + if (krp->krp_op != CRK_MOD_EXP) {
7321 + krp->krp_status = EOPNOTSUPP;
7325 + q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
7327 + krp->krp_status = ENOMEM;
7330 + memset(q, 0, sizeof(*q));
7332 + INIT_LIST_HEAD(&q->pkq_list);
7334 + spin_lock_irqsave(&sc->sc_pkmtx, flags);
7335 + list_add_tail(&q->pkq_list, &sc->sc_pkq);
7337 + spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
7341 + crypto_kdone(krp);
7345 +#define SAFE_CRK_PARAM_BASE 0
7346 +#define SAFE_CRK_PARAM_EXP 1
7347 +#define SAFE_CRK_PARAM_MOD 2
7350 +safe_kstart(struct safe_softc *sc)
7352 + struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
7353 + int exp_bits, mod_bits, base_bits;
7354 + u_int32_t op, a_off, b_off, c_off, d_off;
7356 + DPRINTF(("%s()\n", __FUNCTION__));
7358 + if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
7359 + krp->krp_status = EINVAL;
7363 + base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
7364 + if (base_bits > 2048)
7366 + if (base_bits <= 0) /* 5. base not zero */
7369 + exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
7370 + if (exp_bits > 2048)
7372 + if (exp_bits <= 0) /* 1. exponent word length > 0 */
7373 + goto too_small; /* 4. exponent not zero */
7375 + mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
7376 + if (mod_bits > 2048)
7378 + if (mod_bits <= 32) /* 2. modulus word length > 1 */
7379 + goto too_small; /* 8. MSW of modulus != zero */
7380 + if (mod_bits < exp_bits) /* 3 modulus len >= exponent len */
7382 + if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
7383 + goto bad_domain; /* 6. modulus is odd */
7384 + if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
7385 + goto too_small; /* make sure result will fit */
7387 + /* 7. modulus > base */
7388 + if (mod_bits < base_bits)
7390 + if (mod_bits == base_bits) {
7391 + u_int8_t *basep, *modp;
7394 + basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
7395 + ((base_bits + 7) / 8) - 1;
7396 + modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
7397 + ((mod_bits + 7) / 8) - 1;
7399 + for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
7400 + if (*modp < *basep)
7402 + if (*modp > *basep)
7407 + /* And on the 9th step, he rested. */
7409 + WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
7410 + WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
7411 + if (mod_bits > 1024) {
7412 + op = SAFE_PK_FUNC_EXP4;
7418 + op = SAFE_PK_FUNC_EXP16;
7424 + sc->sc_pk_reslen = b_off - a_off;
7425 + sc->sc_pk_resoff = d_off;
7427 + /* A is exponent, B is modulus, C is base, D is result */
7428 + safe_kload_reg(sc, a_off, b_off - a_off,
7429 + &krp->krp_param[SAFE_CRK_PARAM_EXP]);
7430 + WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
7431 + safe_kload_reg(sc, b_off, b_off - a_off,
7432 + &krp->krp_param[SAFE_CRK_PARAM_MOD]);
7433 + WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
7434 + safe_kload_reg(sc, c_off, b_off - a_off,
7435 + &krp->krp_param[SAFE_CRK_PARAM_BASE]);
7436 + WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
7437 + WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
7439 + WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
7444 + krp->krp_status = E2BIG;
7447 + krp->krp_status = ERANGE;
7450 + krp->krp_status = EDOM;
7455 +safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
7457 + u_int plen = (cr->crp_nbits + 7) / 8;
7458 + int i, sig = plen * 8;
7459 + u_int8_t c, *p = cr->crp_p;
7461 + DPRINTF(("%s()\n", __FUNCTION__));
7463 + for (i = plen - 1; i >= 0; i--) {
7466 + while ((c & 0x80) == 0) {
7478 +safe_kfeed(struct safe_softc *sc)
7480 + struct safe_pkq *q, *tmp;
7482 + DPRINTF(("%s()\n", __FUNCTION__));
7484 + if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
7486 + if (sc->sc_pkq_cur != NULL)
7488 + list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
7489 + sc->sc_pkq_cur = q;
7490 + list_del(&q->pkq_list);
7491 + if (safe_kstart(sc) != 0) {
7492 + crypto_kdone(q->pkq_krp);
7494 + sc->sc_pkq_cur = NULL;
7496 + /* op started, start polling */
7497 + mod_timer(&sc->sc_pkto, jiffies + 1);
7504 +safe_kpoll(unsigned long arg)
7506 + struct safe_softc *sc = NULL;
7507 + struct safe_pkq *q;
7508 + struct crparam *res;
7510 + u_int32_t buf[64];
7511 + unsigned long flags;
7513 + DPRINTF(("%s()\n", __FUNCTION__));
7515 + if (arg >= SAFE_MAX_CHIPS)
7517 + sc = safe_chip_idx[arg];
7519 + DPRINTF(("%s() - bad callback\n", __FUNCTION__));
7523 + spin_lock_irqsave(&sc->sc_pkmtx, flags);
7524 + if (sc->sc_pkq_cur == NULL)
7526 + if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
7527 + /* still running, check back later */
7528 + mod_timer(&sc->sc_pkto, jiffies + 1);
7532 + q = sc->sc_pkq_cur;
7533 + res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
7534 + bzero(buf, sizeof(buf));
7535 + bzero(res->crp_p, (res->crp_nbits + 7) / 8);
7536 + for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
7537 + buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
7538 + sc->sc_pk_resoff + (i << 2)));
7539 + bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
7541 + * reduce the bits that need copying if possible
7543 + res->crp_nbits = min(res->crp_nbits,sc->sc_pk_reslen * 8);
7544 + res->crp_nbits = safe_ksigbits(sc, res);
7546 + for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
7547 + WRITE_REG(sc, i, 0);
7549 + crypto_kdone(q->pkq_krp);
7551 + sc->sc_pkq_cur = NULL;
7555 + spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
7559 +safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
7560 + struct crparam *n)
7562 + u_int32_t buf[64], i;
7564 + DPRINTF(("%s()\n", __FUNCTION__));
7566 + bzero(buf, sizeof(buf));
7567 + bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
7569 + for (i = 0; i < len >> 2; i++)
7570 + WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
7571 + cpu_to_le32(buf[i]));
7576 +safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
7578 + printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
7580 + , READ_REG(sc, SAFE_DMA_ENDIAN)
7581 + , READ_REG(sc, SAFE_DMA_SRCADDR)
7582 + , READ_REG(sc, SAFE_DMA_DSTADDR)
7583 + , READ_REG(sc, SAFE_DMA_STAT)
7588 +safe_dump_intrstate(struct safe_softc *sc, const char *tag)
7590 + printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
7592 + , READ_REG(sc, SAFE_HI_CFG)
7593 + , READ_REG(sc, SAFE_HI_MASK)
7594 + , READ_REG(sc, SAFE_HI_DESC_CNT)
7595 + , READ_REG(sc, SAFE_HU_STAT)
7596 + , READ_REG(sc, SAFE_HM_STAT)
7601 +safe_dump_ringstate(struct safe_softc *sc, const char *tag)
7603 + u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
7605 + /* NB: assume caller has lock on ring */
7606 + printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
7608 + estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
7609 + (unsigned long)(sc->sc_back - sc->sc_ring),
7610 + (unsigned long)(sc->sc_front - sc->sc_ring));
7614 +safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
7618 + ix = re - sc->sc_ring;
7619 + printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
7622 + , re->re_desc.d_csr
7623 + , re->re_desc.d_src
7624 + , re->re_desc.d_dst
7625 + , re->re_desc.d_sa
7626 + , re->re_desc.d_len
7628 + if (re->re_src.nsegs > 1) {
7629 + ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
7630 + sizeof(struct safe_pdesc);
7631 + for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
7632 + printf(" spd[%u] %p: %p size %u flags %x"
7633 + , ix, &sc->sc_spring[ix]
7634 + , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
7635 + , sc->sc_spring[ix].pd_size
7636 + , sc->sc_spring[ix].pd_flags
7638 + if (sc->sc_spring[ix].pd_size == 0)
7639 + printf(" (zero!)");
7641 + if (++ix == SAFE_TOTAL_SPART)
7645 + if (re->re_dst.nsegs > 1) {
7646 + ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
7647 + sizeof(struct safe_pdesc);
7648 + for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
7649 + printf(" dpd[%u] %p: %p flags %x\n"
7650 + , ix, &sc->sc_dpring[ix]
7651 + , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
7652 + , sc->sc_dpring[ix].pd_flags
7654 + if (++ix == SAFE_TOTAL_DPART)
7658 + printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
7659 + re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
7660 + printf("sa: key %x %x %x %x %x %x %x %x\n"
7661 + , re->re_sa.sa_key[0]
7662 + , re->re_sa.sa_key[1]
7663 + , re->re_sa.sa_key[2]
7664 + , re->re_sa.sa_key[3]
7665 + , re->re_sa.sa_key[4]
7666 + , re->re_sa.sa_key[5]
7667 + , re->re_sa.sa_key[6]
7668 + , re->re_sa.sa_key[7]
7670 + printf("sa: indigest %x %x %x %x %x\n"
7671 + , re->re_sa.sa_indigest[0]
7672 + , re->re_sa.sa_indigest[1]
7673 + , re->re_sa.sa_indigest[2]
7674 + , re->re_sa.sa_indigest[3]
7675 + , re->re_sa.sa_indigest[4]
7677 + printf("sa: outdigest %x %x %x %x %x\n"
7678 + , re->re_sa.sa_outdigest[0]
7679 + , re->re_sa.sa_outdigest[1]
7680 + , re->re_sa.sa_outdigest[2]
7681 + , re->re_sa.sa_outdigest[3]
7682 + , re->re_sa.sa_outdigest[4]
7684 + printf("sr: iv %x %x %x %x\n"
7685 + , re->re_sastate.sa_saved_iv[0]
7686 + , re->re_sastate.sa_saved_iv[1]
7687 + , re->re_sastate.sa_saved_iv[2]
7688 + , re->re_sastate.sa_saved_iv[3]
7690 + printf("sr: hashbc %u indigest %x %x %x %x %x\n"
7691 + , re->re_sastate.sa_saved_hashbc
7692 + , re->re_sastate.sa_saved_indigest[0]
7693 + , re->re_sastate.sa_saved_indigest[1]
7694 + , re->re_sastate.sa_saved_indigest[2]
7695 + , re->re_sastate.sa_saved_indigest[3]
7696 + , re->re_sastate.sa_saved_indigest[4]
7701 +safe_dump_ring(struct safe_softc *sc, const char *tag)
7703 + unsigned long flags;
7705 + spin_lock_irqsave(&sc->sc_ringmtx, flags);
7706 + printf("\nSafeNet Ring State:\n");
7707 + safe_dump_intrstate(sc, tag);
7708 + safe_dump_dmastatus(sc, tag);
7709 + safe_dump_ringstate(sc, tag);
7710 + if (sc->sc_nqchip) {
7711 + struct safe_ringentry *re = sc->sc_back;
7713 + safe_dump_request(sc, tag, re);
7714 + if (++re == sc->sc_ringtop)
7716 + } while (re != sc->sc_front);
7718 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
7720 +#endif /* SAFE_DEBUG */
7723 +static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
7725 + struct safe_softc *sc = NULL;
7726 + u32 mem_start, mem_len, cmd;
7727 + int i, rc, devinfo;
7729 + static int num_chips = 0;
7731 + DPRINTF(("%s()\n", __FUNCTION__));
7733 + if (pci_enable_device(dev) < 0)
7737 + printk("safe: found device with no IRQ assigned. check BIOS settings!");
7738 + pci_disable_device(dev);
7742 + if (pci_set_mwi(dev)) {
7743 + printk("safe: pci_set_mwi failed!");
7747 + sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
7750 + memset(sc, 0, sizeof(*sc));
7752 + softc_device_init(sc, "safe", num_chips, safe_methods);
7756 + sc->sc_pcidev = dev;
7757 + if (num_chips < SAFE_MAX_CHIPS) {
7758 + safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
7762 + INIT_LIST_HEAD(&sc->sc_pkq);
7763 + spin_lock_init(&sc->sc_pkmtx);
7765 + pci_set_drvdata(sc->sc_pcidev, sc);
7767 + /* we read its hardware registers as memory */
7768 + mem_start = pci_resource_start(sc->sc_pcidev, 0);
7769 + mem_len = pci_resource_len(sc->sc_pcidev, 0);
7771 + sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
7772 + if (!sc->sc_base_addr) {
7773 + device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
7774 + mem_start, mem_start + mem_len - 1);
7778 + /* fix up the bus size */
7779 + if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
7780 + device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
7783 + if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
7784 + device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
7788 + pci_set_master(sc->sc_pcidev);
7790 + pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);
7792 + if (!(cmd & PCI_COMMAND_MEMORY)) {
7793 + device_printf(sc->sc_dev, "failed to enable memory mapping\n");
7797 + if (!(cmd & PCI_COMMAND_MASTER)) {
7798 + device_printf(sc->sc_dev, "failed to enable bus mastering\n");
7802 + rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
7804 + device_printf(sc->sc_dev, "failed to hook irq %d\n", sc->sc_irq);
7807 + sc->sc_irq = dev->irq;
7809 + sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
7810 + (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
7813 + * Allocate packet engine descriptors.
7815 + sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7816 + SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7817 + &sc->sc_ringalloc.dma_paddr);
7818 + if (!sc->sc_ringalloc.dma_vaddr) {
7819 + device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
7824 + * Hookup the static portion of all our data structures.
7826 + sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
7827 + sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
7828 + sc->sc_front = sc->sc_ring;
7829 + sc->sc_back = sc->sc_ring;
7830 + raddr = sc->sc_ringalloc.dma_paddr;
7831 + bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
7832 + for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
7833 + struct safe_ringentry *re = &sc->sc_ring[i];
7835 + re->re_desc.d_sa = raddr +
7836 + offsetof(struct safe_ringentry, re_sa);
7837 + re->re_sa.sa_staterec = raddr +
7838 + offsetof(struct safe_ringentry, re_sastate);
7840 + raddr += sizeof (struct safe_ringentry);
7842 + spin_lock_init(&sc->sc_ringmtx);
7845 + * Allocate scatter and gather particle descriptors.
7847 + sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7848 + SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
7849 + &sc->sc_spalloc.dma_paddr);
7850 + if (!sc->sc_spalloc.dma_vaddr) {
7851 + device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
7854 + sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
7855 + sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
7856 + sc->sc_spfree = sc->sc_spring;
7857 + bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
7859 + sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7860 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7861 + &sc->sc_dpalloc.dma_paddr);
7862 + if (!sc->sc_dpalloc.dma_vaddr) {
7863 + device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
7866 + sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
7867 + sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
7868 + sc->sc_dpfree = sc->sc_dpring;
7869 + bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
7871 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
7872 + if (sc->sc_cid < 0) {
7873 + device_printf(sc->sc_dev, "could not get crypto driver id\n");
7877 + printf("%s:", device_get_nameunit(sc->sc_dev));
7879 + devinfo = READ_REG(sc, SAFE_DEVINFO);
7880 + if (devinfo & SAFE_DEVINFO_RNG) {
7881 + sc->sc_flags |= SAFE_FLAGS_RNG;
7884 + if (devinfo & SAFE_DEVINFO_PKEY) {
7886 + sc->sc_flags |= SAFE_FLAGS_KEY;
7887 + crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
7889 + crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
7891 + init_timer(&sc->sc_pkto);
7892 + sc->sc_pkto.function = safe_kpoll;
7893 + sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
7895 + if (devinfo & SAFE_DEVINFO_DES) {
7896 + printf(" des/3des");
7897 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
7898 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
7900 + if (devinfo & SAFE_DEVINFO_AES) {
7902 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
7904 + if (devinfo & SAFE_DEVINFO_MD5) {
7906 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
7908 + if (devinfo & SAFE_DEVINFO_SHA1) {
7910 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
7913 + crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
7914 + crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
7915 + /* XXX other supported algorithms */
7918 + safe_reset_board(sc); /* reset h/w */
7919 + safe_init_board(sc); /* init h/w */
7921 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
7922 + if (sc->sc_flags & SAFE_FLAGS_RNG) {
7923 + safe_rng_init(sc);
7924 + crypto_rregister(sc->sc_cid, safe_read_random, sc);
7926 +#endif /* SAFE_NO_RNG */
7931 + if (sc->sc_cid >= 0)
7932 + crypto_unregister_all(sc->sc_cid);
7933 + if (sc->sc_irq != -1)
7934 + free_irq(sc->sc_irq, sc);
7935 + if (sc->sc_ringalloc.dma_vaddr)
7936 + pci_free_consistent(sc->sc_pcidev,
7937 + SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7938 + sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
7939 + if (sc->sc_spalloc.dma_vaddr)
7940 + pci_free_consistent(sc->sc_pcidev,
7941 +	SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
7942 + sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
7943 + if (sc->sc_dpalloc.dma_vaddr)
7944 + pci_free_consistent(sc->sc_pcidev,
7945 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7946 + sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
7951 +static void safe_remove(struct pci_dev *dev)
7953 + struct safe_softc *sc = pci_get_drvdata(dev);
7955 + DPRINTF(("%s()\n", __FUNCTION__));
7957 + /* XXX wait/abort active ops */
7959 + WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */
7961 + del_timer_sync(&sc->sc_pkto);
7963 + crypto_unregister_all(sc->sc_cid);
7965 + safe_cleanchip(sc);
7967 + if (sc->sc_irq != -1)
7968 + free_irq(sc->sc_irq, sc);
7969 + if (sc->sc_ringalloc.dma_vaddr)
7970 + pci_free_consistent(sc->sc_pcidev,
7971 + SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7972 + sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
7973 + if (sc->sc_spalloc.dma_vaddr)
7974 + pci_free_consistent(sc->sc_pcidev,
7975 +	SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
7976 + sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
7977 + if (sc->sc_dpalloc.dma_vaddr)
7978 + pci_free_consistent(sc->sc_pcidev,
7979 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7980 + sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
7982 + sc->sc_ringalloc.dma_vaddr = NULL;
7983 + sc->sc_spalloc.dma_vaddr = NULL;
7984 + sc->sc_dpalloc.dma_vaddr = NULL;
7987 +static struct pci_device_id safe_pci_tbl[] = {
7988 + { PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
7989 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
7992 +MODULE_DEVICE_TABLE(pci, safe_pci_tbl);
7994 +static struct pci_driver safe_driver = {
7996 + .id_table = safe_pci_tbl,
7997 + .probe = safe_probe,
7998 + .remove = safe_remove,
7999 + /* add PM stuff here one day */
8002 +static int __init safe_init (void)
8004 + struct safe_softc *sc = NULL;
8007 + DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));
8009 + rc = pci_register_driver(&safe_driver);
8010 + pci_register_driver_compat(&safe_driver, rc);
8015 +static void __exit safe_exit (void)
8017 + pci_unregister_driver(&safe_driver);
8020 +module_init(safe_init);
8021 +module_exit(safe_exit);
8023 +MODULE_LICENSE("BSD");
8024 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
8025 +MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");
8027 +++ b/crypto/ocf/safe/sha1.c
8029 +/* $KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $ */
8031 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
8032 + * All rights reserved.
8034 + * Redistribution and use in source and binary forms, with or without
8035 + * modification, are permitted provided that the following conditions
8037 + * 1. Redistributions of source code must retain the above copyright
8038 + * notice, this list of conditions and the following disclaimer.
8039 + * 2. Redistributions in binary form must reproduce the above copyright
8040 + * notice, this list of conditions and the following disclaimer in the
8041 + * documentation and/or other materials provided with the distribution.
8042 + * 3. Neither the name of the project nor the names of its contributors
8043 + * may be used to endorse or promote products derived from this software
8044 + * without specific prior written permission.
8046 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
8047 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8048 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8049 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
8050 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8051 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8052 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8053 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8054 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8055 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8060 + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
8061 + * based on: http://csrc.nist.gov/fips/fip180-1.txt
8062 + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
8066 +#include <sys/cdefs.h>
8067 +__FBSDID("$FreeBSD: src/sys/crypto/sha1.c,v 1.9 2003/06/10 21:36:57 obrien Exp $");
8069 +#include <sys/types.h>
8070 +#include <sys/cdefs.h>
8071 +#include <sys/time.h>
8072 +#include <sys/systm.h>
8074 +#include <crypto/sha1.h>
8078 +#if BYTE_ORDER != BIG_ENDIAN
8079 +# if BYTE_ORDER != LITTLE_ENDIAN
8080 +# define unsupported 1
8084 +#ifndef unsupported
8086 +/* constant table */
8087 +static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
8088 +#define K(t) _K[(t) / 20]
8090 +#define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d)))
8091 +#define F1(b, c, d) (((b) ^ (c)) ^ (d))
8092 +#define F2(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d)))
8093 +#define F3(b, c, d) (((b) ^ (c)) ^ (d))
8095 +#define S(n, x) (((x) << (n)) | ((x) >> (32 - n)))
8098 +#define H(n) (ctxt->h.b32[(n)])
8099 +#define COUNT (ctxt->count)
8100 +#define BCOUNT (ctxt->c.b64[0] / 8)
8101 +#define W(n) (ctxt->m.b32[(n)])
8103 +#define PUTBYTE(x) { \
8104 + ctxt->m.b8[(COUNT % 64)] = (x); \
8107 + ctxt->c.b64[0] += 8; \
8108 + if (COUNT % 64 == 0) \
8109 + sha1_step(ctxt); \
8112 +#define PUTPAD(x) { \
8113 + ctxt->m.b8[(COUNT % 64)] = (x); \
8116 + if (COUNT % 64 == 0) \
8117 + sha1_step(ctxt); \
8120 +static void sha1_step(struct sha1_ctxt *);
8124 + struct sha1_ctxt *ctxt;
8126 + u_int32_t a, b, c, d, e;
8130 +#if BYTE_ORDER == LITTLE_ENDIAN
8131 + struct sha1_ctxt tctxt;
8132 + bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
8133 + ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
8134 + ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
8135 + ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
8136 + ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
8137 + ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
8138 + ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
8139 + ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
8140 + ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
8141 + ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
8142 + ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
8143 + ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
8144 + ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
8145 + ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
8146 + ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
8147 + ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
8148 + ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
8149 + ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
8150 + ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
8151 + ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
8152 + ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
8153 + ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
8154 + ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
8155 + ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
8156 + ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
8157 + ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
8158 + ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
8159 + ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
8160 + ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
8161 + ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
8162 + ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
8163 + ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
8164 + ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
8167 + a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
8169 + for (t = 0; t < 20; t++) {
8172 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8174 + tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
8175 + e = d; d = c; c = S(30, b); b = a; a = tmp;
8177 + for (t = 20; t < 40; t++) {
8179 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8180 + tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
8181 + e = d; d = c; c = S(30, b); b = a; a = tmp;
8183 + for (t = 40; t < 60; t++) {
8185 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8186 + tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
8187 + e = d; d = c; c = S(30, b); b = a; a = tmp;
8189 + for (t = 60; t < 80; t++) {
8191 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8192 + tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
8193 + e = d; d = c; c = S(30, b); b = a; a = tmp;
8202 + bzero(&ctxt->m.b8[0], 64);
8205 +/*------------------------------------------------------------*/
8209 + struct sha1_ctxt *ctxt;
8211 + bzero(ctxt, sizeof(struct sha1_ctxt));
8212 + H(0) = 0x67452301;
8213 + H(1) = 0xefcdab89;
8214 + H(2) = 0x98badcfe;
8215 + H(3) = 0x10325476;
8216 + H(4) = 0xc3d2e1f0;
8221 + struct sha1_ctxt *ctxt;
8223 + size_t padlen; /*pad length in bytes*/
8228 + padstart = COUNT % 64;
8229 + padlen = 64 - padstart;
8231 + bzero(&ctxt->m.b8[padstart], padlen);
8235 + padstart = COUNT % 64; /* should be 0 */
8236 + padlen = 64 - padstart; /* should be 64 */
8238 + bzero(&ctxt->m.b8[padstart], padlen - 8);
8239 + COUNT += (padlen - 8);
8241 +#if BYTE_ORDER == BIG_ENDIAN
8242 + PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
8243 + PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
8244 + PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
8245 + PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
8247 + PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
8248 + PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
8249 + PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
8250 + PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
8255 +sha1_loop(ctxt, input, len)
8256 + struct sha1_ctxt *ctxt;
8257 + const u_int8_t *input;
8267 + while (off < len) {
8268 + gapstart = COUNT % 64;
8269 + gaplen = 64 - gapstart;
8271 + copysiz = (gaplen < len - off) ? gaplen : len - off;
8272 + bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz);
8275 + ctxt->c.b64[0] += copysiz * 8;
8276 + if (COUNT % 64 == 0)
8283 +sha1_result(ctxt, digest0)
8284 + struct sha1_ctxt *ctxt;
8289 + digest = (u_int8_t *)digest0;
8291 +#if BYTE_ORDER == BIG_ENDIAN
8292 + bcopy(&ctxt->h.b8[0], digest, 20);
8294 + digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
8295 + digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
8296 + digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
8297 + digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
8298 + digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
8299 + digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
8300 + digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
8301 + digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
8302 + digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
8303 + digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
8307 +#endif /*unsupported*/
8309 +++ b/crypto/ocf/safe/sha1.h
8311 +/* $FreeBSD: src/sys/crypto/sha1.h,v 1.8 2002/03/20 05:13:50 alfred Exp $ */
8312 +/* $KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $ */
8315 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
8316 + * All rights reserved.
8318 + * Redistribution and use in source and binary forms, with or without
8319 + * modification, are permitted provided that the following conditions
8321 + * 1. Redistributions of source code must retain the above copyright
8322 + * notice, this list of conditions and the following disclaimer.
8323 + * 2. Redistributions in binary form must reproduce the above copyright
8324 + * notice, this list of conditions and the following disclaimer in the
8325 + * documentation and/or other materials provided with the distribution.
8326 + * 3. Neither the name of the project nor the names of its contributors
8327 + * may be used to endorse or promote products derived from this software
8328 + * without specific prior written permission.
8330 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
8331 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8332 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8333 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
8334 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8335 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8336 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8337 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8338 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8339 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8343 + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
8344 + * based on: http://csrc.nist.gov/fips/fip180-1.txt
8345 + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
8348 +#ifndef _NETINET6_SHA1_H_
8349 +#define _NETINET6_SHA1_H_
8362 + u_int32_t b32[16];
8368 +extern void sha1_init(struct sha1_ctxt *);
8369 +extern void sha1_pad(struct sha1_ctxt *);
8370 +extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
8371 +extern void sha1_result(struct sha1_ctxt *, caddr_t);
8373 +/* compatibility with other SHA1 source codes */
8374 +typedef struct sha1_ctxt SHA1_CTX;
8375 +#define SHA1Init(x) sha1_init((x))
8376 +#define SHA1Update(x, y, z) sha1_loop((x), (y), (z))
8377 +#define SHA1Final(x, y) sha1_result((y), (x))
8378 +#endif /* __KERNEL__ */
8380 +#define SHA1_RESULTLEN (160/8)
8382 +#endif /*_NETINET6_SHA1_H_*/
8384 +++ b/crypto/ocf/safe/safereg.h
8387 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
8388 + * Copyright (c) 2003 Global Technology Associates, Inc.
8389 + * All rights reserved.
8391 + * Redistribution and use in source and binary forms, with or without
8392 + * modification, are permitted provided that the following conditions
8394 + * 1. Redistributions of source code must retain the above copyright
8395 + * notice, this list of conditions and the following disclaimer.
8396 + * 2. Redistributions in binary form must reproduce the above copyright
8397 + * notice, this list of conditions and the following disclaimer in the
8398 + * documentation and/or other materials provided with the distribution.
8400 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
8401 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8402 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8403 + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
8404 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8405 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8406 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8407 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8408 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8409 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8412 + * $FreeBSD: src/sys/dev/safe/safereg.h,v 1.1 2003/07/21 21:46:07 sam Exp $
8414 +#ifndef _SAFE_SAFEREG_H_
8415 +#define _SAFE_SAFEREG_H_
8418 + * Register definitions for SafeNet SafeXcel-1141 crypto device.
8419 + * Definitions from revision 1.3 (Nov 6 2002) of the User's Manual.
8422 +#define BS_BAR 0x10 /* DMA base address register */
8423 +#define BS_TRDY_TIMEOUT 0x40 /* TRDY timeout */
8424 +#define BS_RETRY_TIMEOUT 0x41 /* DMA retry timeout */
8426 +#define PCI_VENDOR_SAFENET 0x16ae /* SafeNet, Inc. */
8429 +#define PCI_PRODUCT_SAFEXCEL 0x1141 /* 1141 */
8431 +#define SAFE_PE_CSR 0x0000 /* Packet Engine Ctrl/Status */
8432 +#define SAFE_PE_SRC 0x0004 /* Packet Engine Source */
8433 +#define SAFE_PE_DST 0x0008 /* Packet Engine Destination */
8434 +#define SAFE_PE_SA 0x000c /* Packet Engine SA */
8435 +#define SAFE_PE_LEN 0x0010 /* Packet Engine Length */
8436 +#define SAFE_PE_DMACFG 0x0040 /* Packet Engine DMA Configuration */
8437 +#define SAFE_PE_DMASTAT 0x0044 /* Packet Engine DMA Status */
8438 +#define SAFE_PE_PDRBASE 0x0048 /* Packet Engine Descriptor Ring Base */
8439 +#define SAFE_PE_RDRBASE 0x004c /* Packet Engine Result Ring Base */
8440 +#define SAFE_PE_RINGCFG 0x0050 /* Packet Engine Ring Configuration */
8441 +#define SAFE_PE_RINGPOLL 0x0054 /* Packet Engine Ring Poll */
8442 +#define SAFE_PE_IRNGSTAT 0x0058 /* Packet Engine Internal Ring Status */
8443 +#define SAFE_PE_ERNGSTAT 0x005c /* Packet Engine External Ring Status */
8444 +#define SAFE_PE_IOTHRESH 0x0060 /* Packet Engine I/O Threshold */
8445 +#define SAFE_PE_GRNGBASE 0x0064 /* Packet Engine Gather Ring Base */
8446 +#define SAFE_PE_SRNGBASE 0x0068 /* Packet Engine Scatter Ring Base */
8447 +#define SAFE_PE_PARTSIZE 0x006c /* Packet Engine Particle Ring Size */
8448 +#define SAFE_PE_PARTCFG 0x0070 /* Packet Engine Particle Ring Config */
8449 +#define SAFE_CRYPTO_CTRL 0x0080 /* Crypto Control */
8450 +#define SAFE_DEVID 0x0084 /* Device ID */
8451 +#define SAFE_DEVINFO 0x0088 /* Device Info */
8452 +#define SAFE_HU_STAT 0x00a0 /* Host Unmasked Status */
8453 +#define SAFE_HM_STAT 0x00a4 /* Host Masked Status (read-only) */
8454 +#define SAFE_HI_CLR 0x00a4 /* Host Clear Interrupt (write-only) */
8455 +#define SAFE_HI_MASK 0x00a8 /* Host Mask Control */
8456 +#define SAFE_HI_CFG 0x00ac /* Interrupt Configuration */
8457 +#define SAFE_HI_RD_DESCR 0x00b4 /* Force Descriptor Read */
8458 +#define SAFE_HI_DESC_CNT 0x00b8 /* Host Descriptor Done Count */
8459 +#define SAFE_DMA_ENDIAN 0x00c0 /* Master Endian Status */
8460 +#define SAFE_DMA_SRCADDR 0x00c4 /* DMA Source Address Status */
8461 +#define SAFE_DMA_DSTADDR 0x00c8 /* DMA Destination Address Status */
8462 +#define SAFE_DMA_STAT 0x00cc /* DMA Current Status */
8463 +#define SAFE_DMA_CFG 0x00d4 /* DMA Configuration/Status */
8464 +#define SAFE_ENDIAN 0x00e0 /* Endian Configuration */
8465 +#define SAFE_PK_A_ADDR 0x0800 /* Public Key A Address */
8466 +#define SAFE_PK_B_ADDR 0x0804 /* Public Key B Address */
8467 +#define SAFE_PK_C_ADDR 0x0808 /* Public Key C Address */
8468 +#define SAFE_PK_D_ADDR 0x080c /* Public Key D Address */
8469 +#define SAFE_PK_A_LEN 0x0810 /* Public Key A Length */
8470 +#define SAFE_PK_B_LEN 0x0814 /* Public Key B Length */
8471 +#define SAFE_PK_SHIFT 0x0818 /* Public Key Shift */
8472 +#define SAFE_PK_FUNC 0x081c /* Public Key Function */
8473 +#define SAFE_PK_RAM_START 0x1000 /* Public Key RAM start address */
8474 +#define SAFE_PK_RAM_END 0x1fff /* Public Key RAM end address */
8476 +#define SAFE_RNG_OUT 0x0100 /* RNG Output */
8477 +#define SAFE_RNG_STAT 0x0104 /* RNG Status */
8478 +#define SAFE_RNG_CTRL 0x0108 /* RNG Control */
8479 +#define SAFE_RNG_A 0x010c /* RNG A */
8480 +#define SAFE_RNG_B 0x0110 /* RNG B */
8481 +#define SAFE_RNG_X_LO 0x0114 /* RNG X [31:0] */
8482 +#define SAFE_RNG_X_MID 0x0118 /* RNG X [63:32] */
8483 +#define SAFE_RNG_X_HI 0x011c /* RNG X [80:64] */
8484 +#define SAFE_RNG_X_CNTR 0x0120 /* RNG Counter */
8485 +#define SAFE_RNG_ALM_CNT 0x0124 /* RNG Alarm Count */
8486 +#define SAFE_RNG_CNFG 0x0128 /* RNG Configuration */
8487 +#define SAFE_RNG_LFSR1_LO 0x012c /* RNG LFSR1 [31:0] */
8488 +#define SAFE_RNG_LFSR1_HI 0x0130 /* RNG LFSR1 [47:32] */
8489 +#define SAFE_RNG_LFSR2_LO 0x0134 /* RNG LFSR2 [31:0] */
8490 +#define SAFE_RNG_LFSR2_HI 0x0138 /* RNG LFSR2 [47:32] */
8492 +#define SAFE_PE_CSR_READY 0x00000001 /* ready for processing */
8493 +#define SAFE_PE_CSR_DONE 0x00000002 /* h/w completed processing */
8494 +#define SAFE_PE_CSR_LOADSA 0x00000004 /* load SA digests */
8495 +#define SAFE_PE_CSR_HASHFINAL 0x00000010 /* do hash pad & write result */
8496 +#define SAFE_PE_CSR_SABUSID 0x000000c0 /* bus id for SA */
8497 +#define SAFE_PE_CSR_SAPCI 0x00000040 /* PCI bus id for SA */
8498 +#define SAFE_PE_CSR_NXTHDR 0x0000ff00 /* next hdr value for IPsec */
8499 +#define SAFE_PE_CSR_FPAD 0x0000ff00 /* fixed pad for basic ops */
8500 +#define SAFE_PE_CSR_STATUS 0x00ff0000 /* operation result status */
8501 +#define SAFE_PE_CSR_AUTH_FAIL 0x00010000 /* ICV mismatch (inbound) */
8502 +#define SAFE_PE_CSR_PAD_FAIL 0x00020000 /* pad verify fail (inbound) */
8503 +#define SAFE_PE_CSR_SEQ_FAIL 0x00040000 /* sequence number (inbound) */
8504 +#define SAFE_PE_CSR_XERROR 0x00080000 /* extended error follows */
8505 +#define SAFE_PE_CSR_XECODE 0x00f00000 /* extended error code */
8506 +#define SAFE_PE_CSR_XECODE_S 20
8507 +#define SAFE_PE_CSR_XECODE_BADCMD 0 /* invalid command */
8508 +#define SAFE_PE_CSR_XECODE_BADALG 1 /* invalid algorithm */
8509 +#define SAFE_PE_CSR_XECODE_ALGDIS 2 /* algorithm disabled */
8510 +#define SAFE_PE_CSR_XECODE_ZEROLEN 3 /* zero packet length */
8511 +#define SAFE_PE_CSR_XECODE_DMAERR 4 /* bus DMA error */
8512 +#define SAFE_PE_CSR_XECODE_PIPEABORT 5 /* secondary bus DMA error */
8513 +#define SAFE_PE_CSR_XECODE_BADSPI 6 /* IPsec SPI mismatch */
8514 +#define SAFE_PE_CSR_XECODE_TIMEOUT 10 /* failsafe timeout */
8515 +#define SAFE_PE_CSR_PAD 0xff000000 /* ESP padding control/status */
8516 +#define SAFE_PE_CSR_PAD_MIN 0x00000000 /* minimum IPsec padding */
8517 +#define SAFE_PE_CSR_PAD_16 0x08000000 /* pad to 16-byte boundary */
8518 +#define SAFE_PE_CSR_PAD_32 0x10000000 /* pad to 32-byte boundary */
8519 +#define SAFE_PE_CSR_PAD_64 0x20000000 /* pad to 64-byte boundary */
8520 +#define SAFE_PE_CSR_PAD_128 0x40000000 /* pad to 128-byte boundary */
8521 +#define SAFE_PE_CSR_PAD_256 0x80000000 /* pad to 256-byte boundary */
8524 + * Check the CSR to see if the PE has returned ownership to
8525 + * the host. Note that before processing a descriptor this
8526 + * must be done followed by a check of the SAFE_PE_LEN register
8527 + * status bits to avoid premature processing of a descriptor
8528 + * on its way back to the host.
8530 +#define SAFE_PE_CSR_IS_DONE(_csr) \
8531 + (((_csr) & (SAFE_PE_CSR_READY | SAFE_PE_CSR_DONE)) == SAFE_PE_CSR_DONE)
8533 +#define SAFE_PE_LEN_LENGTH 0x000fffff /* total length (bytes) */
8534 +#define SAFE_PE_LEN_READY 0x00400000 /* ready for processing */
8535 +#define SAFE_PE_LEN_DONE 0x00800000 /* h/w completed processing */
8536 +#define SAFE_PE_LEN_BYPASS 0xff000000 /* bypass offset (bytes) */
8537 +#define SAFE_PE_LEN_BYPASS_S 24
8539 +#define SAFE_PE_LEN_IS_DONE(_len) \
8540 + (((_len) & (SAFE_PE_LEN_READY | SAFE_PE_LEN_DONE)) == SAFE_PE_LEN_DONE)
8542 +/* NB: these apply to HU_STAT, HM_STAT, HI_CLR, and HI_MASK */
8543 +#define SAFE_INT_PE_CDONE 0x00000002 /* PE context done */
8544 +#define SAFE_INT_PE_DDONE 0x00000008 /* PE descriptor done */
8545 +#define SAFE_INT_PE_ERROR 0x00000010 /* PE error */
8546 +#define SAFE_INT_PE_ODONE 0x00000020 /* PE operation done */
8548 +#define SAFE_HI_CFG_PULSE 0x00000001 /* use pulse interrupt */
8549 +#define SAFE_HI_CFG_LEVEL 0x00000000 /* use level interrupt */
8550 +#define SAFE_HI_CFG_AUTOCLR 0x00000002 /* auto-clear pulse interrupt */
8552 +#define SAFE_ENDIAN_PASS 0x000000e4 /* straight pass-thru */
8553 +#define SAFE_ENDIAN_SWAB 0x0000001b /* swap bytes in 32-bit word */
8555 +#define SAFE_PE_DMACFG_PERESET 0x00000001 /* reset packet engine */
8556 +#define SAFE_PE_DMACFG_PDRRESET 0x00000002 /* reset PDR counters/ptrs */
8557 +#define SAFE_PE_DMACFG_SGRESET 0x00000004 /* reset scatter/gather cache */
8558 +#define SAFE_PE_DMACFG_FSENA 0x00000008 /* enable failsafe reset */
8559 +#define SAFE_PE_DMACFG_PEMODE 0x00000100 /* packet engine mode */
8560 +#define SAFE_PE_DMACFG_SAPREC 0x00000200 /* SA precedes packet */
8561 +#define SAFE_PE_DMACFG_PKFOLL 0x00000400 /* packet follows descriptor */
8562 +#define SAFE_PE_DMACFG_GPRBID 0x00003000 /* gather particle ring busid */
8563 +#define SAFE_PE_DMACFG_GPRPCI 0x00001000 /* PCI gather particle ring */
8564 +#define SAFE_PE_DMACFG_SPRBID 0x0000c000 /* scatter part. ring busid */
8565 +#define SAFE_PE_DMACFG_SPRPCI 0x00004000 /* PCI scatter part. ring */
8566 +#define SAFE_PE_DMACFG_ESDESC 0x00010000 /* endian swap descriptors */
8567 +#define SAFE_PE_DMACFG_ESSA 0x00020000 /* endian swap SA data */
8568 +#define SAFE_PE_DMACFG_ESPACKET 0x00040000 /* endian swap packet data */
8569 +#define SAFE_PE_DMACFG_ESPDESC 0x00080000 /* endian swap particle desc. */
8570 +#define SAFE_PE_DMACFG_NOPDRUP 0x00100000 /* supp. PDR ownership update */
8571 +#define SAFE_PD_EDMACFG_PCIMODE 0x01000000 /* PCI target mode */
8573 +#define SAFE_PE_DMASTAT_PEIDONE 0x00000001 /* PE core input done */
8574 +#define SAFE_PE_DMASTAT_PEODONE 0x00000002 /* PE core output done */
8575 +#define SAFE_PE_DMASTAT_ENCDONE 0x00000004 /* encryption done */
8576 +#define SAFE_PE_DMASTAT_IHDONE 0x00000008 /* inner hash done */
8577 +#define SAFE_PE_DMASTAT_OHDONE 0x00000010 /* outer hash (HMAC) done */
8578 +#define SAFE_PE_DMASTAT_PADFLT 0x00000020 /* crypto pad fault */
8579 +#define SAFE_PE_DMASTAT_ICVFLT 0x00000040 /* ICV fault */
8580 +#define SAFE_PE_DMASTAT_SPIMIS 0x00000080 /* SPI mismatch */
8581 +#define SAFE_PE_DMASTAT_CRYPTO 0x00000100 /* crypto engine timeout */
8582 +#define SAFE_PE_DMASTAT_CQACT 0x00000200 /* command queue active */
8583 +#define SAFE_PE_DMASTAT_IRACT 0x00000400 /* input request active */
8584 +#define SAFE_PE_DMASTAT_ORACT 0x00000800 /* output request active */
8585 +#define SAFE_PE_DMASTAT_PEISIZE 0x003ff000 /* PE input size:32-bit words */
8586 +#define SAFE_PE_DMASTAT_PEOSIZE 0xffc00000 /* PE out. size:32-bit words */
8588 +#define SAFE_PE_RINGCFG_SIZE 0x000003ff /* ring size (descriptors) */
8589 +#define SAFE_PE_RINGCFG_OFFSET 0xffff0000 /* offset btw desc's (dwords) */
8590 +#define SAFE_PE_RINGCFG_OFFSET_S 16
8592 +#define SAFE_PE_RINGPOLL_POLL 0x00000fff /* polling frequency/divisor */
8593 +#define SAFE_PE_RINGPOLL_RETRY 0x03ff0000 /* polling frequency/divisor */
8594 +#define SAFE_PE_RINGPOLL_CONT 0x80000000 /* continuously poll */
8596 +#define SAFE_PE_IRNGSTAT_CQAVAIL 0x00000001 /* command queue available */
8598 +#define SAFE_PE_ERNGSTAT_NEXT 0x03ff0000 /* index of next packet desc. */
8599 +#define SAFE_PE_ERNGSTAT_NEXT_S 16
8601 +#define SAFE_PE_IOTHRESH_INPUT 0x000003ff /* input threshold (dwords) */
8602 +#define SAFE_PE_IOTHRESH_OUTPUT 0x03ff0000 /* output threshold (dwords) */
8604 +#define SAFE_PE_PARTCFG_SIZE 0x0000ffff /* scatter particle size */
8605 +#define SAFE_PE_PARTCFG_GBURST 0x00030000 /* gather particle burst */
8606 +#define SAFE_PE_PARTCFG_GBURST_2 0x00000000
8607 +#define SAFE_PE_PARTCFG_GBURST_4 0x00010000
8608 +#define SAFE_PE_PARTCFG_GBURST_8 0x00020000
8609 +#define SAFE_PE_PARTCFG_GBURST_16 0x00030000
8610 +#define SAFE_PE_PARTCFG_SBURST 0x000c0000 /* scatter particle burst */
8611 +#define SAFE_PE_PARTCFG_SBURST_2 0x00000000
8612 +#define SAFE_PE_PARTCFG_SBURST_4 0x00040000
8613 +#define SAFE_PE_PARTCFG_SBURST_8 0x00080000
8614 +#define SAFE_PE_PARTCFG_SBURST_16 0x000c0000
8616 +#define SAFE_PE_PARTSIZE_SCAT 0xffff0000 /* scatter particle ring size */
8617 +#define SAFE_PE_PARTSIZE_GATH 0x0000ffff /* gather particle ring size */
8619 +#define SAFE_CRYPTO_CTRL_3DES 0x00000001 /* enable 3DES support */
8620 +#define SAFE_CRYPTO_CTRL_PKEY 0x00010000 /* enable public key support */
8621 +#define SAFE_CRYPTO_CTRL_RNG 0x00020000 /* enable RNG support */
8623 +#define SAFE_DEVINFO_REV_MIN 0x0000000f /* minor rev for chip */
8624 +#define SAFE_DEVINFO_REV_MAJ 0x000000f0 /* major rev for chip */
8625 +#define SAFE_DEVINFO_REV_MAJ_S 4
8626 +#define SAFE_DEVINFO_DES 0x00000100 /* DES/3DES support present */
8627 +#define SAFE_DEVINFO_ARC4 0x00000200 /* ARC4 support present */
8628 +#define SAFE_DEVINFO_AES 0x00000400 /* AES support present */
8629 +#define SAFE_DEVINFO_MD5 0x00001000 /* MD5 support present */
8630 +#define SAFE_DEVINFO_SHA1 0x00002000 /* SHA-1 support present */
8631 +#define SAFE_DEVINFO_RIPEMD 0x00004000 /* RIPEMD support present */
8632 +#define SAFE_DEVINFO_DEFLATE 0x00010000 /* Deflate support present */
8633 +#define SAFE_DEVINFO_SARAM 0x00100000 /* on-chip SA RAM present */
8634 +#define SAFE_DEVINFO_EMIBUS 0x00200000 /* EMI bus present */
8635 +#define SAFE_DEVINFO_PKEY 0x00400000 /* public key support present */
8636 +#define SAFE_DEVINFO_RNG 0x00800000 /* RNG present */
8638 +#define SAFE_REV(_maj, _min) (((_maj) << SAFE_DEVINFO_REV_MAJ_S) | (_min))
8639 +#define SAFE_REV_MAJ(_chiprev) \
8640 + (((_chiprev) & SAFE_DEVINFO_REV_MAJ) >> SAFE_DEVINFO_REV_MAJ_S)
8641 +#define SAFE_REV_MIN(_chiprev) ((_chiprev) & SAFE_DEVINFO_REV_MIN)
8643 +#define SAFE_PK_FUNC_MULT 0x00000001 /* Multiply function */
8644 +#define SAFE_PK_FUNC_SQUARE 0x00000004 /* Square function */
8645 +#define SAFE_PK_FUNC_ADD 0x00000010 /* Add function */
8646 +#define SAFE_PK_FUNC_SUB 0x00000020 /* Subtract function */
8647 +#define SAFE_PK_FUNC_LSHIFT 0x00000040 /* Left-shift function */
8648 +#define SAFE_PK_FUNC_RSHIFT 0x00000080 /* Right-shift function */
8649 +#define SAFE_PK_FUNC_DIV 0x00000100 /* Divide function */
8650 +#define SAFE_PK_FUNC_CMP 0x00000400 /* Compare function */
8651 +#define SAFE_PK_FUNC_COPY 0x00000800 /* Copy function */
8652 +#define SAFE_PK_FUNC_EXP16 0x00002000 /* Exponentiate (4-bit ACT) */
8653 +#define SAFE_PK_FUNC_EXP4 0x00004000 /* Exponentiate (2-bit ACT) */
8654 +#define SAFE_PK_FUNC_RUN 0x00008000 /* start/status */
8656 +#define SAFE_RNG_STAT_BUSY 0x00000001 /* busy, data not valid */
8658 +#define SAFE_RNG_CTRL_PRE_LFSR 0x00000001 /* enable output pre-LFSR */
8659 +#define SAFE_RNG_CTRL_TST_MODE 0x00000002 /* enable test mode */
8660 +#define SAFE_RNG_CTRL_TST_RUN 0x00000004 /* start test state machine */
8661 +#define SAFE_RNG_CTRL_ENA_RING1 0x00000008 /* test entropy oscillator #1 */
8662 +#define SAFE_RNG_CTRL_ENA_RING2 0x00000010 /* test entropy oscillator #2 */
8663 +#define SAFE_RNG_CTRL_DIS_ALARM 0x00000020 /* disable RNG alarm reports */
8664 +#define SAFE_RNG_CTRL_TST_CLOCK 0x00000040 /* enable test clock */
8665 +#define SAFE_RNG_CTRL_SHORTEN 0x00000080 /* shorten state timers */
8666 +#define SAFE_RNG_CTRL_TST_ALARM 0x00000100 /* simulate alarm state */
8667 +#define SAFE_RNG_CTRL_RST_LFSR 0x00000200 /* reset LFSR */
8670 + * Packet engine descriptor. Note that d_csr is a copy of the
8671 + * SAFE_PE_CSR register and all definitions apply, and d_len
8672 + * is a copy of the SAFE_PE_LEN register and all definitions apply.
8673 + * d_src and d_len may point directly to contiguous data or to a
8674 + * list of ``particle descriptors'' when using scatter/gather i/o.
8677 + u_int32_t d_csr; /* per-packet control/status */
8678 + u_int32_t d_src; /* source address */
8679 + u_int32_t d_dst; /* destination address */
8680 + u_int32_t d_sa; /* SA address */
8681 + u_int32_t d_len; /* length, bypass, status */
8685 + * Scatter/Gather particle descriptor.
8687 + * NB: scatter descriptors do not specify a size; this is fixed
8688 + * by the setting of the SAFE_PE_PARTCFG register.
8690 +struct safe_pdesc {
8691 + u_int32_t pd_addr; /* particle address */
8692 +#ifdef __BIG_ENDIAN
8693 + u_int16_t pd_flags; /* control word */
8694 + u_int16_t pd_size; /* particle size (bytes) */
8696 + u_int16_t pd_flags; /* control word */
8697 + u_int16_t pd_size; /* particle size (bytes) */
8701 +#define SAFE_PD_READY 0x0001 /* ready for processing */
8702 +#define SAFE_PD_DONE 0x0002 /* h/w completed processing */
8705 + * Security Association (SA) Record (Rev 1). One of these is
8706 + * required for each operation processed by the packet engine.
8708 +struct safe_sarec {
8709 + u_int32_t sa_cmd0;
8710 + u_int32_t sa_cmd1;
8711 + u_int32_t sa_resv0;
8712 + u_int32_t sa_resv1;
8713 + u_int32_t sa_key[8]; /* DES/3DES/AES key */
8714 + u_int32_t sa_indigest[5]; /* inner digest */
8715 + u_int32_t sa_outdigest[5]; /* outer digest */
8716 + u_int32_t sa_spi; /* SPI */
8717 + u_int32_t sa_seqnum; /* sequence number */
8718 + u_int32_t sa_seqmask[2]; /* sequence number mask */
8719 + u_int32_t sa_resv2;
8720 + u_int32_t sa_staterec; /* address of state record */
8721 + u_int32_t sa_resv3[2];
8722 + u_int32_t sa_samgmt0; /* SA management field 0 */
8723 +	u_int32_t	sa_samgmt1;		/* SA management field 1 */
8726 +#define SAFE_SA_CMD0_OP 0x00000007 /* operation code */
8727 +#define SAFE_SA_CMD0_OP_CRYPT 0x00000000 /* encrypt/decrypt (basic) */
8728 +#define SAFE_SA_CMD0_OP_BOTH 0x00000001 /* encrypt-hash/hash-decrypto */
8729 +#define SAFE_SA_CMD0_OP_HASH 0x00000003 /* hash (outbound-only) */
8730 +#define SAFE_SA_CMD0_OP_ESP 0x00000000 /* ESP in/out (proto) */
8731 +#define SAFE_SA_CMD0_OP_AH 0x00000001 /* AH in/out (proto) */
8732 +#define SAFE_SA_CMD0_INBOUND 0x00000008 /* inbound operation */
8733 +#define SAFE_SA_CMD0_OUTBOUND 0x00000000 /* outbound operation */
8734 +#define SAFE_SA_CMD0_GROUP 0x00000030 /* operation group */
8735 +#define SAFE_SA_CMD0_BASIC 0x00000000 /* basic operation */
8736 +#define SAFE_SA_CMD0_PROTO 0x00000010 /* protocol/packet operation */
8737 +#define SAFE_SA_CMD0_BUNDLE 0x00000020 /* bundled operation (resvd) */
8738 +#define SAFE_SA_CMD0_PAD 0x000000c0 /* crypto pad method */
8739 +#define SAFE_SA_CMD0_PAD_IPSEC 0x00000000 /* IPsec padding */
8740 +#define SAFE_SA_CMD0_PAD_PKCS7 0x00000040 /* PKCS#7 padding */
8741 +#define SAFE_SA_CMD0_PAD_CONS 0x00000080 /* constant padding */
8742 +#define SAFE_SA_CMD0_PAD_ZERO 0x000000c0 /* zero padding */
8743 +#define SAFE_SA_CMD0_CRYPT_ALG 0x00000f00 /* symmetric crypto algorithm */
8744 +#define SAFE_SA_CMD0_DES 0x00000000 /* DES crypto algorithm */
8745 +#define SAFE_SA_CMD0_3DES 0x00000100 /* 3DES crypto algorithm */
8746 +#define SAFE_SA_CMD0_AES 0x00000300 /* AES crypto algorithm */
8747 +#define SAFE_SA_CMD0_CRYPT_NULL 0x00000f00 /* null crypto algorithm */
8748 +#define SAFE_SA_CMD0_HASH_ALG 0x0000f000 /* hash algorithm */
8749 +#define SAFE_SA_CMD0_MD5 0x00000000 /* MD5 hash algorithm */
8750 +#define SAFE_SA_CMD0_SHA1 0x00001000 /* SHA-1 hash algorithm */
8751 +#define SAFE_SA_CMD0_HASH_NULL 0x0000f000 /* null hash algorithm */
8752 +#define SAFE_SA_CMD0_HDR_PROC 0x00080000 /* header processing */
8753 +#define SAFE_SA_CMD0_IBUSID 0x00300000 /* input bus id */
8754 +#define SAFE_SA_CMD0_IPCI 0x00100000 /* PCI input bus id */
8755 +#define SAFE_SA_CMD0_OBUSID 0x00c00000 /* output bus id */
8756 +#define SAFE_SA_CMD0_OPCI 0x00400000 /* PCI output bus id */
8757 +#define SAFE_SA_CMD0_IVLD 0x03000000 /* IV loading */
8758 +#define SAFE_SA_CMD0_IVLD_NONE 0x00000000 /* IV no load (reuse) */
8759 +#define SAFE_SA_CMD0_IVLD_IBUF 0x01000000 /* IV load from input buffer */
8760 +#define SAFE_SA_CMD0_IVLD_STATE 0x02000000 /* IV load from state */
8761 +#define SAFE_SA_CMD0_HSLD 0x0c000000 /* hash state loading */
8762 +#define SAFE_SA_CMD0_HSLD_SA 0x00000000 /* hash state load from SA */
8763 +#define SAFE_SA_CMD0_HSLD_STATE 0x08000000 /* hash state load from state */
8764 +#define SAFE_SA_CMD0_HSLD_NONE 0x0c000000 /* hash state no load */
8765 +#define SAFE_SA_CMD0_SAVEIV 0x10000000 /* save IV */
8766 +#define SAFE_SA_CMD0_SAVEHASH 0x20000000 /* save hash state */
8767 +#define SAFE_SA_CMD0_IGATHER 0x40000000 /* input gather */
8768 +#define SAFE_SA_CMD0_OSCATTER 0x80000000 /* output scatter */
8770 +#define SAFE_SA_CMD1_HDRCOPY 0x00000002 /* copy header to output */
8771 +#define SAFE_SA_CMD1_PAYCOPY 0x00000004 /* copy payload to output */
8772 +#define SAFE_SA_CMD1_PADCOPY 0x00000008 /* copy pad to output */
8773 +#define SAFE_SA_CMD1_IPV4 0x00000000 /* IPv4 protocol */
8774 +#define SAFE_SA_CMD1_IPV6 0x00000010 /* IPv6 protocol */
8775 +#define SAFE_SA_CMD1_MUTABLE 0x00000020 /* mutable bit processing */
8776 +#define SAFE_SA_CMD1_SRBUSID 0x000000c0 /* state record bus id */
8777 +#define SAFE_SA_CMD1_SRPCI 0x00000040 /* state record from PCI */
8778 +#define SAFE_SA_CMD1_CRMODE 0x00000300 /* crypto mode */
8779 +#define SAFE_SA_CMD1_ECB 0x00000000 /* ECB crypto mode */
8780 +#define SAFE_SA_CMD1_CBC 0x00000100 /* CBC crypto mode */
8781 +#define SAFE_SA_CMD1_OFB 0x00000200 /* OFB crypto mode */
8782 +#define SAFE_SA_CMD1_CFB 0x00000300 /* CFB crypto mode */
8783 +#define SAFE_SA_CMD1_CRFEEDBACK 0x00000c00 /* crypto feedback mode */
8784 +#define SAFE_SA_CMD1_64BIT 0x00000000 /* 64-bit crypto feedback */
8785 +#define SAFE_SA_CMD1_8BIT 0x00000400 /* 8-bit crypto feedback */
8786 +#define SAFE_SA_CMD1_1BIT 0x00000800 /* 1-bit crypto feedback */
8787 +#define SAFE_SA_CMD1_128BIT 0x00000c00 /* 128-bit crypto feedback */
8788 +#define SAFE_SA_CMD1_OPTIONS 0x00001000 /* HMAC/options mutable bit */
8789 +#define SAFE_SA_CMD1_HMAC SAFE_SA_CMD1_OPTIONS
8790 +#define SAFE_SA_CMD1_SAREV1 0x00008000 /* SA Revision 1 */
8791 +#define SAFE_SA_CMD1_OFFSET 0x00ff0000 /* hash/crypto offset(dwords) */
8792 +#define SAFE_SA_CMD1_OFFSET_S 16
8793 +#define SAFE_SA_CMD1_AESKEYLEN 0x0f000000 /* AES key length */
8794 +#define SAFE_SA_CMD1_AES128 0x02000000 /* 128-bit AES key */
8795 +#define SAFE_SA_CMD1_AES192 0x03000000 /* 192-bit AES key */
8796 +#define SAFE_SA_CMD1_AES256 0x04000000 /* 256-bit AES key */
8799 + * Security Associate State Record (Rev 1).
8801 +struct safe_sastate {
8802 + u_int32_t sa_saved_iv[4]; /* saved IV (DES/3DES/AES) */
8803 + u_int32_t sa_saved_hashbc; /* saved hash byte count */
8804 + u_int32_t sa_saved_indigest[5]; /* saved inner digest */
8806 +#endif /* _SAFE_SAFEREG_H_ */
8808 +++ b/crypto/ocf/safe/safevar.h
8811 + * The linux port of this code done by David McCullough
8812 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
8813 + * The license and original author are listed below.
8815 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
8816 + * Copyright (c) 2003 Global Technology Associates, Inc.
8817 + * All rights reserved.
8819 + * Redistribution and use in source and binary forms, with or without
8820 + * modification, are permitted provided that the following conditions
8822 + * 1. Redistributions of source code must retain the above copyright
8823 + * notice, this list of conditions and the following disclaimer.
8824 + * 2. Redistributions in binary form must reproduce the above copyright
8825 + * notice, this list of conditions and the following disclaimer in the
8826 + * documentation and/or other materials provided with the distribution.
8828 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
8829 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8830 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8831 + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
8832 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8833 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8834 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8835 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8836 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8837 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8840 + * $FreeBSD: src/sys/dev/safe/safevar.h,v 1.2 2006/05/17 18:34:26 pjd Exp $
8842 +#ifndef _SAFE_SAFEVAR_H_
8843 +#define _SAFE_SAFEVAR_H_
8845 +/* Maximum queue length */
8846 +#ifndef SAFE_MAX_NQUEUE
8847 +#define SAFE_MAX_NQUEUE 60
8850 +#define SAFE_MAX_PART 64 /* Maximum scatter/gather depth */
8851 +#define SAFE_DMA_BOUNDARY 0 /* No boundary for source DMA ops */
8852 +#define SAFE_MAX_DSIZE 2048 /* MCLBYTES Fixed scatter particle size */
8853 +#define SAFE_MAX_SSIZE 0x0ffff /* Maximum gather particle size */
8854 +#define SAFE_MAX_DMA 0xfffff /* Maximum PE operand size (20 bits) */
8855 +/* total src+dst particle descriptors */
8856 +#define SAFE_TOTAL_DPART (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
8857 +#define SAFE_TOTAL_SPART (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
8859 +#define SAFE_RNG_MAXBUFSIZ 128 /* 32-bit words */
8861 +#define SAFE_CARD(sid) (((sid) & 0xf0000000) >> 28)
8862 +#define SAFE_SESSION(sid) ( (sid) & 0x0fffffff)
8863 +#define SAFE_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
8865 +#define SAFE_DEF_RTY 0xff /* PCI Retry Timeout */
8866 +#define SAFE_DEF_TOUT 0xff /* PCI TRDY Timeout */
8867 +#define SAFE_DEF_CACHELINE 0x01 /* Cache Line setting */
8871 + * State associated with the allocation of each chunk
8872 + * of memory setup for DMA.
8874 +struct safe_dma_alloc {
8875 + dma_addr_t dma_paddr;
8880 + * Cryptographic operand state. One of these exists for each
8881 + * source and destination operand passed in from the crypto
8882 + * subsystem. When possible source and destination operands
8883 + * refer to the same memory. More often they are distinct.
8884 + * We track the virtual address of each operand as well as
8885 + * where each is mapped for DMA.
8887 +struct safe_operand {
8889 + struct sk_buff *skb;
8893 + int mapsize; /* total number of bytes in segs */
8895 + dma_addr_t ds_addr;
8898 + } segs[SAFE_MAX_PART];
8903 + * Packet engine ring entry and cryptographic operation state.
8904 + * The packet engine requires a ring of descriptors that contain
8905 + * pointers to various cryptographic state. However the ring
8906 + * configuration register allows you to specify an arbitrary size
8907 + * for ring entries. We use this feature to collect most of the
8908 + * state for each cryptographic request into one spot. Other than
8909 + * ring entries only the ``particle descriptors'' (scatter/gather
8910 + * lists) and the actual operand data are kept separate. The
8911 + * particle descriptors must also be organized in rings. The
8911 + * operand data can be located arbitrarily (modulo alignment constraints).
8914 + * Note that the descriptor ring is mapped onto the PCI bus so
8915 + * the hardware can DMA data. This means the entire ring must be
8918 +struct safe_ringentry {
8919 + struct safe_desc re_desc; /* command descriptor */
8920 + struct safe_sarec re_sa; /* SA record */
8921 + struct safe_sastate re_sastate; /* SA state record */
8923 + struct cryptop *re_crp; /* crypto operation */
8925 + struct safe_operand re_src; /* source operand */
8926 + struct safe_operand re_dst; /* destination operand */
8928 + int re_sesn; /* crypto session ID */
8930 +#define SAFE_QFLAGS_COPYOUTIV 0x1 /* copy back on completion */
8931 +#define SAFE_QFLAGS_COPYOUTICV 0x2 /* copy back on completion */
8934 +#define re_src_skb re_src.u.skb
8935 +#define re_src_io re_src.u.io
8936 +#define re_src_map re_src.map
8937 +#define re_src_nsegs re_src.nsegs
8938 +#define re_src_segs re_src.segs
8939 +#define re_src_mapsize re_src.mapsize
8941 +#define re_dst_skb re_dst.u.skb
8942 +#define re_dst_io re_dst.u.io
8943 +#define re_dst_map re_dst.map
8944 +#define re_dst_nsegs re_dst.nsegs
8945 +#define re_dst_segs re_dst.segs
8946 +#define re_dst_mapsize re_dst.mapsize
8948 +struct rndstate_test;
8950 +struct safe_session {
8951 + u_int32_t ses_used;
8952 + u_int32_t ses_klen; /* key length in bits */
8953 + u_int32_t ses_key[8]; /* DES/3DES/AES key */
8954 + u_int32_t ses_mlen; /* hmac length in bytes */
8955 + u_int32_t ses_hminner[5]; /* hmac inner state */
8956 + u_int32_t ses_hmouter[5]; /* hmac outer state */
8957 + u_int32_t ses_iv[4]; /* DES/3DES/AES iv */
8961 + struct list_head pkq_list;
8962 + struct cryptkop *pkq_krp;
8965 +struct safe_softc {
8966 + softc_device_decl sc_dev;
8969 + struct pci_dev *sc_pcidev;
8970 + ocf_iomem_t sc_base_addr;
8972 + u_int sc_chiprev; /* major/minor chip revision */
8973 + int sc_flags; /* device specific flags */
8974 +#define SAFE_FLAGS_KEY 0x01 /* has key accelerator */
8975 +#define SAFE_FLAGS_RNG 0x02 /* hardware rng */
8977 + int sc_needwakeup; /* notify crypto layer */
8978 + int32_t sc_cid; /* crypto tag */
8980 + struct safe_dma_alloc sc_ringalloc; /* PE ring allocation state */
8981 + struct safe_ringentry *sc_ring; /* PE ring */
8982 + struct safe_ringentry *sc_ringtop; /* PE ring top */
8983 + struct safe_ringentry *sc_front; /* next free entry */
8984 + struct safe_ringentry *sc_back; /* next pending entry */
8985 + int sc_nqchip; /* # passed to chip */
8986 + spinlock_t sc_ringmtx; /* PE ring lock */
8987 + struct safe_pdesc *sc_spring; /* src particle ring */
8988 + struct safe_pdesc *sc_springtop; /* src particle ring top */
8989 + struct safe_pdesc *sc_spfree; /* next free src particle */
8990 + struct safe_dma_alloc sc_spalloc; /* src particle ring state */
8991 + struct safe_pdesc *sc_dpring; /* dest particle ring */
8992 + struct safe_pdesc *sc_dpringtop; /* dest particle ring top */
8993 + struct safe_pdesc *sc_dpfree; /* next free dest particle */
8994 + struct safe_dma_alloc sc_dpalloc; /* dst particle ring state */
8995 + int sc_nsessions; /* # of sessions */
8996 + struct safe_session *sc_sessions; /* sessions */
8998 + struct timer_list sc_pkto; /* PK polling */
8999 + spinlock_t sc_pkmtx; /* PK lock */
9000 + struct list_head sc_pkq; /* queue of PK requests */
9001 + struct safe_pkq *sc_pkq_cur; /* current processing request */
9002 + u_int32_t sc_pk_reslen, sc_pk_resoff;
9004 + int sc_max_dsize; /* maximum safe DMA size */
9006 +#endif /* __KERNEL__ */
9008 +struct safe_stats {
9009 + u_int64_t st_ibytes;
9010 + u_int64_t st_obytes;
9011 + u_int32_t st_ipackets;
9012 + u_int32_t st_opackets;
9013 + u_int32_t st_invalid; /* invalid argument */
9014 + u_int32_t st_badsession; /* invalid session id */
9015 + u_int32_t st_badflags; /* flags indicate !(mbuf | uio) */
9016 + u_int32_t st_nodesc; /* op submitted w/o descriptors */
9017 + u_int32_t st_badalg; /* unsupported algorithm */
9018 + u_int32_t st_ringfull; /* PE descriptor ring full */
9019 + u_int32_t st_peoperr; /* PE marked error */
9020 + u_int32_t st_dmaerr; /* PE DMA error */
9021 + u_int32_t st_bypasstoobig; /* bypass > 96 bytes */
9022 + u_int32_t st_skipmismatch; /* enc part begins before auth part */
9023 + u_int32_t st_lenmismatch; /* enc length different auth length */
9024 + u_int32_t st_coffmisaligned; /* crypto offset not 32-bit aligned */
9025 + u_int32_t st_cofftoobig; /* crypto offset > 255 words */
9026 + u_int32_t st_iovmisaligned; /* iov op not aligned */
9027 + u_int32_t st_iovnotuniform; /* iov op not suitable */
9028 + u_int32_t st_unaligned; /* unaligned src caused copy */
9029 + u_int32_t st_notuniform; /* non-uniform src caused copy */
9030 + u_int32_t st_nomap; /* bus_dmamap_create failed */
9031 + u_int32_t st_noload; /* bus_dmamap_load_* failed */
9032 + u_int32_t st_nombuf; /* MGET* failed */
9033 + u_int32_t st_nomcl; /* MCLGET* failed */
9034 + u_int32_t st_maxqchip; /* max mcr1 ops out for processing */
9035 + u_int32_t st_rng; /* RNG requests */
9036 + u_int32_t st_rngalarm; /* RNG alarm requests */
9037 + u_int32_t st_noicvcopy; /* ICV data copies suppressed */
9039 +#endif /* _SAFE_SAFEVAR_H_ */
9041 +++ b/crypto/ocf/crypto.c
9044 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
9045 + * Copyright (C) 2006-2007 David McCullough
9046 + * Copyright (C) 2004-2005 Intel Corporation.
9047 + * The license and original author are listed below.
9049 + * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
9050 + * Redistribution and use in source and binary forms, with or without
9052 + * modification, are permitted provided that the following conditions
9054 + * 1. Redistributions of source code must retain the above copyright
9055 + * notice, this list of conditions and the following disclaimer.
9056 + * 2. Redistributions in binary form must reproduce the above copyright
9057 + * notice, this list of conditions and the following disclaimer in the
9058 + * documentation and/or other materials provided with the distribution.
9060 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
9061 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
9062 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
9063 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
9064 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
9065 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
9066 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
9067 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9068 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
9069 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9073 +#include <sys/cdefs.h>
9074 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
9078 + * Cryptographic Subsystem.
9080 + * This code is derived from the Openbsd Cryptographic Framework (OCF)
9081 + * that has the copyright shown below. Very little of the original
9085 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
9087 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
9088 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
9089 + * supported the development of this code.
9091 + * Copyright (c) 2000, 2001 Angelos D. Keromytis
9093 + * Permission to use, copy, and modify this software with or without fee
9094 + * is hereby granted, provided that this entire notice is included in
9095 + * all source code copies of any software which is or includes a copy or
9096 + * modification of this software.
9098 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
9099 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
9100 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
9101 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
9104 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
9108 +#ifndef AUTOCONF_INCLUDED
9109 +#include <linux/config.h>
9111 +#include <linux/module.h>
9112 +#include <linux/init.h>
9113 +#include <linux/list.h>
9114 +#include <linux/slab.h>
9115 +#include <linux/wait.h>
9116 +#include <linux/sched.h>
9117 +#include <linux/spinlock.h>
9118 +#include <linux/version.h>
9119 +#include <cryptodev.h>
9122 + * keep track of whether or not we have been initialised, a big
9123 + * issue if we are linked into the kernel and a driver gets started before
9126 +static int crypto_initted = 0;
9129 + * Crypto drivers register themselves by allocating a slot in the
9130 + * crypto_drivers table with crypto_get_driverid() and then registering
9131 + * each algorithm they support with crypto_register() and crypto_kregister().
9135 + * lock on driver table
9136 + * we track its state as spin_is_locked does not do anything on non-SMP boxes
9138 +static spinlock_t crypto_drivers_lock;
9139 +static int crypto_drivers_locked; /* for non-SMP boxes */
9141 +#define CRYPTO_DRIVER_LOCK() \
9143 + spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
9144 + crypto_drivers_locked = 1; \
9145 + dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
9147 +#define CRYPTO_DRIVER_UNLOCK() \
9149 + dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
9150 + crypto_drivers_locked = 0; \
9151 + spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
9153 +#define CRYPTO_DRIVER_ASSERT() \
9155 + if (!crypto_drivers_locked) { \
9156 + dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
9161 + * Crypto device/driver capabilities structure.
9163 + * Synchronization:
9164 + * (d) - protected by CRYPTO_DRIVER_LOCK()
9165 + * (q) - protected by CRYPTO_Q_LOCK()
9166 + * Not tagged fields are read-only.
9169 + device_t cc_dev; /* (d) device/driver */
9170 + u_int32_t cc_sessions; /* (d) # of sessions */
9171 +	u_int32_t	cc_koperations;	/* (d) # of asym operations */
9173 + * Largest possible operator length (in bits) for each type of
9174 + * encryption algorithm. XXX not used
9176 + u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
9177 + u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1];
9178 + u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
9180 + int cc_flags; /* (d) flags */
9181 +#define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */
9182 + int cc_qblocked; /* (q) symmetric q blocked */
9183 + int cc_kqblocked; /* (q) asymmetric q blocked */
9185 +static struct cryptocap *crypto_drivers = NULL;
9186 +static int crypto_drivers_num = 0;
9189 + * There are two queues for crypto requests; one for symmetric (e.g.
9190 + * cipher) operations and one for asymmetric (e.g. MOD)operations.
9191 + * A single mutex is used to lock access to both queues. We could
9192 + * have one per-queue but having one simplifies handling of block/unblock
9195 +static int crp_sleep = 0;
9196 +static LIST_HEAD(crp_q); /* request queues */
9197 +static LIST_HEAD(crp_kq);
9199 +static spinlock_t crypto_q_lock;
9201 +int crypto_all_qblocked = 0; /* protect with Q_LOCK */
9202 +module_param(crypto_all_qblocked, int, 0444);
9203 +MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
9205 +int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
9206 +module_param(crypto_all_kqblocked, int, 0444);
9207 +MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
9209 +#define CRYPTO_Q_LOCK() \
9211 + spin_lock_irqsave(&crypto_q_lock, q_flags); \
9212 + dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
9214 +#define CRYPTO_Q_UNLOCK() \
9216 + dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
9217 + spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
9221 + * There are two queues for processing completed crypto requests; one
9222 + * for the symmetric and one for the asymmetric ops. We only need one
9223 + * but have two to avoid type futzing (cryptop vs. cryptkop). A single
9224 + * mutex is used to lock access to both queues. Note that this lock
9225 + * must be separate from the lock on request queues to ensure driver
9226 + * callbacks don't generate lock order reversals.
9228 +static LIST_HEAD(crp_ret_q); /* callback queues */
9229 +static LIST_HEAD(crp_ret_kq);
9231 +static spinlock_t crypto_ret_q_lock;
9232 +#define CRYPTO_RETQ_LOCK() \
9234 + spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
9235 + dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
9237 +#define CRYPTO_RETQ_UNLOCK() \
9239 + dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
9240 + spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
9242 +#define CRYPTO_RETQ_EMPTY() (list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
9244 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
9245 +static kmem_cache_t *cryptop_zone;
9246 +static kmem_cache_t *cryptodesc_zone;
9248 +static struct kmem_cache *cryptop_zone;
9249 +static struct kmem_cache *cryptodesc_zone;
9252 +#define debug crypto_debug
9253 +int crypto_debug = 0;
9254 +module_param(crypto_debug, int, 0644);
9255 +MODULE_PARM_DESC(crypto_debug, "Enable debug");
9256 +EXPORT_SYMBOL(crypto_debug);
9259 + * Maximum number of outstanding crypto requests before we start
9260 + * failing requests. We need this to prevent DOS when too many
9261 + * requests are arriving for us to keep up. Otherwise we will
9262 + * run the system out of memory. Since crypto is slow, we are
9263 + * usually the bottleneck that needs to say, enough is enough.
9265 + * We cannot print errors when this condition occurs, we are already too
9266 + * slow, printing anything will just kill us
9269 +static int crypto_q_cnt = 0;
9270 +module_param(crypto_q_cnt, int, 0444);
9271 +MODULE_PARM_DESC(crypto_q_cnt,
9272 + "Current number of outstanding crypto requests");
9274 +static int crypto_q_max = 1000;
9275 +module_param(crypto_q_max, int, 0644);
9276 +MODULE_PARM_DESC(crypto_q_max,
9277 + "Maximum number of outstanding crypto requests");
9279 +#define bootverbose crypto_verbose
9280 +static int crypto_verbose = 0;
9281 +module_param(crypto_verbose, int, 0644);
9282 +MODULE_PARM_DESC(crypto_verbose,
9283 + "Enable verbose crypto startup");
9285 +int crypto_usercrypto = 1; /* userland may do crypto reqs */
9286 +module_param(crypto_usercrypto, int, 0644);
9287 +MODULE_PARM_DESC(crypto_usercrypto,
9288 + "Enable/disable user-mode access to crypto support");
9290 +int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
9291 +module_param(crypto_userasymcrypto, int, 0644);
9292 +MODULE_PARM_DESC(crypto_userasymcrypto,
9293 + "Enable/disable user-mode access to asymmetric crypto support");
9295 +int crypto_devallowsoft = 0; /* only use hardware crypto */
9296 +module_param(crypto_devallowsoft, int, 0644);
9297 +MODULE_PARM_DESC(crypto_devallowsoft,
9298 + "Enable/disable use of software crypto support");
9300 +static pid_t cryptoproc = (pid_t) -1;
9301 +static struct completion cryptoproc_exited;
9302 +static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
9303 +static pid_t cryptoretproc = (pid_t) -1;
9304 +static struct completion cryptoretproc_exited;
9305 +static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
9307 +static int crypto_proc(void *arg);
9308 +static int crypto_ret_proc(void *arg);
9309 +static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
9310 +static int crypto_kinvoke(struct cryptkop *krp, int flags);
9311 +static void crypto_exit(void);
9312 +static int crypto_init(void);
9314 +static struct cryptostats cryptostats;
9316 +static struct cryptocap *
9317 +crypto_checkdriver(u_int32_t hid)
9319 + if (crypto_drivers == NULL)
9321 + return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
9325 + * Compare a driver's list of supported algorithms against another
9326 + * list; return non-zero if all algorithms are supported.
9329 +driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
9331 + const struct cryptoini *cr;
9333 + /* See if all the algorithms are supported. */
9334 + for (cr = cri; cr; cr = cr->cri_next)
9335 + if (cap->cc_alg[cr->cri_alg] == 0)
9341 + * Select a driver for a new session that supports the specified
9342 + * algorithms and, optionally, is constrained according to the flags.
9343 + * The algorithm we use here is pretty stupid; just use the
9344 + * first driver that supports all the algorithms we need. If there
9345 + * are multiple drivers we choose the driver with the fewest active
9346 + * sessions. We prefer hardware-backed drivers to software ones.
9348 + * XXX We need more smarts here (in real life too, but that's
9349 + * XXX another story altogether).
9351 +static struct cryptocap *
9352 +crypto_select_driver(const struct cryptoini *cri, int flags)
9354 + struct cryptocap *cap, *best;
9357 + CRYPTO_DRIVER_ASSERT();
9360 + * Look first for hardware crypto devices if permitted.
9362 + if (flags & CRYPTOCAP_F_HARDWARE)
9363 + match = CRYPTOCAP_F_HARDWARE;
9365 + match = CRYPTOCAP_F_SOFTWARE;
9368 + for (hid = 0; hid < crypto_drivers_num; hid++) {
9369 + cap = &crypto_drivers[hid];
9371 + * If it's not initialized, is in the process of
9372 + * going away, or is not appropriate (hardware
9373 + * or software based on match), then skip.
9375 + if (cap->cc_dev == NULL ||
9376 + (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
9377 + (cap->cc_flags & match) == 0)
9380 + /* verify all the algorithms are supported. */
9381 + if (driver_suitable(cap, cri)) {
9382 + if (best == NULL ||
9383 + cap->cc_sessions < best->cc_sessions)
9389 + if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
9390 + /* sort of an Algol 68-style for loop */
9391 + match = CRYPTOCAP_F_SOFTWARE;
9398 + * Create a new session. The crid argument specifies a crypto
9399 + * driver to use or constraints on a driver to select (hardware
9400 + * only, software only, either). Whatever driver is selected
9401 + * must be capable of the requested crypto algorithms.
9404 +crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
9406 + struct cryptocap *cap;
9407 + u_int32_t hid, lid;
9409 + unsigned long d_flags;
9411 + CRYPTO_DRIVER_LOCK();
9412 + if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9414 + * Use specified driver; verify it is capable.
9416 + cap = crypto_checkdriver(crid);
9417 + if (cap != NULL && !driver_suitable(cap, cri))
9421 + * No requested driver; select based on crid flags.
9423 + cap = crypto_select_driver(cri, crid);
9425 + * if NULL then can't do everything in one session.
9426 + * XXX Fix this. We need to inject a "virtual" session
9427 + * XXX layer right about here.
9430 + if (cap != NULL) {
9431 + /* Call the driver initialization routine. */
9432 + hid = cap - crypto_drivers;
9433 + lid = hid; /* Pass the driver ID. */
9434 + cap->cc_sessions++;
9435 + CRYPTO_DRIVER_UNLOCK();
9436 + err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
9437 + CRYPTO_DRIVER_LOCK();
9439 + (*sid) = (cap->cc_flags & 0xff000000)
9440 + | (hid & 0x00ffffff);
9442 + (*sid) |= (lid & 0xffffffff);
9444 + cap->cc_sessions--;
9447 + CRYPTO_DRIVER_UNLOCK();
9452 +crypto_remove(struct cryptocap *cap)
9454 + CRYPTO_DRIVER_ASSERT();
9455 + if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
9456 + bzero(cap, sizeof(*cap));
9460 + * Delete an existing session (or a reserved session on an unregistered
9464 +crypto_freesession(u_int64_t sid)
9466 + struct cryptocap *cap;
9469 + unsigned long d_flags;
9471 + dprintk("%s()\n", __FUNCTION__);
9472 + CRYPTO_DRIVER_LOCK();
9474 + if (crypto_drivers == NULL) {
9479 + /* Determine two IDs. */
9480 + hid = CRYPTO_SESID2HID(sid);
9482 + if (hid >= crypto_drivers_num) {
9483 + dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
9487 + cap = &crypto_drivers[hid];
9489 + if (cap->cc_dev) {
9490 + CRYPTO_DRIVER_UNLOCK();
9491 + /* Call the driver cleanup routine, if available, unlocked. */
9492 + err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
9493 + CRYPTO_DRIVER_LOCK();
9496 + if (cap->cc_sessions)
9497 + cap->cc_sessions--;
9499 + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
9500 + crypto_remove(cap);
9503 + CRYPTO_DRIVER_UNLOCK();
9508 + * Return an unused driver id. Used by drivers prior to registering
9509 + * support for the algorithms they handle.
9512 +crypto_get_driverid(device_t dev, int flags)
9514 + struct cryptocap *newdrv;
9516 + unsigned long d_flags;
9518 + if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9519 + printf("%s: no flags specified when registering driver\n",
9520 + device_get_nameunit(dev));
9524 + CRYPTO_DRIVER_LOCK();
9526 + for (i = 0; i < crypto_drivers_num; i++) {
9527 + if (crypto_drivers[i].cc_dev == NULL &&
9528 + (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
9533 + /* Out of entries, allocate some more. */
9534 + if (i == crypto_drivers_num) {
9535 + /* Be careful about wrap-around. */
9536 + if (2 * crypto_drivers_num <= crypto_drivers_num) {
9537 + CRYPTO_DRIVER_UNLOCK();
9538 + printk("crypto: driver count wraparound!\n");
9542 + newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
9544 + if (newdrv == NULL) {
9545 + CRYPTO_DRIVER_UNLOCK();
9546 + printk("crypto: no space to expand driver table!\n");
9550 + memcpy(newdrv, crypto_drivers,
9551 + crypto_drivers_num * sizeof(struct cryptocap));
9552 + memset(&newdrv[crypto_drivers_num], 0,
9553 + crypto_drivers_num * sizeof(struct cryptocap));
9555 + crypto_drivers_num *= 2;
9557 + kfree(crypto_drivers);
9558 + crypto_drivers = newdrv;
9561 + /* NB: state is zero'd on free */
9562 + crypto_drivers[i].cc_sessions = 1; /* Mark */
9563 + crypto_drivers[i].cc_dev = dev;
9564 + crypto_drivers[i].cc_flags = flags;
9566 + printf("crypto: assign %s driver id %u, flags %u\n",
9567 + device_get_nameunit(dev), i, flags);
9569 + CRYPTO_DRIVER_UNLOCK();
9575 + * Lookup a driver by name. We match against the full device
9576 + * name and unit, and against just the name. The latter gives
9577 + * us a simple wildcarding by device name. On success return the
9578 + * driver/hardware identifier; otherwise return -1.
9581 +crypto_find_driver(const char *match)
9583 + int i, len = strlen(match);
9584 + unsigned long d_flags;
9586 + CRYPTO_DRIVER_LOCK();
9587 + for (i = 0; i < crypto_drivers_num; i++) {
9588 + device_t dev = crypto_drivers[i].cc_dev;
9589 + if (dev == NULL ||
9590 + (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
9592 + if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
9593 + strncmp(match, device_get_name(dev), len) == 0)
9596 + CRYPTO_DRIVER_UNLOCK();
9597 + return i < crypto_drivers_num ? i : -1;
9601 + * Return the device_t for the specified driver or NULL
9602 + * if the driver identifier is invalid.
9605 +crypto_find_device_byhid(int hid)
9607 + struct cryptocap *cap = crypto_checkdriver(hid);
9608 + return cap != NULL ? cap->cc_dev : NULL;
9612 + * Return the device/driver capabilities.
9615 +crypto_getcaps(int hid)
9617 + struct cryptocap *cap = crypto_checkdriver(hid);
9618 + return cap != NULL ? cap->cc_flags : 0;
9622 + * Register support for a key-related algorithm. This routine
9623 + * is called once for each algorithm supported by a driver.
9626 +crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
9628 + struct cryptocap *cap;
9630 + unsigned long d_flags;
9632 + dprintk("%s()\n", __FUNCTION__);
9633 + CRYPTO_DRIVER_LOCK();
9635 + cap = crypto_checkdriver(driverid);
9636 + if (cap != NULL &&
9637 + (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
9639 + * XXX Do some performance testing to determine placing.
9640 + * XXX We probably need an auxiliary data structure that
9641 + * XXX describes relative performances.
9644 + cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
9646 + printf("crypto: %s registers key alg %u flags %u\n"
9647 + , device_get_nameunit(cap->cc_dev)
9655 + CRYPTO_DRIVER_UNLOCK();
9660 + * Register support for a non-key-related algorithm. This routine
9661 + * is called once for each such algorithm supported by a driver.
9664 +crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
9667 + struct cryptocap *cap;
9669 + unsigned long d_flags;
9671 + dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
9672 + driverid, alg, maxoplen, flags);
9674 + CRYPTO_DRIVER_LOCK();
9676 + cap = crypto_checkdriver(driverid);
9677 + /* NB: algorithms are in the range [1..max] */
9678 + if (cap != NULL &&
9679 + (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
9681 + * XXX Do some performance testing to determine placing.
9682 + * XXX We probably need an auxiliary data structure that
9683 + * XXX describes relative performances.
9686 + cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
9687 + cap->cc_max_op_len[alg] = maxoplen;
9689 + printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
9690 + , device_get_nameunit(cap->cc_dev)
9695 + cap->cc_sessions = 0; /* Unmark */
9700 + CRYPTO_DRIVER_UNLOCK();
9705 +driver_finis(struct cryptocap *cap)
9707 + u_int32_t ses, kops;
9709 + CRYPTO_DRIVER_ASSERT();
9711 + ses = cap->cc_sessions;
9712 + kops = cap->cc_koperations;
9713 + bzero(cap, sizeof(*cap));
9714 + if (ses != 0 || kops != 0) {
9716 + * If there are pending sessions,
9717 + * just mark as invalid.
9719 + cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
9720 + cap->cc_sessions = ses;
9721 + cap->cc_koperations = kops;
9726 + * Unregister a crypto driver. If there are pending sessions using it,
9727 + * leave enough information around so that subsequent calls using those
9728 + * sessions will correctly detect the driver has been unregistered and
9729 + * reroute requests.
9732 +crypto_unregister(u_int32_t driverid, int alg)
9734 + struct cryptocap *cap;
9736 + unsigned long d_flags;
9738 + dprintk("%s()\n", __FUNCTION__);
9739 + CRYPTO_DRIVER_LOCK();
9741 + cap = crypto_checkdriver(driverid);
9742 + if (cap != NULL &&
9743 + (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
9744 + cap->cc_alg[alg] != 0) {
9745 + cap->cc_alg[alg] = 0;
9746 + cap->cc_max_op_len[alg] = 0;
9748 + /* Was this the last algorithm ? */
9749 + for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
9750 + if (cap->cc_alg[i] != 0)
9753 + if (i == CRYPTO_ALGORITHM_MAX + 1)
9754 + driver_finis(cap);
9758 + CRYPTO_DRIVER_UNLOCK();
9763 + * Unregister all algorithms associated with a crypto driver.
9764 + * If there are pending sessions using it, leave enough information
9765 + * around so that subsequent calls using those sessions will
9766 + * correctly detect the driver has been unregistered and reroute
9770 +crypto_unregister_all(u_int32_t driverid)
9772 + struct cryptocap *cap;
9774 + unsigned long d_flags;
9776 + dprintk("%s()\n", __FUNCTION__);
9777 + CRYPTO_DRIVER_LOCK();
9778 + cap = crypto_checkdriver(driverid);
9779 + if (cap != NULL) {
9780 + driver_finis(cap);
9784 + CRYPTO_DRIVER_UNLOCK();
9790 + * Clear blockage on a driver. The what parameter indicates whether
9791 + * the driver is now ready for cryptop's and/or cryptokop's.
9794 +crypto_unblock(u_int32_t driverid, int what)
9796 + struct cryptocap *cap;
9798 + unsigned long q_flags;
9801 + cap = crypto_checkdriver(driverid);
9802 + if (cap != NULL) {
9803 + if (what & CRYPTO_SYMQ) {
9804 + cap->cc_qblocked = 0;
9805 + crypto_all_qblocked = 0;
9807 + if (what & CRYPTO_ASYMQ) {
9808 + cap->cc_kqblocked = 0;
9809 + crypto_all_kqblocked = 0;
9812 + wake_up_interruptible(&cryptoproc_wait);
9816 + CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
9822 + * Add a crypto request to a queue, to be processed by the kernel thread.
9825 +crypto_dispatch(struct cryptop *crp)
9827 + struct cryptocap *cap;
9829 + unsigned long q_flags;
9831 + dprintk("%s()\n", __FUNCTION__);
9833 + cryptostats.cs_ops++;
9836 + if (crypto_q_cnt >= crypto_q_max) {
9837 + CRYPTO_Q_UNLOCK();
9838 + cryptostats.cs_drops++;
9844 + * Caller marked the request to be processed immediately; dispatch
9845 + * it directly to the driver unless the driver is currently blocked.
9847 + if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
9848 + int hid = CRYPTO_SESID2HID(crp->crp_sid);
9849 + cap = crypto_checkdriver(hid);
9850 + /* Driver cannot disappear when there is an active session. */
9851 + KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
9852 + if (!cap->cc_qblocked) {
9853 + crypto_all_qblocked = 0;
9854 + crypto_drivers[hid].cc_qblocked = 1;
9855 + CRYPTO_Q_UNLOCK();
9856 + result = crypto_invoke(cap, crp, 0);
9858 + if (result != ERESTART)
9859 + crypto_drivers[hid].cc_qblocked = 0;
9862 + if (result == ERESTART) {
9864 + * The driver ran out of resources, mark the
9865 + * driver ``blocked'' for cryptop's and put
9866 + * the request back in the queue. It would
9867 + * best to put the request back where we got
9868 + * it but that's hard so for now we put it
9869 + * at the front. This should be ok; putting
9870 + * it at the end does not work.
9872 + list_add(&crp->crp_next, &crp_q);
9873 + cryptostats.cs_blocks++;
9874 + } else if (result == -1) {
9875 + TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
9878 + wake_up_interruptible(&cryptoproc_wait);
9879 + CRYPTO_Q_UNLOCK();
9884 + * Add an asymmetric crypto request to a queue,
9885 + * to be processed by the kernel thread.
9888 +crypto_kdispatch(struct cryptkop *krp)
9891 + unsigned long q_flags;
9893 + cryptostats.cs_kops++;
9895 + error = crypto_kinvoke(krp, krp->krp_crid);
9896 + if (error == ERESTART) {
9898 + TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
9900 + wake_up_interruptible(&cryptoproc_wait);
9901 + CRYPTO_Q_UNLOCK();
9908 + * Verify a driver is suitable for the specified operation.
9910 +static __inline int
9911 +kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
9913 + return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
9917 + * Select a driver for an asym operation. The driver must
9918 + * support the necessary algorithm. The caller can constrain
9919 + * which device is selected with the flags parameter. The
9920 + * algorithm we use here is pretty stupid; just use the first
9921 + * driver that supports the algorithms we need. If there are
9922 + * multiple suitable drivers we choose the driver with the
9923 + * fewest active operations. We prefer hardware-backed
9924 + * drivers to software ones when either may be used.
9926 +static struct cryptocap *
9927 +crypto_select_kdriver(const struct cryptkop *krp, int flags)
9929 + struct cryptocap *cap, *best, *blocked;
9932 + CRYPTO_DRIVER_ASSERT();
9935 + * Look first for hardware crypto devices if permitted.
9937 + if (flags & CRYPTOCAP_F_HARDWARE)
9938 + match = CRYPTOCAP_F_HARDWARE;
9940 + match = CRYPTOCAP_F_SOFTWARE;
9944 + for (hid = 0; hid < crypto_drivers_num; hid++) {
9945 + cap = &crypto_drivers[hid];
9947 + * If it's not initialized, is in the process of
9948 + * going away, or is not appropriate (hardware
9949 + * or software based on match), then skip.
9951 + if (cap->cc_dev == NULL ||
9952 + (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
9953 + (cap->cc_flags & match) == 0)
9956 + /* verify all the algorithms are supported. */
9957 + if (kdriver_suitable(cap, krp)) {
9958 + if (best == NULL ||
9959 + cap->cc_koperations < best->cc_koperations)
9965 + if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
9966 + /* sort of an Algol 68-style for loop */
9967 + match = CRYPTOCAP_F_SOFTWARE;
9974 + * Dispatch an asymmetric crypto request.
9977 +crypto_kinvoke(struct cryptkop *krp, int crid)
9979 + struct cryptocap *cap = NULL;
9981 + unsigned long d_flags;
9983 + KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
9984 + KASSERT(krp->krp_callback != NULL,
9985 + ("%s: krp->crp_callback == NULL", __func__));
9987 + CRYPTO_DRIVER_LOCK();
9988 + if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9989 + cap = crypto_checkdriver(crid);
9990 + if (cap != NULL) {
9992 + * Driver present, it must support the necessary
9993 + * algorithm and, if s/w drivers are excluded,
9994 + * it must be registered as hardware-backed.
9996 + if (!kdriver_suitable(cap, krp) ||
9997 + (!crypto_devallowsoft &&
9998 + (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
10003 + * No requested driver; select based on crid flags.
10005 + if (!crypto_devallowsoft) /* NB: disallow s/w drivers */
10006 + crid &= ~CRYPTOCAP_F_SOFTWARE;
10007 + cap = crypto_select_kdriver(krp, crid);
10009 + if (cap != NULL && !cap->cc_kqblocked) {
10010 + krp->krp_hid = cap - crypto_drivers;
10011 + cap->cc_koperations++;
10012 + CRYPTO_DRIVER_UNLOCK();
10013 + error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
10014 + CRYPTO_DRIVER_LOCK();
10015 + if (error == ERESTART) {
10016 + cap->cc_koperations--;
10017 + CRYPTO_DRIVER_UNLOCK();
10020 + /* return the actual device used */
10021 + krp->krp_crid = krp->krp_hid;
10024 + * NB: cap is !NULL if device is blocked; in
10025 + * that case return ERESTART so the operation
10026 + * is resubmitted if possible.
10028 + error = (cap == NULL) ? ENODEV : ERESTART;
10030 + CRYPTO_DRIVER_UNLOCK();
10033 + krp->krp_status = error;
10034 + crypto_kdone(krp);
10041 + * Dispatch a crypto request to the appropriate crypto devices.
10044 +crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
10046 + KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
10047 + KASSERT(crp->crp_callback != NULL,
10048 + ("%s: crp->crp_callback == NULL", __func__));
10049 + KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
10051 + dprintk("%s()\n", __FUNCTION__);
10053 +#ifdef CRYPTO_TIMING
10054 + if (crypto_timing)
10055 + crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
10057 + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
10058 + struct cryptodesc *crd;
10062 + * Driver has unregistered; migrate the session and return
10063 + * an error to the caller so they'll resubmit the op.
10065 + * XXX: What if there are more already queued requests for this
10068 + crypto_freesession(crp->crp_sid);
10070 + for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
10071 + crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
10073 + /* XXX propagate flags from initial session? */
10074 + if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
10075 + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
10076 + crp->crp_sid = nid;
10078 + crp->crp_etype = EAGAIN;
10079 + crypto_done(crp);
10083 + * Invoke the driver to process the request.
10085 + return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
10090 + * Release a set of crypto descriptors.
10093 +crypto_freereq(struct cryptop *crp)
10095 + struct cryptodesc *crd;
10102 + struct cryptop *crp2;
10103 + unsigned long q_flags;
10106 + TAILQ_FOREACH(crp2, &crp_q, crp_next) {
10107 + KASSERT(crp2 != crp,
10108 + ("Freeing cryptop from the crypto queue (%p).",
10111 + CRYPTO_Q_UNLOCK();
10112 + CRYPTO_RETQ_LOCK();
10113 + TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
10114 + KASSERT(crp2 != crp,
10115 + ("Freeing cryptop from the return queue (%p).",
10118 + CRYPTO_RETQ_UNLOCK();
10122 + while ((crd = crp->crp_desc) != NULL) {
10123 + crp->crp_desc = crd->crd_next;
10124 + kmem_cache_free(cryptodesc_zone, crd);
10126 + kmem_cache_free(cryptop_zone, crp);
10130 + * Acquire a set of crypto descriptors.
10133 +crypto_getreq(int num)
10135 + struct cryptodesc *crd;
10136 + struct cryptop *crp;
10138 + crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
10139 + if (crp != NULL) {
10140 + memset(crp, 0, sizeof(*crp));
10141 + INIT_LIST_HEAD(&crp->crp_next);
10142 + init_waitqueue_head(&crp->crp_waitq);
10144 + crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
10145 + if (crd == NULL) {
10146 + crypto_freereq(crp);
10149 + memset(crd, 0, sizeof(*crd));
10150 + crd->crd_next = crp->crp_desc;
10151 + crp->crp_desc = crd;
10158 + * Invoke the callback on behalf of the driver.
10161 +crypto_done(struct cryptop *crp)
10163 + unsigned long q_flags;
10165 + dprintk("%s()\n", __FUNCTION__);
10166 + if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
10167 + crp->crp_flags |= CRYPTO_F_DONE;
10170 + CRYPTO_Q_UNLOCK();
10172 + printk("crypto: crypto_done op already done, flags 0x%x",
10174 + if (crp->crp_etype != 0)
10175 + cryptostats.cs_errs++;
10177 + * CBIMM means unconditionally do the callback immediately;
10178 + * CBIFSYNC means do the callback immediately only if the
10179 + * operation was done synchronously. Both are used to avoid
10180 + * doing extraneous context switches; the latter is mostly
10181 + * used with the software crypto driver.
10183 + if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
10184 + ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
10185 + (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
10187 + * Do the callback directly. This is ok when the
10188 + * callback routine does very little (e.g. the
10189 + * /dev/crypto callback method just does a wakeup).
10191 + crp->crp_callback(crp);
10193 + unsigned long r_flags;
10195 + * Normal case; queue the callback for the thread.
10197 + CRYPTO_RETQ_LOCK();
10198 + if (CRYPTO_RETQ_EMPTY())
10199 + wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
10200 + TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
10201 + CRYPTO_RETQ_UNLOCK();
10206 + * Invoke the callback on behalf of the driver.
10209 +crypto_kdone(struct cryptkop *krp)
10211 + struct cryptocap *cap;
10212 + unsigned long d_flags;
10214 + if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
10215 + printk("crypto: crypto_kdone op already done, flags 0x%x",
10217 + krp->krp_flags |= CRYPTO_KF_DONE;
10218 + if (krp->krp_status != 0)
10219 + cryptostats.cs_kerrs++;
10221 + CRYPTO_DRIVER_LOCK();
10222 + /* XXX: What if driver is loaded in the meantime? */
10223 + if (krp->krp_hid < crypto_drivers_num) {
10224 + cap = &crypto_drivers[krp->krp_hid];
10225 + cap->cc_koperations--;
10226 + KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
10227 + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
10228 + crypto_remove(cap);
10230 + CRYPTO_DRIVER_UNLOCK();
10233 + * CBIMM means unconditionally do the callback immediately;
10234 + * This is used to avoid doing extraneous context switches
10236 + if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
10238 + * Do the callback directly. This is ok when the
10239 + * callback routine does very little (e.g. the
10240 + * /dev/crypto callback method just does a wakeup).
10242 + krp->krp_callback(krp);
10244 + unsigned long r_flags;
10246 + * Normal case; queue the callback for the thread.
10248 + CRYPTO_RETQ_LOCK();
10249 + if (CRYPTO_RETQ_EMPTY())
10250 + wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
10251 + TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
10252 + CRYPTO_RETQ_UNLOCK();
10257 +crypto_getfeat(int *featp)
10259 + int hid, kalg, feat = 0;
10260 + unsigned long d_flags;
10262 + CRYPTO_DRIVER_LOCK();
10263 + for (hid = 0; hid < crypto_drivers_num; hid++) {
10264 + const struct cryptocap *cap = &crypto_drivers[hid];
10266 + if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
10267 + !crypto_devallowsoft) {
10270 + for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
10271 + if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
10272 + feat |= 1 << kalg;
10274 + CRYPTO_DRIVER_UNLOCK();
10280 + * Crypto thread, dispatches crypto requests.
10283 +crypto_proc(void *arg)
10285 + struct cryptop *crp, *submit;
10286 + struct cryptkop *krp, *krpp;
10287 + struct cryptocap *cap;
10289 + int result, hint;
10290 + unsigned long q_flags;
10292 + ocf_daemonize("crypto");
10297 + * we need to make sure we don't get into a busy loop with nothing
10298 + * to do, the two crypto_all_*blocked vars help us find out when
10299 + * we are all full and can do nothing on any driver or Q. If so we
10300 + * wait for an unblock.
10302 + crypto_all_qblocked = !list_empty(&crp_q);
10305 + * Find the first element in the queue that can be
10306 + * processed and look-ahead to see if multiple ops
10307 + * are ready for the same driver.
10311 + list_for_each_entry(crp, &crp_q, crp_next) {
10312 + hid = CRYPTO_SESID2HID(crp->crp_sid);
10313 + cap = crypto_checkdriver(hid);
10315 + * Driver cannot disappear when there is an active
10318 + KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
10319 + __func__, __LINE__));
10320 + if (cap == NULL || cap->cc_dev == NULL) {
10321 + /* Op needs to be migrated, process it. */
10322 + if (submit == NULL)
10326 + if (!cap->cc_qblocked) {
10327 + if (submit != NULL) {
10329 + * We stop on finding another op,
10330 + * regardless whether its for the same
10331 + * driver or not. We could keep
10332 + * searching the queue but it might be
10333 + * better to just use a per-driver
10336 + if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
10337 + hint = CRYPTO_HINT_MORE;
10341 + if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
10343 + /* keep scanning for more are q'd */
10347 + if (submit != NULL) {
10348 + hid = CRYPTO_SESID2HID(submit->crp_sid);
10349 + crypto_all_qblocked = 0;
10350 + list_del(&submit->crp_next);
10351 + crypto_drivers[hid].cc_qblocked = 1;
10352 + cap = crypto_checkdriver(hid);
10353 + CRYPTO_Q_UNLOCK();
10354 + KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
10355 + __func__, __LINE__));
10356 + result = crypto_invoke(cap, submit, hint);
10358 + if (result == ERESTART) {
10360 + * The driver ran out of resources, mark the
10361 + * driver ``blocked'' for cryptop's and put
10362 + * the request back in the queue. It would
10363 + * best to put the request back where we got
10364 + * it but that's hard so for now we put it
10365 + * at the front. This should be ok; putting
10366 + * it at the end does not work.
10368 + /* XXX validate sid again? */
10369 + list_add(&submit->crp_next, &crp_q);
10370 + cryptostats.cs_blocks++;
10372 + crypto_drivers[hid].cc_qblocked=0;
10375 + crypto_all_kqblocked = !list_empty(&crp_kq);
10377 + /* As above, but for key ops */
10379 + list_for_each_entry(krpp, &crp_kq, krp_next) {
10380 + cap = crypto_checkdriver(krpp->krp_hid);
10381 + if (cap == NULL || cap->cc_dev == NULL) {
10383 + * Operation needs to be migrated, invalidate
10384 + * the assigned device so it will reselect a
10385 + * new one below. Propagate the original
10386 + * crid selection flags if supplied.
10388 + krp->krp_hid = krp->krp_crid &
10389 + (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
10390 + if (krp->krp_hid == 0)
10392 + CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
10395 + if (!cap->cc_kqblocked) {
10400 + if (krp != NULL) {
10401 + crypto_all_kqblocked = 0;
10402 + list_del(&krp->krp_next);
10403 + crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
10404 + CRYPTO_Q_UNLOCK();
10405 + result = crypto_kinvoke(krp, krp->krp_hid);
10407 + if (result == ERESTART) {
10409 + * The driver ran out of resources, mark the
10410 + * driver ``blocked'' for cryptkop's and put
10411 + * the request back in the queue. It would
10412 + * best to put the request back where we got
10413 + * it but that's hard so for now we put it
10414 + * at the front. This should be ok; putting
10415 + * it at the end does not work.
10417 + /* XXX validate sid again? */
10418 + list_add(&krp->krp_next, &crp_kq);
10419 + cryptostats.cs_kblocks++;
10421 + crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
10424 + if (submit == NULL && krp == NULL) {
10426 + * Nothing more to be processed. Sleep until we're
10427 + * woken because there are more ops to process.
10428 + * This happens either by submission or by a driver
10429 + * becoming unblocked and notifying us through
10430 + * crypto_unblock. Note that when we wakeup we
10431 + * start processing each queue again from the
10432 + * front. It's not clear that it's important to
10433 + * preserve this ordering since ops may finish
10434 + * out of order if dispatched to different devices
10435 + * and some become blocked while others do not.
10437 + dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
10439 + list_empty(&crp_q), crypto_all_qblocked,
10440 + list_empty(&crp_kq), crypto_all_kqblocked);
10441 + CRYPTO_Q_UNLOCK();
10443 + wait_event_interruptible(cryptoproc_wait,
10444 + !(list_empty(&crp_q) || crypto_all_qblocked) ||
10445 + !(list_empty(&crp_kq) || crypto_all_kqblocked) ||
10446 + cryptoproc == (pid_t) -1);
10448 + if (signal_pending (current)) {
10449 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10450 + spin_lock_irq(&current->sigmask_lock);
10452 + flush_signals(current);
10453 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10454 + spin_unlock_irq(&current->sigmask_lock);
10458 + dprintk("%s - awake\n", __FUNCTION__);
10459 + if (cryptoproc == (pid_t) -1)
10461 + cryptostats.cs_intrs++;
10464 + CRYPTO_Q_UNLOCK();
10465 + complete_and_exit(&cryptoproc_exited, 0);
10469 + * Crypto returns thread, does callbacks for processed crypto requests.
10470 + * Callbacks are done here, rather than in the crypto drivers, because
10471 + * callbacks typically are expensive and would slow interrupt handling.
10474 +crypto_ret_proc(void *arg)
10476 + struct cryptop *crpt;
10477 + struct cryptkop *krpt;
10478 + unsigned long r_flags;
10480 + ocf_daemonize("crypto_ret");
10482 + CRYPTO_RETQ_LOCK();
10484 + /* Harvest return q's for completed ops */
10486 + if (!list_empty(&crp_ret_q))
10487 + crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
10488 + if (crpt != NULL)
10489 + list_del(&crpt->crp_next);
10492 + if (!list_empty(&crp_ret_kq))
10493 + krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
10494 + if (krpt != NULL)
10495 + list_del(&krpt->krp_next);
10497 + if (crpt != NULL || krpt != NULL) {
10498 + CRYPTO_RETQ_UNLOCK();
10500 + * Run callbacks unlocked.
10502 + if (crpt != NULL)
10503 + crpt->crp_callback(crpt);
10504 + if (krpt != NULL)
10505 + krpt->krp_callback(krpt);
10506 + CRYPTO_RETQ_LOCK();
10509 + * Nothing more to be processed. Sleep until we're
10510 + * woken because there are more returns to process.
10512 + dprintk("%s - sleeping\n", __FUNCTION__);
10513 + CRYPTO_RETQ_UNLOCK();
10514 + wait_event_interruptible(cryptoretproc_wait,
10515 + cryptoretproc == (pid_t) -1 ||
10516 + !list_empty(&crp_ret_q) ||
10517 + !list_empty(&crp_ret_kq));
10518 + if (signal_pending (current)) {
10519 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10520 + spin_lock_irq(&current->sigmask_lock);
10522 + flush_signals(current);
10523 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10524 + spin_unlock_irq(&current->sigmask_lock);
10527 + CRYPTO_RETQ_LOCK();
10528 + dprintk("%s - awake\n", __FUNCTION__);
10529 + if (cryptoretproc == (pid_t) -1) {
10530 + dprintk("%s - EXITING!\n", __FUNCTION__);
10533 + cryptostats.cs_rets++;
10536 + CRYPTO_RETQ_UNLOCK();
10537 + complete_and_exit(&cryptoretproc_exited, 0);
10541 +#if 0 /* should put this into /proc or something */
10543 +db_show_drivers(void)
10547 + db_printf("%12s %4s %4s %8s %2s %2s\n"
10555 + for (hid = 0; hid < crypto_drivers_num; hid++) {
10556 + const struct cryptocap *cap = &crypto_drivers[hid];
10557 + if (cap->cc_dev == NULL)
10559 + db_printf("%-12s %4u %4u %08x %2u %2u\n"
10560 + , device_get_nameunit(cap->cc_dev)
10561 + , cap->cc_sessions
10562 + , cap->cc_koperations
10564 + , cap->cc_qblocked
10565 + , cap->cc_kqblocked
10570 +DB_SHOW_COMMAND(crypto, db_show_crypto)
10572 + struct cryptop *crp;
10574 + db_show_drivers();
10577 + db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
10578 + "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
10579 + "Desc", "Callback");
10580 + TAILQ_FOREACH(crp, &crp_q, crp_next) {
10581 + db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
10582 + , (int) CRYPTO_SESID2HID(crp->crp_sid)
10583 + , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
10584 + , crp->crp_ilen, crp->crp_olen
10588 + , crp->crp_callback
10591 + if (!TAILQ_EMPTY(&crp_ret_q)) {
10592 + db_printf("\n%4s %4s %4s %8s\n",
10593 + "HID", "Etype", "Flags", "Callback");
10594 + TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
10595 + db_printf("%4u %4u %04x %8p\n"
10596 + , (int) CRYPTO_SESID2HID(crp->crp_sid)
10599 + , crp->crp_callback
10605 +DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
10607 + struct cryptkop *krp;
10609 + db_show_drivers();
10612 + db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
10613 + "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
10614 + TAILQ_FOREACH(krp, &crp_kq, krp_next) {
10615 + db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
10617 + , krp->krp_status
10618 + , krp->krp_iparams, krp->krp_oparams
10619 + , krp->krp_crid, krp->krp_hid
10620 + , krp->krp_callback
10623 + if (!TAILQ_EMPTY(&crp_ret_q)) {
10624 + db_printf("%4s %5s %8s %4s %8s\n",
10625 + "Op", "Status", "CRID", "HID", "Callback");
10626 + TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
10627 + db_printf("%4u %5u %08x %4u %8p\n"
10629 + , krp->krp_status
10630 + , krp->krp_crid, krp->krp_hid
10631 + , krp->krp_callback
10644 + dprintk("%s(0x%x)\n", __FUNCTION__, (int) crypto_init);
10646 + if (crypto_initted)
10648 + crypto_initted = 1;
10650 + spin_lock_init(&crypto_drivers_lock);
10651 + spin_lock_init(&crypto_q_lock);
10652 + spin_lock_init(&crypto_ret_q_lock);
10654 + cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
10655 + 0, SLAB_HWCACHE_ALIGN, NULL
10656 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
10661 + cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
10662 + 0, SLAB_HWCACHE_ALIGN, NULL
10663 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
10668 + if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
10669 + printk("crypto: crypto_init cannot setup crypto zones\n");
10674 + crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
10675 + crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
10677 + if (crypto_drivers == NULL) {
10678 + printk("crypto: crypto_init cannot setup crypto drivers\n");
10683 + memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
10685 + init_completion(&cryptoproc_exited);
10686 + init_completion(&cryptoretproc_exited);
10688 + cryptoproc = 0; /* to avoid race condition where proc runs first */
10689 + cryptoproc = kernel_thread(crypto_proc, NULL, CLONE_FS|CLONE_FILES);
10690 + if (cryptoproc < 0) {
10691 + error = cryptoproc;
10692 + printk("crypto: crypto_init cannot start crypto thread; error %d",
10697 + cryptoretproc = 0; /* to avoid race condition where proc runs first */
10698 + cryptoretproc = kernel_thread(crypto_ret_proc, NULL, CLONE_FS|CLONE_FILES);
10699 + if (cryptoretproc < 0) {
10700 + error = cryptoretproc;
10701 + printk("crypto: crypto_init cannot start cryptoret thread; error %d",
10717 + unsigned long d_flags;
10719 + dprintk("%s()\n", __FUNCTION__);
10722 + * Terminate any crypto threads.
10725 + CRYPTO_DRIVER_LOCK();
10727 + cryptoproc = (pid_t) -1;
10728 + kill_proc(p, SIGTERM, 1);
10729 + wake_up_interruptible(&cryptoproc_wait);
10730 + CRYPTO_DRIVER_UNLOCK();
10732 + wait_for_completion(&cryptoproc_exited);
10734 + CRYPTO_DRIVER_LOCK();
10735 + p = cryptoretproc;
10736 + cryptoretproc = (pid_t) -1;
10737 + kill_proc(p, SIGTERM, 1);
10738 + wake_up_interruptible(&cryptoretproc_wait);
10739 + CRYPTO_DRIVER_UNLOCK();
10741 + wait_for_completion(&cryptoretproc_exited);
10743 + /* XXX flush queues??? */
10746 + * Reclaim dynamically allocated resources.
10748 + if (crypto_drivers != NULL)
10749 + kfree(crypto_drivers);
10751 + if (cryptodesc_zone != NULL)
10752 + kmem_cache_destroy(cryptodesc_zone);
10753 + if (cryptop_zone != NULL)
10754 + kmem_cache_destroy(cryptop_zone);
10758 +EXPORT_SYMBOL(crypto_newsession);
10759 +EXPORT_SYMBOL(crypto_freesession);
10760 +EXPORT_SYMBOL(crypto_get_driverid);
10761 +EXPORT_SYMBOL(crypto_kregister);
10762 +EXPORT_SYMBOL(crypto_register);
10763 +EXPORT_SYMBOL(crypto_unregister);
10764 +EXPORT_SYMBOL(crypto_unregister_all);
10765 +EXPORT_SYMBOL(crypto_unblock);
10766 +EXPORT_SYMBOL(crypto_dispatch);
10767 +EXPORT_SYMBOL(crypto_kdispatch);
10768 +EXPORT_SYMBOL(crypto_freereq);
10769 +EXPORT_SYMBOL(crypto_getreq);
10770 +EXPORT_SYMBOL(crypto_done);
10771 +EXPORT_SYMBOL(crypto_kdone);
10772 +EXPORT_SYMBOL(crypto_getfeat);
10773 +EXPORT_SYMBOL(crypto_userasymcrypto);
10774 +EXPORT_SYMBOL(crypto_getcaps);
10775 +EXPORT_SYMBOL(crypto_find_driver);
10776 +EXPORT_SYMBOL(crypto_find_device_byhid);
10778 +module_init(crypto_init);
10779 +module_exit(crypto_exit);
10781 +MODULE_LICENSE("BSD");
10782 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
10783 +MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)");
10785 +++ b/crypto/ocf/criov.c
10787 +/* $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $ */
10790 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
10791 + * Copyright (C) 2006-2007 David McCullough
10792 + * Copyright (C) 2004-2005 Intel Corporation.
10793 + * The license and original author are listed below.
10795 + * Copyright (c) 1999 Theo de Raadt
10797 + * Redistribution and use in source and binary forms, with or without
10798 + * modification, are permitted provided that the following conditions
10801 + * 1. Redistributions of source code must retain the above copyright
10802 + * notice, this list of conditions and the following disclaimer.
10803 + * 2. Redistributions in binary form must reproduce the above copyright
10804 + * notice, this list of conditions and the following disclaimer in the
10805 + * documentation and/or other materials provided with the distribution.
10806 + * 3. The name of the author may not be used to endorse or promote products
10807 + * derived from this software without specific prior written permission.
10809 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
10810 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
10811 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
10812 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
10813 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
10814 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
10815 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
10816 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
10817 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
10818 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10820 +__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
10823 +#ifndef AUTOCONF_INCLUDED
10824 +#include <linux/config.h>
10826 +#include <linux/module.h>
10827 +#include <linux/init.h>
10828 +#include <linux/slab.h>
10829 +#include <linux/uio.h>
10830 +#include <linux/skbuff.h>
10831 +#include <linux/kernel.h>
10832 +#include <linux/mm.h>
10833 +#include <asm/io.h>
10836 +#include <cryptodev.h>
10839 + * This macro is only for avoiding code duplication, as we need to skip
10840 + * given number of bytes in the same way in three functions below.
10842 +#define CUIO_SKIP() do { \
10843 + KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \
10844 + KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \
10845 + while (off > 0) { \
10846 + KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \
10847 + if (off < iov->iov_len) \
10849 + off -= iov->iov_len; \
10856 +cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
10858 + struct iovec *iov = uio->uio_iov;
10859 + int iol = uio->uio_iovcnt;
10863 + while (len > 0) {
10864 + KASSERT(iol >= 0, ("%s: empty", __func__));
10865 + count = min((int)(iov->iov_len - off), len);
10866 + memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
10876 +cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
10878 + struct iovec *iov = uio->uio_iov;
10879 + int iol = uio->uio_iovcnt;
10883 + while (len > 0) {
10884 + KASSERT(iol >= 0, ("%s: empty", __func__));
10885 + count = min((int)(iov->iov_len - off), len);
10886 + memcpy(((caddr_t)iov->iov_base) + off, cp, count);
10896 + * Return a pointer to iov/offset of location in iovec list.
10899 +cuio_getptr(struct uio *uio, int loc, int *off)
10901 + struct iovec *iov = uio->uio_iov;
10902 + int iol = uio->uio_iovcnt;
10904 + while (loc >= 0) {
10905 + /* Normal end of search */
10906 + if (loc < iov->iov_len) {
10911 + loc -= iov->iov_len;
10914 + /* Point at the end of valid data */
10915 + *off = iov->iov_len;
10927 +EXPORT_SYMBOL(cuio_copyback);
10928 +EXPORT_SYMBOL(cuio_copydata);
10929 +EXPORT_SYMBOL(cuio_getptr);
10933 +skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
10936 + if (offset < skb_headlen(skb)) {
10937 + memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
10938 + len -= skb_headlen(skb);
10939 + cp += skb_headlen(skb);
10941 + offset -= skb_headlen(skb);
10942 + for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
10943 + if (offset < skb_shinfo(skb)->frags[i].size) {
10944 + memcpy(page_address(skb_shinfo(skb)->frags[i].page) +
10945 + skb_shinfo(skb)->frags[i].page_offset,
10946 + cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
10947 + len -= skb_shinfo(skb)->frags[i].size;
10948 + cp += skb_shinfo(skb)->frags[i].size;
10950 + offset -= skb_shinfo(skb)->frags[i].size;
10955 +crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
10958 + if ((flags & CRYPTO_F_SKBUF) != 0)
10959 + skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
10960 + else if ((flags & CRYPTO_F_IOV) != 0)
10961 + cuio_copyback((struct uio *)buf, off, size, in);
10963 + bcopy(in, buf + off, size);
10967 +crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
10970 + if ((flags & CRYPTO_F_SKBUF) != 0)
10971 + skb_copy_bits((struct sk_buff *)buf, off, out, size);
10972 + else if ((flags & CRYPTO_F_IOV) != 0)
10973 + cuio_copydata((struct uio *)buf, off, size, out);
10975 + bcopy(buf + off, out, size);
10979 +crypto_apply(int flags, caddr_t buf, int off, int len,
10980 + int (*f)(void *, void *, u_int), void *arg)
10985 + if ((flags & CRYPTO_F_SKBUF) != 0)
10986 + error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
10987 + else if ((flags & CRYPTO_F_IOV) != 0)
10988 + error = cuio_apply((struct uio *)buf, off, len, f, arg);
10990 + error = (*f)(arg, buf + off, len);
10993 + KASSERT(0, ("crypto_apply not implemented!\n"));
10998 +EXPORT_SYMBOL(crypto_copyback);
10999 +EXPORT_SYMBOL(crypto_copydata);
11000 +EXPORT_SYMBOL(crypto_apply);
11003 +++ b/crypto/ocf/uio.h
11005 +#ifndef _OCF_UIO_H_
11006 +#define _OCF_UIO_H_
11008 +#include <linux/uio.h>
11011 + * The linux uio.h doesn't have all we need. To be fully api compatible
11012 + * with the BSD cryptodev, we need to keep this around. Perhaps this can
11013 + * be moved back into the linux/uio.h
11015 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
11016 + * Copyright (C) 2006-2007 David McCullough
11017 + * Copyright (C) 2004-2005 Intel Corporation.
11021 + * The free distribution and use of this software in both source and binary
11022 + * form is allowed (with or without changes) provided that:
11024 + * 1. distributions of this source code include the above copyright
11025 + * notice, this list of conditions and the following disclaimer;
11027 + * 2. distributions in binary form include the above copyright
11028 + * notice, this list of conditions and the following disclaimer
11029 + * in the documentation and/or other associated materials;
11031 + * 3. the copyright holder's name is not used to endorse products
11032 + * built using this software without specific written permission.
11034 + * ALTERNATIVELY, provided that this notice is retained in full, this product
11035 + * may be distributed under the terms of the GNU General Public License (GPL),
11036 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
11040 + * This software is provided 'as is' with no explicit or implied warranties
11041 + * in respect of its properties, including, but not limited to, correctness
11042 + * and/or fitness for purpose.
11043 + * ---------------------------------------------------------------------------
11047 + struct iovec *uio_iov;
11049 + off_t uio_offset;
11052 + enum uio_seg uio_segflg;
11053 + enum uio_rw uio_rw;
11054 + struct thread *uio_td;
11060 +++ b/crypto/ocf/talitos/talitos.c
11063 + * crypto/ocf/talitos/talitos.c
11065 + * An OCF-Linux module that uses Freescale's SEC to do the crypto.
11066 + * Based on crypto/ocf/hifn and crypto/ocf/safe OCF drivers
11068 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
11070 + * This code written by Kim A. B. Phillips <kim.phillips@freescale.com>
11071 + * some code copied from files with the following:
11072 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
11074 + * Redistribution and use in source and binary forms, with or without
11075 + * modification, are permitted provided that the following conditions
11078 + * 1. Redistributions of source code must retain the above copyright
11079 + * notice, this list of conditions and the following disclaimer.
11080 + * 2. Redistributions in binary form must reproduce the above copyright
11081 + * notice, this list of conditions and the following disclaimer in the
11082 + * documentation and/or other materials provided with the distribution.
11083 + * 3. The name of the author may not be used to endorse or promote products
11084 + * derived from this software without specific prior written permission.
11086 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
11087 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
11088 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
11089 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
11090 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
11091 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
11092 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
11093 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11094 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
11095 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11097 + * ---------------------------------------------------------------------------
11101 + * The Freescale SEC (also known as 'talitos') resides on the
11102 + * internal bus, and runs asynchronous to the processor core. It has
11103 + * a wide gamut of cryptographic acceleration features, including single-
11104 + * pass IPsec (also known as algorithm chaining). To properly utilize
11105 + * all of the SEC's performance enhancing features, further reworking
11106 + * of higher level code (framework, applications) will be necessary.
11108 + * The following table shows which SEC version is present in which devices:
11110 + * Devices SEC version
11112 + * 8272, 8248 SEC 1.0
11113 + * 885, 875 SEC 1.2
11114 + * 8555E, 8541E SEC 2.0
11118 + * The following table shows the features offered by each SEC version:
11121 + * version Bus I/F Clock nels DEU AESU AFEU MDEU PKEU RNG KEU
11123 + * SEC 1.0 internal 64b 100MHz 4 1 1 1 1 1 1 0
11124 + * SEC 1.2 internal 32b 66MHz 1 1 1 0 1 0 0 0
11125 + * SEC 2.0 internal 64b 166MHz 4 1 1 1 1 1 1 0
11126 + * SEC 2.01 internal 64b 166MHz 4 1 1 1 1 1 1 0
11127 + * SEC 2.1 internal 64b 333MHz 4 1 1 1 1 1 1 1
11129 + * Each execution unit in the SEC has two modes of execution; channel and
11130 + * slave/debug. This driver employs the channel infrastructure in the
11131 + * device for convenience. Only the RNG is directly accessed due to the
11132 + * convenience of its random fifo pool. The relationship between the
11133 + * channels and execution units is depicted in the following diagram:
11135 + * ------- ------------
11136 + * ---| ch0 |---| |
11138 + * | |------+-------+-------+-------+------------
11139 + * ------- | | | | | | |
11140 + * ---| ch1 |---| | | | | | |
11141 + * ------- | | ------ ------ ------ ------ ------
11142 + * |controller| |DEU | |AESU| |MDEU| |PKEU| ... |RNG |
11143 + * ------- | | ------ ------ ------ ------ ------
11144 + * ---| ch2 |---| | | | | | |
11145 + * ------- | | | | | | |
11146 + * | |------+-------+-------+-------+------------
11148 + * ---| ch3 |---| |
11149 + * ------- ------------
11151 + * Channel ch0 may drive an aes operation to the aes unit (AESU),
11152 + * and, at the same time, ch1 may drive a message digest operation
11153 + * to the mdeu. Each channel has an input descriptor FIFO, and the
11154 + * FIFO can contain, e.g. on the 8541E, up to 24 entries, before a
11155 + * a buffer overrun error is triggered. The controller is responsible
11156 + * for fetching the data from descriptor pointers, and passing the
11157 + * data to the appropriate EUs. The controller also writes the
11158 + * cryptographic operation's result to memory. The SEC notifies
11159 + * completion by triggering an interrupt and/or setting the 1st byte
11160 + * of the hdr field to 0xff.
11163 + * o support more algorithms
11164 + * o support more versions of the SEC
11165 + * o add support for linux 2.4
11166 + * o scatter-gather (sg) support
11167 + * o add support for public key ops (PKEU)
11168 + * o add statistics
11171 +#ifndef AUTOCONF_INCLUDED
11172 +#include <linux/config.h>
11174 +#include <linux/module.h>
11175 +#include <linux/init.h>
11176 +#include <linux/interrupt.h>
11177 +#include <linux/spinlock.h>
11178 +#include <linux/random.h>
11179 +#include <linux/skbuff.h>
11180 +#include <asm/scatterlist.h>
11181 +#include <linux/dma-mapping.h> /* dma_map_single() */
11182 +#include <linux/moduleparam.h>
11184 +#include <linux/version.h>
11185 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
11186 +#include <linux/platform_device.h>
11189 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
11190 +#include <linux/of_platform.h>
11193 +#include <cryptodev.h>
11196 +#define DRV_NAME "talitos"
11198 +#include "talitos_dev.h"
11199 +#include "talitos_soft.h"
11201 +#define read_random(p,l) get_random_bytes(p,l)
11203 +const char talitos_driver_name[] = "Talitos OCF";
11204 +const char talitos_driver_version[] = "0.2";
11206 +static int talitos_newsession(device_t dev, u_int32_t *sidp,
11207 + struct cryptoini *cri);
11208 +static int talitos_freesession(device_t dev, u_int64_t tid);
11209 +static int talitos_process(device_t dev, struct cryptop *crp, int hint);
11210 +static void dump_talitos_status(struct talitos_softc *sc);
11211 +static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td,
11213 +static void talitos_doneprocessing(struct talitos_softc *sc);
11214 +static void talitos_init_device(struct talitos_softc *sc);
11215 +static void talitos_reset_device_master(struct talitos_softc *sc);
11216 +static void talitos_reset_device(struct talitos_softc *sc);
11217 +static void talitos_errorprocessing(struct talitos_softc *sc);
11218 +#ifdef CONFIG_PPC_MERGE
11219 +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match);
11220 +static int talitos_remove(struct of_device *ofdev);
11222 +static int talitos_probe(struct platform_device *pdev);
11223 +static int talitos_remove(struct platform_device *pdev);
11225 +#ifdef CONFIG_OCF_RANDOMHARVEST
11226 +static int talitos_read_random(void *arg, u_int32_t *buf, int maxwords);
11227 +static void talitos_rng_init(struct talitos_softc *sc);
11230 +static device_method_t talitos_methods = {
11231 + /* crypto device methods */
11232 + DEVMETHOD(cryptodev_newsession, talitos_newsession),
11233 + DEVMETHOD(cryptodev_freesession,talitos_freesession),
11234 + DEVMETHOD(cryptodev_process, talitos_process),
11237 +#define debug talitos_debug
11238 +int talitos_debug = 0;
11239 +module_param(talitos_debug, int, 0644);
11240 +MODULE_PARM_DESC(talitos_debug, "Enable debug");
11242 +static inline void talitos_write(volatile unsigned *addr, u32 val)
11244 + out_be32(addr, val);
11247 +static inline u32 talitos_read(volatile unsigned *addr)
11250 + val = in_be32(addr);
11254 +static void dump_talitos_status(struct talitos_softc *sc)
11256 + unsigned int v, v_hi, i, *ptr;
11257 + v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
11258 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_MCR_HI);
11259 + printk(KERN_INFO "%s: MCR 0x%08x_%08x\n",
11260 + device_get_nameunit(sc->sc_cdev), v, v_hi);
11261 + v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
11262 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
11263 + printk(KERN_INFO "%s: IMR 0x%08x_%08x\n",
11264 + device_get_nameunit(sc->sc_cdev), v, v_hi);
11265 + v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
11266 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
11267 + printk(KERN_INFO "%s: ISR 0x%08x_%08x\n",
11268 + device_get_nameunit(sc->sc_cdev), v, v_hi);
11269 + for (i = 0; i < sc->sc_num_channels; i++) {
11270 + v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
11271 + TALITOS_CH_CDPR);
11272 + v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
11273 + TALITOS_CH_CDPR_HI);
11274 + printk(KERN_INFO "%s: CDPR ch%d 0x%08x_%08x\n",
11275 + device_get_nameunit(sc->sc_cdev), i, v, v_hi);
11277 + for (i = 0; i < sc->sc_num_channels; i++) {
11278 + v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
11279 + TALITOS_CH_CCPSR);
11280 + v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
11281 + TALITOS_CH_CCPSR_HI);
11282 + printk(KERN_INFO "%s: CCPSR ch%d 0x%08x_%08x\n",
11283 + device_get_nameunit(sc->sc_cdev), i, v, v_hi);
11285 + ptr = sc->sc_base_addr + TALITOS_CH_DESCBUF;
11286 + for (i = 0; i < 16; i++) {
11287 + v = talitos_read(ptr++); v_hi = talitos_read(ptr++);
11288 + printk(KERN_INFO "%s: DESCBUF ch0 0x%08x_%08x (tdp%02d)\n",
11289 + device_get_nameunit(sc->sc_cdev), v, v_hi, i);
11295 +#ifdef CONFIG_OCF_RANDOMHARVEST
11297 + * pull random numbers off the RNG FIFO, not exceeding amount available
11300 +talitos_read_random(void *arg, u_int32_t *buf, int maxwords)
11302 + struct talitos_softc *sc = (struct talitos_softc *) arg;
11306 + DPRINTF("%s()\n", __FUNCTION__);
11308 + /* check for things like FIFO underflow */
11309 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
11310 + if (unlikely(v)) {
11311 + printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
11312 + device_get_nameunit(sc->sc_cdev), v);
11316 + * OFL is number of available 64-bit words,
11317 + * shift and convert to a 32-bit word count
11319 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI);
11320 + v = (v & TALITOS_RNGSR_HI_OFL) >> (16 - 1);
11321 + if (maxwords > v)
11323 + for (rc = 0; rc < maxwords; rc++) {
11324 + buf[rc] = talitos_read(sc->sc_base_addr +
11325 + TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
11327 + if (maxwords & 1) {
11329 + * RNG will complain with an AE in the RNGISR
11330 + * if we don't complete the pairs of 32-bit reads
11331 + * to its 64-bit register based FIFO
11333 + v = talitos_read(sc->sc_base_addr +
11334 + TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
11341 +talitos_rng_init(struct talitos_softc *sc)
11345 + DPRINTF("%s()\n", __FUNCTION__);
11346 + /* reset RNG EU */
11347 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGRCR_HI);
11348 + v |= TALITOS_RNGRCR_HI_SR;
11349 + talitos_write(sc->sc_base_addr + TALITOS_RNGRCR_HI, v);
11350 + while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI)
11351 + & TALITOS_RNGSR_HI_RD) == 0)
11354 + * we tell the RNG to start filling the RNG FIFO
11355 + * by writing the RNGDSR
11357 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGDSR_HI);
11358 + talitos_write(sc->sc_base_addr + TALITOS_RNGDSR_HI, v);
11360 + * 64 bits of data will be pushed onto the FIFO every
11361 + * 256 SEC cycles until the FIFO is full. The RNG then
11362 + * attempts to keep the FIFO full.
11364 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
11366 + printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
11367 + device_get_nameunit(sc->sc_cdev), v);
11371 + * n.b. we need to add a FIPS test here - if the RNG is going
11372 + * to fail, it's going to fail at reset time
11376 +#endif /* CONFIG_OCF_RANDOMHARVEST */
11379 + * Generate a new software session.
11382 +talitos_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
11384 + struct cryptoini *c, *encini = NULL, *macini = NULL;
11385 + struct talitos_softc *sc = device_get_softc(dev);
11386 + struct talitos_session *ses = NULL;
11389 + DPRINTF("%s()\n", __FUNCTION__);
11390 + if (sidp == NULL || cri == NULL || sc == NULL) {
11391 + DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
11394 + for (c = cri; c != NULL; c = c->cri_next) {
11395 + if (c->cri_alg == CRYPTO_MD5 ||
11396 + c->cri_alg == CRYPTO_MD5_HMAC ||
11397 + c->cri_alg == CRYPTO_SHA1 ||
11398 + c->cri_alg == CRYPTO_SHA1_HMAC ||
11399 + c->cri_alg == CRYPTO_NULL_HMAC) {
11403 + } else if (c->cri_alg == CRYPTO_DES_CBC ||
11404 + c->cri_alg == CRYPTO_3DES_CBC ||
11405 + c->cri_alg == CRYPTO_AES_CBC ||
11406 + c->cri_alg == CRYPTO_NULL_CBC) {
11411 + DPRINTF("UNKNOWN c->cri_alg %d\n", encini->cri_alg);
11415 + if (encini == NULL && macini == NULL)
11418 + /* validate key length */
11419 + switch (encini->cri_alg) {
11420 + case CRYPTO_DES_CBC:
11421 + if (encini->cri_klen != 64)
11424 + case CRYPTO_3DES_CBC:
11425 + if (encini->cri_klen != 192) {
11429 + case CRYPTO_AES_CBC:
11430 + if (encini->cri_klen != 128 &&
11431 + encini->cri_klen != 192 &&
11432 + encini->cri_klen != 256)
11436 + DPRINTF("UNKNOWN encini->cri_alg %d\n",
11437 + encini->cri_alg);
11442 + if (sc->sc_sessions == NULL) {
11443 + ses = sc->sc_sessions = (struct talitos_session *)
11444 + kmalloc(sizeof(struct talitos_session), SLAB_ATOMIC);
11447 + memset(ses, 0, sizeof(struct talitos_session));
11449 + sc->sc_nsessions = 1;
11451 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
11452 + if (sc->sc_sessions[sesn].ses_used == 0) {
11453 + ses = &sc->sc_sessions[sesn];
11458 + if (ses == NULL) {
11459 + /* allocating session */
11460 + sesn = sc->sc_nsessions;
11461 + ses = (struct talitos_session *) kmalloc(
11462 + (sesn + 1) * sizeof(struct talitos_session),
11467 + (sesn + 1) * sizeof(struct talitos_session));
11468 + memcpy(ses, sc->sc_sessions,
11469 + sesn * sizeof(struct talitos_session));
11470 + memset(sc->sc_sessions, 0,
11471 + sesn * sizeof(struct talitos_session));
11472 + kfree(sc->sc_sessions);
11473 + sc->sc_sessions = ses;
11474 + ses = &sc->sc_sessions[sesn];
11475 + sc->sc_nsessions++;
11479 + ses->ses_used = 1;
11483 + /* XXX may read fewer than requested */
11484 + read_random(ses->ses_iv, sizeof(ses->ses_iv));
11486 + ses->ses_klen = (encini->cri_klen + 7) / 8;
11487 + memcpy(ses->ses_key, encini->cri_key, ses->ses_klen);
11489 + /* doing hash on top of cipher */
11490 + ses->ses_hmac_len = (macini->cri_klen + 7) / 8;
11491 + memcpy(ses->ses_hmac, macini->cri_key,
11492 + ses->ses_hmac_len);
11494 + } else if (macini) {
11496 + ses->ses_klen = (macini->cri_klen + 7) / 8;
11497 + memcpy(ses->ses_key, macini->cri_key, ses->ses_klen);
11500 + /* back compat way of determining MSC result len */
11502 + ses->ses_mlen = macini->cri_mlen;
11503 + if (ses->ses_mlen == 0) {
11504 + if (macini->cri_alg == CRYPTO_MD5_HMAC)
11505 + ses->ses_mlen = MD5_HASH_LEN;
11507 + ses->ses_mlen = SHA1_HASH_LEN;
11511 + /* really should make up a template td here,
11512 + * and only fill things like i/o and direction in process() */
11514 + /* assign session ID */
11515 + *sidp = TALITOS_SID(sc->sc_num, sesn);
11520 + * Deallocate a session.
11523 +talitos_freesession(device_t dev, u_int64_t tid)
11525 + struct talitos_softc *sc = device_get_softc(dev);
11526 + int session, ret;
11527 + u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
11531 + session = TALITOS_SESSION(sid);
11532 + if (session < sc->sc_nsessions) {
11533 + memset(&sc->sc_sessions[session], 0,
11534 + sizeof(sc->sc_sessions[session]));
11542 + * launch device processing - it will come back with done notification
11543 + * in the form of an interrupt and/or HDR_DONE_BITS in header
11547 + struct talitos_softc *sc,
11548 + struct talitos_desc *td,
11553 + v = dma_map_single(NULL, td, sizeof(*td), DMA_TO_DEVICE);
11554 + talitos_write(sc->sc_base_addr +
11555 + chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF, 0);
11556 + talitos_write(sc->sc_base_addr +
11557 + chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF_HI, v);
11562 +talitos_process(device_t dev, struct cryptop *crp, int hint)
11564 + int i, err = 0, ivsize;
11565 + struct talitos_softc *sc = device_get_softc(dev);
11566 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
11568 + struct talitos_session *ses;
11569 + struct talitos_desc *td;
11570 + unsigned long flags;
11571 + /* descriptor mappings */
11572 + int hmac_key, hmac_data, cipher_iv, cipher_key,
11573 + in_fifo, out_fifo, cipher_iv_out;
11574 + static int chsel = -1;
11576 + DPRINTF("%s()\n", __FUNCTION__);
11578 + if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
11581 + crp->crp_etype = 0;
11582 + if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
11586 + ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];
11588 + /* enter the channel scheduler */
11589 + spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11591 + /* reuse channel that already had/has requests for the required EU */
11592 + for (i = 0; i < sc->sc_num_channels; i++) {
11593 + if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
11596 + if (i == sc->sc_num_channels) {
11598 + * haven't seen this algo the last sc_num_channels or more
11599 + * use round robin in this case
11600 + * nb: sc->sc_num_channels must be power of 2
11602 + chsel = (chsel + 1) & (sc->sc_num_channels - 1);
11605 + * matches channel with same target execution unit;
11606 + * use same channel in this case
11610 + sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;
11612 + /* release the channel scheduler lock */
11613 + spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11615 + /* acquire the selected channel fifo lock */
11616 + spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);
11618 + /* find and reserve next available descriptor-cryptop pair */
11619 + for (i = 0; i < sc->sc_chfifo_len; i++) {
11620 + if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
11622 + * ensure correct descriptor formation by
11623 + * avoiding inadvertently setting "optional" entries
11624 + * e.g. not using "optional" dptr2 for MD/HMAC descs
11626 + memset(&sc->sc_chnfifo[chsel][i].cf_desc,
11628 + /* reserve it with done notification request bit */
11629 + sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
11630 + TALITOS_DONE_NOTIFY;
11634 + spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);
11636 + if (i == sc->sc_chfifo_len) {
11642 + td = &sc->sc_chnfifo[chsel][i].cf_desc;
11643 + sc->sc_chnfifo[chsel][i].cf_crp = crp;
11645 + crd1 = crp->crp_desc;
11646 + if (crd1 == NULL) {
11650 + crd2 = crd1->crd_next;
11651 + /* prevent compiler warning */
11654 + if (crd2 == NULL) {
11655 + td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
11656 + /* assign descriptor dword ptr mappings for this desc. type */
11660 + cipher_iv_out = 5;
11661 + if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
11662 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
11663 + crd1->crd_alg == CRYPTO_SHA1 ||
11664 + crd1->crd_alg == CRYPTO_MD5) {
11668 + } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
11669 + crd1->crd_alg == CRYPTO_3DES_CBC ||
11670 + crd1->crd_alg == CRYPTO_AES_CBC ||
11671 + crd1->crd_alg == CRYPTO_ARC4) {
11676 + DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
11681 + if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
11682 + td->hdr |= TD_TYPE_IPSEC_ESP;
11684 + DPRINTF("unimplemented: multiple descriptor ipsec\n");
11688 + /* assign descriptor dword ptr mappings for this desc. type */
11695 + cipher_iv_out = 6;
11696 + if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
11697 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
11698 + crd1->crd_alg == CRYPTO_MD5 ||
11699 + crd1->crd_alg == CRYPTO_SHA1) &&
11700 + (crd2->crd_alg == CRYPTO_DES_CBC ||
11701 + crd2->crd_alg == CRYPTO_3DES_CBC ||
11702 + crd2->crd_alg == CRYPTO_AES_CBC ||
11703 + crd2->crd_alg == CRYPTO_ARC4) &&
11704 + ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
11707 + } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
11708 + crd1->crd_alg == CRYPTO_ARC4 ||
11709 + crd1->crd_alg == CRYPTO_3DES_CBC ||
11710 + crd1->crd_alg == CRYPTO_AES_CBC) &&
11711 + (crd2->crd_alg == CRYPTO_MD5_HMAC ||
11712 + crd2->crd_alg == CRYPTO_SHA1_HMAC ||
11713 + crd2->crd_alg == CRYPTO_MD5 ||
11714 + crd2->crd_alg == CRYPTO_SHA1) &&
11715 + (crd1->crd_flags & CRD_F_ENCRYPT)) {
11719 + /* We cannot order the SEC as requested */
11720 + printk("%s: cannot do the order\n",
11721 + device_get_nameunit(sc->sc_cdev));
11726 + /* assign in_fifo and out_fifo based on input/output struct type */
11727 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
11728 + /* using SKB buffers */
11729 + struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
11730 + if (skb_shinfo(skb)->nr_frags) {
11731 + printk("%s: skb frags unimplemented\n",
11732 + device_get_nameunit(sc->sc_cdev));
11736 + td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
11737 + skb->len, DMA_TO_DEVICE);
11738 + td->ptr[in_fifo].len = skb->len;
11739 + td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
11740 + skb->len, DMA_TO_DEVICE);
11741 + td->ptr[out_fifo].len = skb->len;
11742 + td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
11743 + skb->len, DMA_TO_DEVICE);
11744 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
11745 + /* using IOV buffers */
11746 + struct uio *uiop = (struct uio *)crp->crp_buf;
11747 + if (uiop->uio_iovcnt > 1) {
11748 + printk("%s: iov frags unimplemented\n",
11749 + device_get_nameunit(sc->sc_cdev));
11753 + td->ptr[in_fifo].ptr = dma_map_single(NULL,
11754 + uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
11755 + td->ptr[in_fifo].len = crp->crp_ilen;
11756 + /* crp_olen is never set; always use crp_ilen */
11757 + td->ptr[out_fifo].ptr = dma_map_single(NULL,
11758 + uiop->uio_iov->iov_base,
11759 + crp->crp_ilen, DMA_TO_DEVICE);
11760 + td->ptr[out_fifo].len = crp->crp_ilen;
11762 + /* using contig buffers */
11763 + td->ptr[in_fifo].ptr = dma_map_single(NULL,
11764 + crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
11765 + td->ptr[in_fifo].len = crp->crp_ilen;
11766 + td->ptr[out_fifo].ptr = dma_map_single(NULL,
11767 + crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
11768 + td->ptr[out_fifo].len = crp->crp_ilen;
11771 + switch (enccrd->crd_alg) {
11772 + case CRYPTO_3DES_CBC:
11773 + td->hdr |= TALITOS_MODE0_DEU_3DES;
11774 + /* FALLTHROUGH */
11775 + case CRYPTO_DES_CBC:
11776 + td->hdr |= TALITOS_SEL0_DEU
11777 + | TALITOS_MODE0_DEU_CBC;
11778 + if (enccrd->crd_flags & CRD_F_ENCRYPT)
11779 + td->hdr |= TALITOS_MODE0_DEU_ENC;
11780 + ivsize = 2*sizeof(u_int32_t);
11781 + DPRINTF("%cDES ses %d ch %d len %d\n",
11782 + (td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
11783 + (u32)TALITOS_SESSION(crp->crp_sid),
11784 + chsel, td->ptr[in_fifo].len);
11786 + case CRYPTO_AES_CBC:
11787 + td->hdr |= TALITOS_SEL0_AESU
11788 + | TALITOS_MODE0_AESU_CBC;
11789 + if (enccrd->crd_flags & CRD_F_ENCRYPT)
11790 + td->hdr |= TALITOS_MODE0_AESU_ENC;
11791 + ivsize = 4*sizeof(u_int32_t);
11792 + DPRINTF("AES ses %d ch %d len %d\n",
11793 + (u32)TALITOS_SESSION(crp->crp_sid),
11794 + chsel, td->ptr[in_fifo].len);
11797 + printk("%s: unimplemented enccrd->crd_alg %d\n",
11798 + device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
11803 + * Setup encrypt/decrypt state. When using basic ops
11804 + * we can't use an inline IV because hash/crypt offset
11805 + * must be from the end of the IV to the start of the
11806 + * crypt data and this leaves out the preceding header
11807 + * from the hash calculation. Instead we place the IV
11808 + * in the state record and set the hash/crypt offset to
11809 + * copy both the header+IV.
11811 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
11812 + td->hdr |= TALITOS_DIR_OUTBOUND;
11813 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
11814 + iv = enccrd->crd_iv;
11816 + iv = (caddr_t) ses->ses_iv;
11817 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
11818 + crypto_copyback(crp->crp_flags, crp->crp_buf,
11819 + enccrd->crd_inject, ivsize, iv);
11822 + td->hdr |= TALITOS_DIR_INBOUND;
11823 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
11824 + iv = enccrd->crd_iv;
11825 + bcopy(enccrd->crd_iv, iv, ivsize);
11827 + iv = (caddr_t) ses->ses_iv;
11828 + crypto_copydata(crp->crp_flags, crp->crp_buf,
11829 + enccrd->crd_inject, ivsize, iv);
11832 + td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
11834 + td->ptr[cipher_iv].len = ivsize;
11836 + * we don't need the cipher iv out length/pointer
11837 + * field to do ESP IPsec. Therefore we set the len field as 0,
11838 + * which tells the SEC not to do anything with this len/ptr
11839 + * field. Previously, when length/pointer as pointing to iv,
11840 + * it gave us corruption of packets.
11842 + td->ptr[cipher_iv_out].len = 0;
11844 + if (enccrd && maccrd) {
11845 + /* this is ipsec only for now */
11846 + td->hdr |= TALITOS_SEL1_MDEU
11847 + | TALITOS_MODE1_MDEU_INIT
11848 + | TALITOS_MODE1_MDEU_PAD;
11849 + switch (maccrd->crd_alg) {
11851 + td->hdr |= TALITOS_MODE1_MDEU_MD5;
11853 + case CRYPTO_MD5_HMAC:
11854 + td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
11856 + case CRYPTO_SHA1:
11857 + td->hdr |= TALITOS_MODE1_MDEU_SHA1;
11859 + case CRYPTO_SHA1_HMAC:
11860 + td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
11863 + /* We cannot order the SEC as requested */
11864 + printk("%s: cannot do the order\n",
11865 + device_get_nameunit(sc->sc_cdev));
11869 + if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
11870 + (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
11872 + * The offset from hash data to the start of
11873 + * crypt data is the difference in the skips.
11875 + /* ipsec only for now */
11876 + td->ptr[hmac_key].ptr = dma_map_single(NULL,
11877 + ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
11878 + td->ptr[hmac_key].len = ses->ses_hmac_len;
11879 + td->ptr[in_fifo].ptr += enccrd->crd_skip;
11880 + td->ptr[in_fifo].len = enccrd->crd_len;
11881 + td->ptr[out_fifo].ptr += enccrd->crd_skip;
11882 + td->ptr[out_fifo].len = enccrd->crd_len;
11883 + /* bytes of HMAC to postpend to ciphertext */
11884 + td->ptr[out_fifo].extent = ses->ses_mlen;
11885 + td->ptr[hmac_data].ptr += maccrd->crd_skip;
11886 + td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
11888 + if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
11889 + printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
11890 + device_get_nameunit(sc->sc_cdev));
11893 + if (!enccrd && maccrd) {
11894 + /* single MD5 or SHA */
11895 + td->hdr |= TALITOS_SEL0_MDEU
11896 + | TALITOS_MODE0_MDEU_INIT
11897 + | TALITOS_MODE0_MDEU_PAD;
11898 + switch (maccrd->crd_alg) {
11900 + td->hdr |= TALITOS_MODE0_MDEU_MD5;
11901 + DPRINTF("MD5 ses %d ch %d len %d\n",
11902 + (u32)TALITOS_SESSION(crp->crp_sid),
11903 + chsel, td->ptr[in_fifo].len);
11905 + case CRYPTO_MD5_HMAC:
11906 + td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
11908 + case CRYPTO_SHA1:
11909 + td->hdr |= TALITOS_MODE0_MDEU_SHA1;
11910 + DPRINTF("SHA1 ses %d ch %d len %d\n",
11911 + (u32)TALITOS_SESSION(crp->crp_sid),
11912 + chsel, td->ptr[in_fifo].len);
11914 + case CRYPTO_SHA1_HMAC:
11915 + td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
11918 + /* We cannot order the SEC as requested */
11919 + DPRINTF("cannot do the order\n");
11924 + if (crp->crp_flags & CRYPTO_F_IOV)
11925 + td->ptr[out_fifo].ptr += maccrd->crd_inject;
11927 + if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
11928 + (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
11929 + td->ptr[hmac_key].ptr = dma_map_single(NULL,
11930 + ses->ses_hmac, ses->ses_hmac_len,
11932 + td->ptr[hmac_key].len = ses->ses_hmac_len;
11936 + /* using process key (session data has duplicate) */
11937 + td->ptr[cipher_key].ptr = dma_map_single(NULL,
11938 + enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
11940 + td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
11942 + /* descriptor complete - GO! */
11943 + return talitos_submit(sc, td, chsel);
11946 + if (err != ERESTART) {
11947 + crp->crp_etype = err;
11948 + crypto_done(crp);
11953 +/* go through all channels descriptors, notifying OCF what has
11954 + * _and_hasn't_ successfully completed and reset the device
11955 + * (otherwise it's up to decoding desc hdrs!)
11957 +static void talitos_errorprocessing(struct talitos_softc *sc)
11959 + unsigned long flags;
11962 + /* disable further scheduling until under control */
11963 + spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11965 + if (debug) dump_talitos_status(sc);
11966 + /* go through descriptors, try and salvage those successfully done,
11967 + * and EIO those that weren't
11969 + for (i = 0; i < sc->sc_num_channels; i++) {
11970 + spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
11971 + for (j = 0; j < sc->sc_chfifo_len; j++) {
11972 + if (sc->sc_chnfifo[i][j].cf_desc.hdr) {
11973 + if ((sc->sc_chnfifo[i][j].cf_desc.hdr
11974 + & TALITOS_HDR_DONE_BITS)
11975 + != TALITOS_HDR_DONE_BITS) {
11976 + /* this one didn't finish */
11977 + /* signify in crp->etype */
11978 + sc->sc_chnfifo[i][j].cf_crp->crp_etype
11982 + continue; /* free entry */
11983 + /* either way, notify ocf */
11984 + crypto_done(sc->sc_chnfifo[i][j].cf_crp);
11985 + /* and tag it available again
11987 + * memset to ensure correct descriptor formation by
11988 + * avoiding inadvertently setting "optional" entries
11989 + * e.g. not using "optional" dptr2 MD/HMAC processing
11991 + memset(&sc->sc_chnfifo[i][j].cf_desc,
11992 + 0, sizeof(struct talitos_desc));
11994 + spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
11996 + /* reset and initialize the SEC h/w device */
11997 + talitos_reset_device(sc);
11998 + talitos_init_device(sc);
11999 +#ifdef CONFIG_OCF_RANDOMHARVEST
12000 + if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)
12001 + talitos_rng_init(sc);
12004 + /* Okay. Stand by. */
12005 + spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
12010 +/* go through all channels descriptors, notifying OCF what's been done */
12011 +static void talitos_doneprocessing(struct talitos_softc *sc)
12013 + unsigned long flags;
12016 + /* go through descriptors looking for done bits */
12017 + for (i = 0; i < sc->sc_num_channels; i++) {
12018 + spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
12019 + for (j = 0; j < sc->sc_chfifo_len; j++) {
12020 + /* descriptor has done bits set? */
12021 + if ((sc->sc_chnfifo[i][j].cf_desc.hdr
12022 + & TALITOS_HDR_DONE_BITS)
12023 + == TALITOS_HDR_DONE_BITS) {
12025 + crypto_done(sc->sc_chnfifo[i][j].cf_crp);
12026 + /* and tag it available again
12028 + * memset to ensure correct descriptor formation by
12029 + * avoiding inadvertently setting "optional" entries
12030 + * e.g. not using "optional" dptr2 MD/HMAC processing
12032 + memset(&sc->sc_chnfifo[i][j].cf_desc,
12033 + 0, sizeof(struct talitos_desc));
12036 + spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
12041 +static irqreturn_t
12042 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
12043 +talitos_intr(int irq, void *arg)
12045 +talitos_intr(int irq, void *arg, struct pt_regs *regs)
12048 + struct talitos_softc *sc = arg;
12049 + u_int32_t v, v_hi;
12052 + v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
12053 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
12054 + talitos_write(sc->sc_base_addr + TALITOS_ICR, v);
12055 + talitos_write(sc->sc_base_addr + TALITOS_ICR_HI, v_hi);
12057 + if (unlikely(v & TALITOS_ISR_ERROR)) {
12058 + /* Okay, Houston, we've had a problem here. */
12059 + printk(KERN_DEBUG "%s: got error interrupt - ISR 0x%08x_%08x\n",
12060 + device_get_nameunit(sc->sc_cdev), v, v_hi);
12061 + talitos_errorprocessing(sc);
12063 + if (likely(v & TALITOS_ISR_DONE)) {
12064 + talitos_doneprocessing(sc);
12066 + return IRQ_HANDLED;
12070 + * Initialize registers we need to touch only once.
12073 +talitos_init_device(struct talitos_softc *sc)
12078 + DPRINTF("%s()\n", __FUNCTION__);
12080 + /* init all channels */
12081 + for (i = 0; i < sc->sc_num_channels; i++) {
12082 + v = talitos_read(sc->sc_base_addr +
12083 + i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI);
12084 + v |= TALITOS_CH_CCCR_HI_CDWE
12085 + | TALITOS_CH_CCCR_HI_CDIE; /* invoke interrupt if done */
12086 + talitos_write(sc->sc_base_addr +
12087 + i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI, v);
12089 + /* enable all interrupts */
12090 + v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
12091 + v |= TALITOS_IMR_ALL;
12092 + talitos_write(sc->sc_base_addr + TALITOS_IMR, v);
12093 + v = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
12094 + v |= TALITOS_IMR_HI_ERRONLY;
12095 + talitos_write(sc->sc_base_addr + TALITOS_IMR_HI, v);
12100 + * set the master reset bit on the device.
12103 +talitos_reset_device_master(struct talitos_softc *sc)
12107 + /* Reset the device by writing 1 to MCR:SWR and waiting 'til cleared */
12108 + v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
12109 + talitos_write(sc->sc_base_addr + TALITOS_MCR, v | TALITOS_MCR_SWR);
12111 + while (talitos_read(sc->sc_base_addr + TALITOS_MCR) & TALITOS_MCR_SWR)
12118 + * Resets the device. Values in the registers are left as is
12119 + * from the reset (i.e. initial values are assigned elsewhere).
12122 +talitos_reset_device(struct talitos_softc *sc)
12127 + DPRINTF("%s()\n", __FUNCTION__);
12131 + * errata documentation: warning: certain SEC interrupts
12132 + * are not fully cleared by writing the MCR:SWR bit,
12133 + * set bit twice to completely reset
12135 + talitos_reset_device_master(sc); /* once */
12136 + talitos_reset_device_master(sc); /* and once again */
12138 + /* reset all channels */
12139 + for (i = 0; i < sc->sc_num_channels; i++) {
12140 + v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
12141 + TALITOS_CH_CCCR);
12142 + talitos_write(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
12143 + TALITOS_CH_CCCR, v | TALITOS_CH_CCCR_RESET);
12147 +/* Set up the crypto device structure, private data,
12148 + * and anything else we need before we start */
12149 +#ifdef CONFIG_PPC_MERGE
12150 +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match)
12152 +static int talitos_probe(struct platform_device *pdev)
12155 + struct talitos_softc *sc = NULL;
12156 + struct resource *r;
12157 +#ifdef CONFIG_PPC_MERGE
12158 + struct device *device = &ofdev->dev;
12159 + struct device_node *np = ofdev->node;
12160 + const unsigned int *prop;
12162 + struct resource res;
12164 + static int num_chips = 0;
12168 + DPRINTF("%s()\n", __FUNCTION__);
12170 + sc = (struct talitos_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
12173 + memset(sc, 0, sizeof(*sc));
12175 + softc_device_init(sc, DRV_NAME, num_chips, talitos_methods);
12179 +#ifndef CONFIG_PPC_MERGE
12180 + sc->sc_dev = pdev;
12182 + sc->sc_num = num_chips++;
12184 +#ifdef CONFIG_PPC_MERGE
12185 + dev_set_drvdata(device, sc);
12187 + platform_set_drvdata(sc->sc_dev, sc);
12190 + /* get the irq line */
12191 +#ifdef CONFIG_PPC_MERGE
12192 + err = of_address_to_resource(np, 0, &res);
12197 + sc->sc_irq = irq_of_parse_and_map(np, 0);
12199 + /* get a pointer to the register memory */
12200 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12202 + sc->sc_irq = platform_get_irq(pdev, 0);
12204 + rc = request_irq(sc->sc_irq, talitos_intr, 0,
12205 + device_get_nameunit(sc->sc_cdev), sc);
12207 + printk(KERN_ERR "%s: failed to hook irq %d\n",
12208 + device_get_nameunit(sc->sc_cdev), sc->sc_irq);
12213 + sc->sc_base_addr = (ocf_iomem_t) ioremap(r->start, (r->end - r->start));
12214 + if (!sc->sc_base_addr) {
12215 + printk(KERN_ERR "%s: failed to ioremap\n",
12216 + device_get_nameunit(sc->sc_cdev));
12220 + /* figure out our SEC's properties and capabilities */
12221 + sc->sc_chiprev = (u64)talitos_read(sc->sc_base_addr + TALITOS_ID) << 32
12222 + | talitos_read(sc->sc_base_addr + TALITOS_ID_HI);
12223 + DPRINTF("sec id 0x%llx\n", sc->sc_chiprev);
12225 +#ifdef CONFIG_PPC_MERGE
12226 + /* get SEC properties from device tree, defaulting to SEC 2.0 */
12228 + prop = of_get_property(np, "num-channels", NULL);
12229 + sc->sc_num_channels = prop ? *prop : TALITOS_NCHANNELS_SEC_2_0;
12231 + prop = of_get_property(np, "channel-fifo-len", NULL);
12232 + sc->sc_chfifo_len = prop ? *prop : TALITOS_CHFIFOLEN_SEC_2_0;
12234 + prop = of_get_property(np, "exec-units-mask", NULL);
12235 + sc->sc_exec_units = prop ? *prop : TALITOS_HAS_EUS_SEC_2_0;
12237 + prop = of_get_property(np, "descriptor-types-mask", NULL);
12238 + sc->sc_desc_types = prop ? *prop : TALITOS_HAS_DESCTYPES_SEC_2_0;
12240 + /* bulk should go away with openfirmware flat device tree support */
12241 + if (sc->sc_chiprev & TALITOS_ID_SEC_2_0) {
12242 + sc->sc_num_channels = TALITOS_NCHANNELS_SEC_2_0;
12243 + sc->sc_chfifo_len = TALITOS_CHFIFOLEN_SEC_2_0;
12244 + sc->sc_exec_units = TALITOS_HAS_EUS_SEC_2_0;
12245 + sc->sc_desc_types = TALITOS_HAS_DESCTYPES_SEC_2_0;
12247 + printk(KERN_ERR "%s: failed to id device\n",
12248 + device_get_nameunit(sc->sc_cdev));
12253 + /* + 1 is for the meta-channel lock used by the channel scheduler */
12254 + sc->sc_chnfifolock = (spinlock_t *) kmalloc(
12255 + (sc->sc_num_channels + 1) * sizeof(spinlock_t), GFP_KERNEL);
12256 + if (!sc->sc_chnfifolock)
12258 + for (i = 0; i < sc->sc_num_channels + 1; i++) {
12259 + spin_lock_init(&sc->sc_chnfifolock[i]);
12262 + sc->sc_chnlastalg = (int *) kmalloc(
12263 + sc->sc_num_channels * sizeof(int), GFP_KERNEL);
12264 + if (!sc->sc_chnlastalg)
12266 + memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));
12268 + sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
12269 + sc->sc_num_channels * sizeof(struct desc_cryptop_pair *),
12271 + if (!sc->sc_chnfifo)
12273 + for (i = 0; i < sc->sc_num_channels; i++) {
12274 + sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
12275 + sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair),
12277 + if (!sc->sc_chnfifo[i])
12279 + memset(sc->sc_chnfifo[i], 0,
12280 + sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
12283 + /* reset and initialize the SEC h/w device */
12284 + talitos_reset_device(sc);
12285 + talitos_init_device(sc);
12287 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
12288 + if (sc->sc_cid < 0) {
12289 + printk(KERN_ERR "%s: could not get crypto driver id\n",
12290 + device_get_nameunit(sc->sc_cdev));
12294 + /* register algorithms with the framework */
12295 + printk("%s:", device_get_nameunit(sc->sc_cdev));
12297 + if (sc->sc_exec_units & TALITOS_HAS_EU_RNG) {
12299 +#ifdef CONFIG_OCF_RANDOMHARVEST
12300 + talitos_rng_init(sc);
12301 + crypto_rregister(sc->sc_cid, talitos_read_random, sc);
12304 + if (sc->sc_exec_units & TALITOS_HAS_EU_DEU) {
12305 + printk(" des/3des");
12306 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
12307 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
12309 + if (sc->sc_exec_units & TALITOS_HAS_EU_AESU) {
12311 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
12313 + if (sc->sc_exec_units & TALITOS_HAS_EU_MDEU) {
12315 + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
12316 + /* HMAC support only with IPsec for now */
12317 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
12319 + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
12320 + /* HMAC support only with IPsec for now */
12321 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
12327 +#ifndef CONFIG_PPC_MERGE
12328 + talitos_remove(pdev);
12333 +#ifdef CONFIG_PPC_MERGE
12334 +static int talitos_remove(struct of_device *ofdev)
12336 +static int talitos_remove(struct platform_device *pdev)
12339 +#ifdef CONFIG_PPC_MERGE
12340 + struct talitos_softc *sc = dev_get_drvdata(&ofdev->dev);
12342 + struct talitos_softc *sc = platform_get_drvdata(pdev);
12346 + DPRINTF("%s()\n", __FUNCTION__);
12347 + if (sc->sc_cid >= 0)
12348 + crypto_unregister_all(sc->sc_cid);
12349 + if (sc->sc_chnfifo) {
12350 + for (i = 0; i < sc->sc_num_channels; i++)
12351 + if (sc->sc_chnfifo[i])
12352 + kfree(sc->sc_chnfifo[i]);
12353 + kfree(sc->sc_chnfifo);
12355 + if (sc->sc_chnlastalg)
12356 + kfree(sc->sc_chnlastalg);
12357 + if (sc->sc_chnfifolock)
12358 + kfree(sc->sc_chnfifolock);
12359 + if (sc->sc_irq != -1)
12360 + free_irq(sc->sc_irq, sc);
12361 + if (sc->sc_base_addr)
12362 + iounmap((void *) sc->sc_base_addr);
12367 +#ifdef CONFIG_PPC_MERGE
12368 +static struct of_device_id talitos_match[] = {
12370 + .type = "crypto",
12371 + .compatible = "talitos",
12376 +MODULE_DEVICE_TABLE(of, talitos_match);
12378 +static struct of_platform_driver talitos_driver = {
12379 + .name = DRV_NAME,
12380 + .match_table = talitos_match,
12381 + .probe = talitos_probe,
12382 + .remove = talitos_remove,
12385 +static int __init talitos_init(void)
12387 + return of_register_platform_driver(&talitos_driver);
12390 +static void __exit talitos_exit(void)
12392 + of_unregister_platform_driver(&talitos_driver);
12395 +/* Structure for a platform device driver */
12396 +static struct platform_driver talitos_driver = {
12397 + .probe = talitos_probe,
12398 + .remove = talitos_remove,
12400 + .name = "fsl-sec2",
12404 +static int __init talitos_init(void)
12406 + return platform_driver_register(&talitos_driver);
12409 +static void __exit talitos_exit(void)
12411 + platform_driver_unregister(&talitos_driver);
12415 +module_init(talitos_init);
12416 +module_exit(talitos_exit);
12418 +MODULE_LICENSE("Dual BSD/GPL");
12419 +MODULE_AUTHOR("kim.phillips@freescale.com");
12420 +MODULE_DESCRIPTION("OCF driver for Freescale SEC (talitos)");
12422 +++ b/crypto/ocf/talitos/talitos_soft.h
12425 + * Freescale SEC data structures for integration with ocf-linux
12427 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
12429 + * Redistribution and use in source and binary forms, with or without
12430 + * modification, are permitted provided that the following conditions
12433 + * 1. Redistributions of source code must retain the above copyright
12434 + * notice, this list of conditions and the following disclaimer.
12435 + * 2. Redistributions in binary form must reproduce the above copyright
12436 + * notice, this list of conditions and the following disclaimer in the
12437 + * documentation and/or other materials provided with the distribution.
12438 + * 3. The name of the author may not be used to endorse or promote products
12439 + * derived from this software without specific prior written permission.
12441 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12442 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12443 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12444 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12445 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12446 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12447 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12448 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12449 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12450 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12454 + * paired descriptor and associated crypto operation
12456 +struct desc_cryptop_pair {
12457 + struct talitos_desc cf_desc; /* descriptor ptr */
12458 + struct cryptop *cf_crp; /* cryptop ptr */
12462 + * Holds data specific to a single talitos device.
12464 +struct talitos_softc {
12465 + softc_device_decl sc_cdev;
12466 + struct platform_device *sc_dev; /* device backpointer */
12467 + ocf_iomem_t sc_base_addr;
12469 + int sc_num; /* if we have multiple chips */
12470 + int32_t sc_cid; /* crypto tag */
12471 + u64 sc_chiprev; /* major/minor chip revision */
12472 + int sc_nsessions;
12473 + struct talitos_session *sc_sessions;
12474 + int sc_num_channels;/* number of crypto channels */
12475 + int sc_chfifo_len; /* channel fetch fifo len */
12476 + int sc_exec_units; /* execution units mask */
12477 + int sc_desc_types; /* descriptor types mask */
12479 + * mutual exclusion for intra-channel resources, e.g. fetch fifos
12480 + * the last entry is a meta-channel lock used by the channel scheduler
12482 + spinlock_t *sc_chnfifolock;
12483 + /* sc_chnlastalgo contains last algorithm for that channel */
12484 + int *sc_chnlastalg;
12485 + /* sc_chnfifo holds pending descriptor--crypto operation pairs */
12486 + struct desc_cryptop_pair **sc_chnfifo;
12489 +struct talitos_session {
12490 + u_int32_t ses_used;
12491 + u_int32_t ses_klen; /* key length in bits */
12492 + u_int32_t ses_key[8]; /* DES/3DES/AES key */
12493 + u_int32_t ses_hmac[5]; /* hmac inner state */
12494 + u_int32_t ses_hmac_len; /* hmac length */
12495 + u_int32_t ses_iv[4]; /* DES/3DES/AES iv */
12496 + u_int32_t ses_mlen; /* desired hash result len (12=ipsec or 16) */
12499 +#define TALITOS_SESSION(sid) ((sid) & 0x0fffffff)
12500 +#define TALITOS_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
12502 +++ b/crypto/ocf/talitos/talitos_dev.h
12505 + * Freescale SEC (talitos) device dependent data structures
12507 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
12509 + * Redistribution and use in source and binary forms, with or without
12510 + * modification, are permitted provided that the following conditions
12513 + * 1. Redistributions of source code must retain the above copyright
12514 + * notice, this list of conditions and the following disclaimer.
12515 + * 2. Redistributions in binary form must reproduce the above copyright
12516 + * notice, this list of conditions and the following disclaimer in the
12517 + * documentation and/or other materials provided with the distribution.
12518 + * 3. The name of the author may not be used to endorse or promote products
12519 + * derived from this software without specific prior written permission.
12521 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12522 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12523 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12524 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12525 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12526 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12527 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12528 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12529 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12530 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12534 +/* device ID register values */
12535 +#define TALITOS_ID_SEC_2_0 0x40
12536 +#define TALITOS_ID_SEC_2_1 0x40 /* cross ref with IP block revision reg */
12539 + * following num_channels, channel-fifo-depth, exec-unit-mask, and
12540 + * descriptor-types-mask are for forward-compatibility with openfirmware
12541 + * flat device trees
12545 + * num_channels : the number of channels available in each SEC version.
12548 +/* n.b. this driver requires these values be a power of 2 */
12549 +#define TALITOS_NCHANNELS_SEC_1_0 4
12550 +#define TALITOS_NCHANNELS_SEC_1_2 1
12551 +#define TALITOS_NCHANNELS_SEC_2_0 4
12552 +#define TALITOS_NCHANNELS_SEC_2_01 4
12553 +#define TALITOS_NCHANNELS_SEC_2_1 4
12554 +#define TALITOS_NCHANNELS_SEC_2_4 4
12557 + * channel-fifo-depth : The number of descriptor
12558 + * pointers a channel fetch fifo can hold.
12560 +#define TALITOS_CHFIFOLEN_SEC_1_0 1
12561 +#define TALITOS_CHFIFOLEN_SEC_1_2 1
12562 +#define TALITOS_CHFIFOLEN_SEC_2_0 24
12563 +#define TALITOS_CHFIFOLEN_SEC_2_01 24
12564 +#define TALITOS_CHFIFOLEN_SEC_2_1 24
12565 +#define TALITOS_CHFIFOLEN_SEC_2_4 24
12568 + * exec-unit-mask : The bitmask representing what Execution Units (EUs)
12569 + * are available. EU information should be encoded following the SEC's
12570 + * EU_SEL0 bitfield documentation, i.e. as follows:
12572 + * bit 31 = set if SEC permits no-EU selection (should be always set)
12573 + * bit 30 = set if SEC has the ARC4 EU (AFEU)
12574 + * bit 29 = set if SEC has the des/3des EU (DEU)
12575 + * bit 28 = set if SEC has the message digest EU (MDEU)
12576 + * bit 27 = set if SEC has the random number generator EU (RNG)
12577 + * bit 26 = set if SEC has the public key EU (PKEU)
12578 + * bit 25 = set if SEC has the aes EU (AESU)
12579 + * bit 24 = set if SEC has the Kasumi EU (KEU)
12582 +#define TALITOS_HAS_EU_NONE (1<<0)
12583 +#define TALITOS_HAS_EU_AFEU (1<<1)
12584 +#define TALITOS_HAS_EU_DEU (1<<2)
12585 +#define TALITOS_HAS_EU_MDEU (1<<3)
12586 +#define TALITOS_HAS_EU_RNG (1<<4)
12587 +#define TALITOS_HAS_EU_PKEU (1<<5)
12588 +#define TALITOS_HAS_EU_AESU (1<<6)
12589 +#define TALITOS_HAS_EU_KEU (1<<7)
12591 +/* the corresponding masks for each SEC version */
12592 +#define TALITOS_HAS_EUS_SEC_1_0 0x7f
12593 +#define TALITOS_HAS_EUS_SEC_1_2 0x4d
12594 +#define TALITOS_HAS_EUS_SEC_2_0 0x7f
12595 +#define TALITOS_HAS_EUS_SEC_2_01 0x7f
12596 +#define TALITOS_HAS_EUS_SEC_2_1 0xff
12597 +#define TALITOS_HAS_EUS_SEC_2_4 0x7f
12600 + * descriptor-types-mask : The bitmask representing what descriptors
12601 + * are available. Descriptor type information should be encoded
12602 + * following the SEC's Descriptor Header Dword DESC_TYPE field
12603 + * documentation, i.e. as follows:
12605 + * bit 0 = set if SEC supports the aesu_ctr_nonsnoop desc. type
12606 + * bit 1 = set if SEC supports the ipsec_esp descriptor type
12607 + * bit 2 = set if SEC supports the common_nonsnoop desc. type
12608 + * bit 3 = set if SEC supports the 802.11i AES ccmp desc. type
12609 + * bit 4 = set if SEC supports the hmac_snoop_no_afeu desc. type
12610 + * bit 5 = set if SEC supports the srtp descriptor type
12611 + * bit 6 = set if SEC supports the non_hmac_snoop_no_afeu desc.type
12612 + * bit 7 = set if SEC supports the pkeu_assemble descriptor type
12613 + * bit 8 = set if SEC supports the aesu_key_expand_output desc.type
12614 + * bit 9 = set if SEC supports the pkeu_ptmul descriptor type
12615 + * bit 10 = set if SEC supports the common_nonsnoop_afeu desc. type
12616 + * bit 11 = set if SEC supports the pkeu_ptadd_dbl descriptor type
12618 + * ..and so on and so forth.
12620 +#define TALITOS_HAS_DT_AESU_CTR_NONSNOOP (1<<0)
12621 +#define TALITOS_HAS_DT_IPSEC_ESP (1<<1)
12622 +#define TALITOS_HAS_DT_COMMON_NONSNOOP (1<<2)
12624 +/* the corresponding masks for each SEC version */
12625 +#define TALITOS_HAS_DESCTYPES_SEC_2_0 0x01010ebf
12626 +#define TALITOS_HAS_DESCTYPES_SEC_2_1 0x012b0ebf
12629 + * a TALITOS_xxx_HI address points to the low data bits (32-63) of the register
12632 +/* global register offset addresses */
12633 +#define TALITOS_ID 0x1020
12634 +#define TALITOS_ID_HI 0x1024
12635 +#define TALITOS_MCR 0x1030 /* master control register */
12636 +#define TALITOS_MCR_HI 0x1038 /* master control register */
12637 +#define TALITOS_MCR_SWR 0x1
12638 +#define TALITOS_IMR 0x1008 /* interrupt mask register */
12639 +#define TALITOS_IMR_ALL 0x00010fff /* enable all interrupts mask */
12640 +#define TALITOS_IMR_ERRONLY 0x00010aaa /* enable error interrupts */
12641 +#define TALITOS_IMR_HI 0x100C /* interrupt mask register */
12642 +#define TALITOS_IMR_HI_ALL 0x00323333 /* enable all interrupts mask */
12643 +#define TALITOS_IMR_HI_ERRONLY 0x00222222 /* enable error interrupts */
12644 +#define TALITOS_ISR 0x1010 /* interrupt status register */
12645 +#define TALITOS_ISR_ERROR 0x00010faa /* errors mask */
12646 +#define TALITOS_ISR_DONE 0x00000055 /* channel(s) done mask */
12647 +#define TALITOS_ISR_HI 0x1014 /* interrupt status register */
12648 +#define TALITOS_ICR 0x1018 /* interrupt clear register */
12649 +#define TALITOS_ICR_HI 0x101C /* interrupt clear register */
12651 +/* channel register address stride */
12652 +#define TALITOS_CH_OFFSET 0x100
12654 +/* channel register offset addresses and bits */
12655 +#define TALITOS_CH_CCCR 0x1108 /* Crypto-Channel Config Register */
12656 +#define TALITOS_CH_CCCR_RESET 0x1 /* Channel Reset bit */
12657 +#define TALITOS_CH_CCCR_HI 0x110c /* Crypto-Channel Config Register */
12658 +#define TALITOS_CH_CCCR_HI_CDWE 0x10 /* Channel done writeback enable bit */
12659 +#define TALITOS_CH_CCCR_HI_NT 0x4 /* Notification type bit */
12660 +#define TALITOS_CH_CCCR_HI_CDIE 0x2 /* Channel Done Interrupt Enable bit */
12661 +#define TALITOS_CH_CCPSR 0x1110 /* Crypto-Channel Pointer Status Reg */
12662 +#define TALITOS_CH_CCPSR_HI 0x1114 /* Crypto-Channel Pointer Status Reg */
12663 +#define TALITOS_CH_FF 0x1148 /* Fetch FIFO */
12664 +#define TALITOS_CH_FF_HI 0x114c /* Fetch FIFO's FETCH_ADRS */
12665 +#define TALITOS_CH_CDPR 0x1140 /* Crypto-Channel Pointer Status Reg */
12666 +#define TALITOS_CH_CDPR_HI 0x1144 /* Crypto-Channel Pointer Status Reg */
12667 +#define TALITOS_CH_DESCBUF 0x1180 /* (thru 11bf) Crypto-Channel
12668 + * Descriptor Buffer (debug) */
12670 +/* execution unit register offset addresses and bits */
12671 +#define TALITOS_DEUSR 0x2028 /* DEU status register */
12672 +#define TALITOS_DEUSR_HI 0x202c /* DEU status register */
12673 +#define TALITOS_DEUISR 0x2030 /* DEU interrupt status register */
12674 +#define TALITOS_DEUISR_HI 0x2034 /* DEU interrupt status register */
12675 +#define TALITOS_DEUICR 0x2038 /* DEU interrupt control register */
12676 +#define TALITOS_DEUICR_HI 0x203c /* DEU interrupt control register */
12677 +#define TALITOS_AESUISR 0x4030 /* AESU interrupt status register */
12678 +#define TALITOS_AESUISR_HI 0x4034 /* AESU interrupt status register */
12679 +#define TALITOS_AESUICR 0x4038 /* AESU interrupt control register */
12680 +#define TALITOS_AESUICR_HI 0x403c /* AESU interrupt control register */
12681 +#define TALITOS_MDEUISR 0x6030 /* MDEU interrupt status register */
12682 +#define TALITOS_MDEUISR_HI 0x6034 /* MDEU interrupt status register */
12683 +#define TALITOS_RNGSR 0xa028 /* RNG status register */
12684 +#define TALITOS_RNGSR_HI 0xa02c /* RNG status register */
12685 +#define TALITOS_RNGSR_HI_RD 0x1 /* RNG Reset done */
12686 +#define TALITOS_RNGSR_HI_OFL 0xff0000/* number of dwords in RNG output FIFO*/
12687 +#define TALITOS_RNGDSR 0xa010 /* RNG data size register */
12688 +#define TALITOS_RNGDSR_HI 0xa014 /* RNG data size register */
12689 +#define TALITOS_RNG_FIFO 0xa800 /* RNG FIFO - pool of random numbers */
12690 +#define TALITOS_RNGISR 0xa030 /* RNG Interrupt status register */
12691 +#define TALITOS_RNGISR_HI 0xa034 /* RNG Interrupt status register */
12692 +#define TALITOS_RNGRCR 0xa018 /* RNG Reset control register */
12693 +#define TALITOS_RNGRCR_HI 0xa01c /* RNG Reset control register */
12694 +#define TALITOS_RNGRCR_HI_SR 0x1 /* RNG RNGRCR:Software Reset */
12696 +/* descriptor pointer entry */
12697 +struct talitos_desc_ptr {
12698 + u16 len; /* length */
12699 + u8 extent; /* jump (to s/g link table) and extent */
12700 + u8 res; /* reserved */
12701 + u32 ptr; /* pointer */
12705 +struct talitos_desc {
12706 + u32 hdr; /* header */
12707 + u32 res; /* reserved */
12708 + struct talitos_desc_ptr ptr[7]; /* ptr/len pair array */
12711 +/* talitos descriptor header (hdr) bits */
12713 +/* primary execution unit select */
12714 +#define TALITOS_SEL0_AFEU 0x10000000
12715 +#define TALITOS_SEL0_DEU 0x20000000
12716 +#define TALITOS_SEL0_MDEU 0x30000000
12717 +#define TALITOS_SEL0_RNG 0x40000000
12718 +#define TALITOS_SEL0_PKEU 0x50000000
12719 +#define TALITOS_SEL0_AESU 0x60000000
12721 +/* primary execution unit mode (MODE0) and derivatives */
12722 +#define TALITOS_MODE0_AESU_CBC 0x00200000
12723 +#define TALITOS_MODE0_AESU_ENC 0x00100000
12724 +#define TALITOS_MODE0_DEU_CBC 0x00400000
12725 +#define TALITOS_MODE0_DEU_3DES 0x00200000
12726 +#define TALITOS_MODE0_DEU_ENC 0x00100000
12727 +#define TALITOS_MODE0_MDEU_INIT 0x01000000 /* init starting regs */
12728 +#define TALITOS_MODE0_MDEU_HMAC 0x00800000
12729 +#define TALITOS_MODE0_MDEU_PAD 0x00400000 /* PD */
12730 +#define TALITOS_MODE0_MDEU_MD5 0x00200000
12731 +#define TALITOS_MODE0_MDEU_SHA256 0x00100000
12732 +#define TALITOS_MODE0_MDEU_SHA1 0x00000000 /* SHA-160 */
12733 +#define TALITOS_MODE0_MDEU_MD5_HMAC \
12734 + (TALITOS_MODE0_MDEU_MD5 | TALITOS_MODE0_MDEU_HMAC)
12735 +#define TALITOS_MODE0_MDEU_SHA256_HMAC \
12736 + (TALITOS_MODE0_MDEU_SHA256 | TALITOS_MODE0_MDEU_HMAC)
12737 +#define TALITOS_MODE0_MDEU_SHA1_HMAC \
12738 + (TALITOS_MODE0_MDEU_SHA1 | TALITOS_MODE0_MDEU_HMAC)
12740 +/* secondary execution unit select (SEL1) */
12741 +/* it's MDEU or nothing */
12742 +#define TALITOS_SEL1_MDEU 0x00030000
12744 +/* secondary execution unit mode (MODE1) and derivatives */
12745 +#define TALITOS_MODE1_MDEU_INIT 0x00001000 /* init starting regs */
12746 +#define TALITOS_MODE1_MDEU_HMAC 0x00000800
12747 +#define TALITOS_MODE1_MDEU_PAD 0x00000400 /* PD */
12748 +#define TALITOS_MODE1_MDEU_MD5 0x00000200
12749 +#define TALITOS_MODE1_MDEU_SHA256 0x00000100
12750 +#define TALITOS_MODE1_MDEU_SHA1 0x00000000 /* SHA-160 */
12751 +#define TALITOS_MODE1_MDEU_MD5_HMAC \
12752 + (TALITOS_MODE1_MDEU_MD5 | TALITOS_MODE1_MDEU_HMAC)
12753 +#define TALITOS_MODE1_MDEU_SHA256_HMAC \
12754 + (TALITOS_MODE1_MDEU_SHA256 | TALITOS_MODE1_MDEU_HMAC)
12755 +#define TALITOS_MODE1_MDEU_SHA1_HMAC \
12756 + (TALITOS_MODE1_MDEU_SHA1 | TALITOS_MODE1_MDEU_HMAC)
12758 +/* direction of overall data flow (DIR) */
12759 +#define TALITOS_DIR_OUTBOUND 0x00000000
12760 +#define TALITOS_DIR_INBOUND 0x00000002
12762 +/* done notification (DN) */
12763 +#define TALITOS_DONE_NOTIFY 0x00000001
12765 +/* descriptor types */
12766 +/* odd numbers here are valid on SEC2 and greater only (e.g. ipsec_esp) */
12767 +#define TD_TYPE_AESU_CTR_NONSNOOP (0 << 3)
12768 +#define TD_TYPE_IPSEC_ESP (1 << 3)
12769 +#define TD_TYPE_COMMON_NONSNOOP_NO_AFEU (2 << 3)
12770 +#define TD_TYPE_HMAC_SNOOP_NO_AFEU (4 << 3)
12772 +#define TALITOS_HDR_DONE_BITS 0xff000000
12774 +#define DPRINTF(a...) do { \
12776 + printk("%s: ", sc ? \
12777 + device_get_nameunit(sc->sc_cdev) : "talitos"); \
12782 +++ b/crypto/ocf/random.c
12785 + * A system independent way of adding entropy to the kernel's pool
12786 + * this way the drivers can focus on the real work and we can take
12787 + * care of pushing it to the appropriate place in the kernel.
12789 + * This should be fast and callable from timers/interrupts
12791 + * Written by David McCullough <david_mccullough@securecomputing.com>
12792 + * Copyright (C) 2006-2007 David McCullough
12793 + * Copyright (C) 2004-2005 Intel Corporation.
12797 + * The free distribution and use of this software in both source and binary
12798 + * form is allowed (with or without changes) provided that:
12800 + * 1. distributions of this source code include the above copyright
12801 + * notice, this list of conditions and the following disclaimer;
12803 + * 2. distributions in binary form include the above copyright
12804 + * notice, this list of conditions and the following disclaimer
12805 + * in the documentation and/or other associated materials;
12807 + * 3. the copyright holder's name is not used to endorse products
12808 + * built using this software without specific written permission.
12810 + * ALTERNATIVELY, provided that this notice is retained in full, this product
12811 + * may be distributed under the terms of the GNU General Public License (GPL),
12812 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
12816 + * This software is provided 'as is' with no explicit or implied warranties
12817 + * in respect of its properties, including, but not limited to, correctness
12818 + * and/or fitness for purpose.
12821 +#ifndef AUTOCONF_INCLUDED
12822 +#include <linux/config.h>
12824 +#include <linux/module.h>
12825 +#include <linux/init.h>
12826 +#include <linux/list.h>
12827 +#include <linux/slab.h>
12828 +#include <linux/wait.h>
12829 +#include <linux/sched.h>
12830 +#include <linux/spinlock.h>
12831 +#include <linux/version.h>
12832 +#include <linux/unistd.h>
12833 +#include <linux/poll.h>
12834 +#include <linux/random.h>
12835 +#include <cryptodev.h>
12837 +#ifdef CONFIG_OCF_FIPS
12838 +#include "rndtest.h"
12841 +#ifndef HAS_RANDOM_INPUT_WAIT
12842 +#error "Please do not enable OCF_RANDOMHARVEST unless you have applied patches"
12846 + * a hack to access the debug levels from the crypto driver
12848 +extern int crypto_debug;
12849 +#define debug crypto_debug
12852 + * a list of all registered random providers
12854 +static LIST_HEAD(random_ops);
12855 +static int started = 0;
12856 +static int initted = 0;
12858 +struct random_op {
12859 + struct list_head random_list;
12860 + u_int32_t driverid;
12861 + int (*read_random)(void *arg, u_int32_t *buf, int len);
12865 +static int random_proc(void *arg);
12867 +static pid_t randomproc = (pid_t) -1;
12868 +static spinlock_t random_lock;
12871 + * just init the spin locks
12874 +crypto_random_init(void)
12876 + spin_lock_init(&random_lock);
12882 + * Add the given random reader to our list (if not present)
12883 + * and start the thread (if not already started)
12885 + * we have to assume that driver id is ok for now
12889 + u_int32_t driverid,
12890 + int (*read_random)(void *arg, u_int32_t *buf, int len),
12893 + unsigned long flags;
12895 + struct random_op *rops, *tmp;
12897 + dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
12898 + __FUNCTION__, driverid, read_random, arg);
12901 + crypto_random_init();
12904 + struct cryptocap *cap;
12906 + cap = crypto_checkdriver(driverid);
12911 + list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
12912 + if (rops->driverid == driverid && rops->read_random == read_random)
12916 + rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
12920 + rops->driverid = driverid;
12921 + rops->read_random = read_random;
12924 + spin_lock_irqsave(&random_lock, flags);
12925 + list_add_tail(&rops->random_list, &random_ops);
12927 + randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
12928 + if (randomproc < 0) {
12929 + ret = randomproc;
12930 + printk("crypto: crypto_rregister cannot start random thread; "
12931 + "error %d", ret);
12935 + spin_unlock_irqrestore(&random_lock, flags);
12939 +EXPORT_SYMBOL(crypto_rregister);
12942 +crypto_runregister_all(u_int32_t driverid)
12944 + struct random_op *rops, *tmp;
12945 + unsigned long flags;
12947 + dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);
12949 + list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
12950 + if (rops->driverid == driverid) {
12951 + list_del(&rops->random_list);
12956 + spin_lock_irqsave(&random_lock, flags);
12957 + if (list_empty(&random_ops) && started)
12958 + kill_proc(randomproc, SIGKILL, 1);
12959 + spin_unlock_irqrestore(&random_lock, flags);
12962 +EXPORT_SYMBOL(crypto_runregister_all);
12965 + * while we can add entropy to random.c continue to read random data from
12966 + * the drivers and push it to random.
12969 +random_proc(void *arg)
12977 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
12979 + spin_lock_irq(&current->sigmask_lock);
12980 + sigemptyset(&current->blocked);
12981 + recalc_sigpending(current);
12982 + spin_unlock_irq(&current->sigmask_lock);
12983 + sprintf(current->comm, "ocf-random");
12985 + daemonize("ocf-random");
12986 + allow_signal(SIGKILL);
12990 + set_fs(get_ds());
12992 +#ifdef CONFIG_OCF_FIPS
12993 +#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
12995 +#define NUM_INT 32
12999 + * some devices can transfer their RNG data direct into memory,
13000 + * so make sure it is device friendly
13002 + buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
13003 + if (NULL == buf) {
13004 + printk("crypto: RNG could not allocate memory\n");
13005 + retval = -ENOMEM;
13009 + wantcnt = NUM_INT; /* start by adding some entropy */
13012 + * it's possible due to errors or driver removal that we no longer
13013 + * have anything to do, if so exit or we will consume all the CPU
13016 + while (!list_empty(&random_ops)) {
13017 + struct random_op *rops, *tmp;
13019 +#ifdef CONFIG_OCF_FIPS
13021 + wantcnt = NUM_INT; /* FIPs mode can do 20000 bits or none */
13024 + /* see if we can get enough entropy to make the world
13025 + * a better place.
13027 + while (bufcnt < wantcnt && bufcnt < NUM_INT) {
13028 + list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
13030 + n = (*rops->read_random)(rops->arg, &buf[bufcnt],
13031 + NUM_INT - bufcnt);
13033 + /* on failure remove the random number generator */
13035 + list_del(&rops->random_list);
13036 + printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
13039 + } else if (n > 0)
13042 + /* give up CPU for a bit, just in case as this is a loop */
13047 +#ifdef CONFIG_OCF_FIPS
13048 + if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
13049 + dprintk("crypto: buffer had fips errors, discarding\n");
13055 + * if we have a certified buffer, we can send some data
13056 + * to /dev/random and move along
13058 + if (bufcnt > 0) {
13059 + /* add what we have */
13060 + random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
13064 + /* give up CPU for a bit so we don't hog while filling */
13067 + /* wait for needing more */
13068 + wantcnt = random_input_wait();
13070 + if (wantcnt <= 0)
13071 + wantcnt = 0; /* try to get some info again */
13073 + /* round up to one word or we can loop forever */
13074 + wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
13075 + if (wantcnt > NUM_INT) {
13076 + wantcnt = NUM_INT;
13079 + if (signal_pending(current)) {
13080 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
13081 + spin_lock_irq(&current->sigmask_lock);
13083 + flush_signals(current);
13084 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
13085 + spin_unlock_irq(&current->sigmask_lock);
13093 + spin_lock_irq(&random_lock);
13094 + randomproc = (pid_t) -1;
13096 + spin_unlock_irq(&random_lock);
13102 +++ b/crypto/ocf/ocf-bench.c
13105 + * A loadable module that benchmarks the OCF crypto speed from kernel space.
13107 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
13111 + * The free distribution and use of this software in both source and binary
13112 + * form is allowed (with or without changes) provided that:
13114 + * 1. distributions of this source code include the above copyright
13115 + * notice, this list of conditions and the following disclaimer;
13117 + * 2. distributions in binary form include the above copyright
13118 + * notice, this list of conditions and the following disclaimer
13119 + * in the documentation and/or other associated materials;
13121 + * 3. the copyright holder's name is not used to endorse products
13122 + * built using this software without specific written permission.
13124 + * ALTERNATIVELY, provided that this notice is retained in full, this product
13125 + * may be distributed under the terms of the GNU General Public License (GPL),
13126 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
13130 + * This software is provided 'as is' with no explicit or implied warranties
13131 + * in respect of its properties, including, but not limited to, correctness
13132 + * and/or fitness for purpose.
13136 +#ifndef AUTOCONF_INCLUDED
13137 +#include <linux/config.h>
13139 +#include <linux/module.h>
13140 +#include <linux/init.h>
13141 +#include <linux/list.h>
13142 +#include <linux/slab.h>
13143 +#include <linux/wait.h>
13144 +#include <linux/sched.h>
13145 +#include <linux/spinlock.h>
13146 +#include <linux/version.h>
13147 +#include <linux/interrupt.h>
13148 +#include <cryptodev.h>
13150 +#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
13151 +#define BENCH_IXP_ACCESS_LIB 1
13153 +#ifdef BENCH_IXP_ACCESS_LIB
13154 +#include <IxTypes.h>
13155 +#include <IxOsBuffMgt.h>
13156 +#include <IxNpeDl.h>
13157 +#include <IxCryptoAcc.h>
13158 +#include <IxQMgr.h>
13159 +#include <IxOsServices.h>
13160 +#include <IxOsCacheMMU.h>
13164 + * support for access lib version 1.4
13166 +#ifndef IX_MBUF_PRIV
13167 +#define IX_MBUF_PRIV(x) ((x)->priv)
13171 + * the number of simultaneously active requests
13173 +static int request_q_len = 20;
13174 +module_param(request_q_len, int, 0);
13175 +MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
13177 + * how many requests we want to have processed
13179 +static int request_num = 1024;
13180 +module_param(request_num, int, 0);
13181 +MODULE_PARM_DESC(request_num, "run for at least this many requests");
13183 + * the size of each request
13185 +static int request_size = 1500;
13186 +module_param(request_size, int, 0);
13187 +MODULE_PARM_DESC(request_size, "size of each request");
13190 + * a structure for each request
13193 + struct work_struct work;
13194 +#ifdef BENCH_IXP_ACCESS_LIB
13197 + unsigned char *buffer;
13200 +static request_t *requests;
13202 +static int outstanding;
13205 +/*************************************************************************/
13207 + * OCF benchmark routines
13210 +static uint64_t ocf_cryptoid;
13211 +static int ocf_init(void);
13212 +static int ocf_cb(struct cryptop *crp);
13213 +static void ocf_request(void *arg);
13214 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13215 +static void ocf_request_wq(struct work_struct *work);
13222 + struct cryptoini crie, cria;
13223 + struct cryptodesc crda, crde;
13225 + memset(&crie, 0, sizeof(crie));
13226 + memset(&cria, 0, sizeof(cria));
13227 + memset(&crde, 0, sizeof(crde));
13228 + memset(&crda, 0, sizeof(crda));
13230 + cria.cri_alg = CRYPTO_SHA1_HMAC;
13231 + cria.cri_klen = 20 * 8;
13232 + cria.cri_key = "0123456789abcdefghij";
13234 + crie.cri_alg = CRYPTO_3DES_CBC;
13235 + crie.cri_klen = 24 * 8;
13236 + crie.cri_key = "0123456789abcdefghijklmn";
13238 + crie.cri_next = &cria;
13240 + error = crypto_newsession(&ocf_cryptoid, &crie, 0);
13242 + printk("crypto_newsession failed %d\n", error);
13249 +ocf_cb(struct cryptop *crp)
13251 + request_t *r = (request_t *) crp->crp_opaque;
13253 + if (crp->crp_etype)
13254 + printk("Error in OCF processing: %d\n", crp->crp_etype);
13256 + crypto_freereq(crp);
13259 + if (total > request_num) {
13264 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13265 + INIT_WORK(&r->work, ocf_request_wq);
13267 + INIT_WORK(&r->work, ocf_request, r);
13269 + schedule_work(&r->work);
13275 +ocf_request(void *arg)
13277 + request_t *r = arg;
13278 + struct cryptop *crp = crypto_getreq(2);
13279 + struct cryptodesc *crde, *crda;
13286 + crde = crp->crp_desc;
13287 + crda = crde->crd_next;
13289 + crda->crd_skip = 0;
13290 + crda->crd_flags = 0;
13291 + crda->crd_len = request_size;
13292 + crda->crd_inject = request_size;
13293 + crda->crd_alg = CRYPTO_SHA1_HMAC;
13294 + crda->crd_key = "0123456789abcdefghij";
13295 + crda->crd_klen = 20 * 8;
13297 + crde->crd_skip = 0;
13298 + crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
13299 + crde->crd_len = request_size;
13300 + crde->crd_inject = request_size;
13301 + crde->crd_alg = CRYPTO_3DES_CBC;
13302 + crde->crd_key = "0123456789abcdefghijklmn";
13303 + crde->crd_klen = 24 * 8;
13305 + crp->crp_ilen = request_size + 64;
13306 + crp->crp_flags = CRYPTO_F_CBIMM;
13307 + crp->crp_buf = (caddr_t) r->buffer;
13308 + crp->crp_callback = ocf_cb;
13309 + crp->crp_sid = ocf_cryptoid;
13310 + crp->crp_opaque = (caddr_t) r;
13311 + crypto_dispatch(crp);
13314 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13316 +ocf_request_wq(struct work_struct *work)
13318 + request_t *r = container_of(work, request_t, work);
13323 +/*************************************************************************/
13324 +#ifdef BENCH_IXP_ACCESS_LIB
13325 +/*************************************************************************/
13327 + * CryptoAcc benchmark routines
13330 +static IxCryptoAccCtx ixp_ctx;
13331 +static UINT32 ixp_ctx_id;
13332 +static IX_MBUF ixp_pri;
13333 +static IX_MBUF ixp_sec;
13334 +static int ixp_registered = 0;
13336 +static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
13337 + IxCryptoAccStatus status);
13338 +static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
13339 + IxCryptoAccStatus status);
13340 +static void ixp_request(void *arg);
13341 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13342 +static void ixp_request_wq(struct work_struct *work);
13348 + IxCryptoAccStatus status;
13350 + ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
13351 + ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13352 + ixp_ctx.cipherCtx.cipherKeyLen = 24;
13353 + ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13354 + ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
13355 + memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
13357 + ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
13358 + ixp_ctx.authCtx.authDigestLen = 12;
13359 + ixp_ctx.authCtx.aadLen = 0;
13360 + ixp_ctx.authCtx.authKeyLen = 20;
13361 + memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
13363 + ixp_ctx.useDifferentSrcAndDestMbufs = 0;
13364 + ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
13366 + IX_MBUF_MLEN(&ixp_pri) = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
13367 + IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
13368 + IX_MBUF_MLEN(&ixp_sec) = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
13369 + IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
13371 + status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
13372 + ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
13374 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
13375 + while (!ixp_registered)
13377 + return ixp_registered < 0 ? -1 : 0;
13380 + printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
13385 +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
13388 + IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
13389 + kfree(IX_MBUF_MDATA(bufp));
13390 + IX_MBUF_MDATA(bufp) = NULL;
13393 + if (IX_CRYPTO_ACC_STATUS_WAIT == status)
13395 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
13396 + ixp_registered = 1;
13398 + ixp_registered = -1;
13406 + IxCryptoAccStatus status)
13408 + request_t *r = NULL;
13411 + if (total > request_num) {
13416 + if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
13417 + printk("crappo %p %p\n", sbufp, r);
13422 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13423 + INIT_WORK(&r->work, ixp_request_wq);
13425 + INIT_WORK(&r->work, ixp_request, r);
13427 + schedule_work(&r->work);
13431 +ixp_request(void *arg)
13433 + request_t *r = arg;
13434 + IxCryptoAccStatus status;
13436 + memset(&r->mbuf, 0, sizeof(r->mbuf));
13437 + IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
13438 + IX_MBUF_MDATA(&r->mbuf) = r->buffer;
13439 + IX_MBUF_PRIV(&r->mbuf) = r;
13440 + status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
13441 + 0, request_size, 0, request_size, request_size, r->buffer);
13442 + if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
13443 + printk("status1 = %d\n", status);
13450 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13452 +ixp_request_wq(struct work_struct *work)
13454 + request_t *r = container_of(work, request_t, work);
13459 +/*************************************************************************/
13460 +#endif /* BENCH_IXP_ACCESS_LIB */
13461 +/*************************************************************************/
13464 +ocfbench_init(void)
13466 + int i, jstart, jstop;
13468 + printk("Crypto Speed tests\n");
13470 + requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
13472 + printk("malloc failed\n");
13476 + for (i = 0; i < request_q_len; i++) {
13477 + /* +64 for return data */
13478 + requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
13479 + if (!requests[i].buffer) {
13480 + printk("malloc failed\n");
13483 + memset(requests[i].buffer, '0' + i, request_size + 128);
13489 + printk("OCF: testing ...\n");
13491 + total = outstanding = 0;
13492 + jstart = jiffies;
13493 + for (i = 0; i < request_q_len; i++) {
13495 + ocf_request(&requests[i]);
13497 + while (outstanding > 0)
13501 + printk("OCF: %d requests of %d bytes in %d jiffies\n", total, request_size,
13504 +#ifdef BENCH_IXP_ACCESS_LIB
13508 + printk("IXP: testing ...\n");
13510 + total = outstanding = 0;
13511 + jstart = jiffies;
13512 + for (i = 0; i < request_q_len; i++) {
13514 + ixp_request(&requests[i]);
13516 + while (outstanding > 0)
13520 + printk("IXP: %d requests of %d bytes in %d jiffies\n", total, request_size,
13522 +#endif /* BENCH_IXP_ACCESS_LIB */
13524 + for (i = 0; i < request_q_len; i++)
13525 + kfree(requests[i].buffer);
13527 + return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
13530 +static void __exit ocfbench_exit(void)
13534 +module_init(ocfbench_init);
13535 +module_exit(ocfbench_exit);
13537 +MODULE_LICENSE("BSD");
13538 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
13539 +MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
13541 +++ b/crypto/ocf/ixp4xx/ixp4xx.c
13544 + * An OCF module that uses Intel's IXP CryptACC API to do the crypto.
13545 + * This driver requires the IXP400 Access Library that is available
13546 + * from Intel in order to operate (or compile).
13548 + * Written by David McCullough <david_mccullough@securecomputing.com>
13549 + * Copyright (C) 2006-2007 David McCullough
13550 + * Copyright (C) 2004-2005 Intel Corporation.
13554 + * The free distribution and use of this software in both source and binary
13555 + * form is allowed (with or without changes) provided that:
13557 + * 1. distributions of this source code include the above copyright
13558 + * notice, this list of conditions and the following disclaimer;
13560 + * 2. distributions in binary form include the above copyright
13561 + * notice, this list of conditions and the following disclaimer
13562 + * in the documentation and/or other associated materials;
13564 + * 3. the copyright holder's name is not used to endorse products
13565 + * built using this software without specific written permission.
13567 + * ALTERNATIVELY, provided that this notice is retained in full, this product
13568 + * may be distributed under the terms of the GNU General Public License (GPL),
13569 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
13573 + * This software is provided 'as is' with no explicit or implied warranties
13574 + * in respect of its properties, including, but not limited to, correctness
13575 + * and/or fitness for purpose.
13578 +#ifndef AUTOCONF_INCLUDED
13579 +#include <linux/config.h>
13581 +#include <linux/module.h>
13582 +#include <linux/init.h>
13583 +#include <linux/list.h>
13584 +#include <linux/slab.h>
13585 +#include <linux/sched.h>
13586 +#include <linux/wait.h>
13587 +#include <linux/crypto.h>
13588 +#include <linux/interrupt.h>
13589 +#include <asm/scatterlist.h>
13591 +#include <IxTypes.h>
13592 +#include <IxOsBuffMgt.h>
13593 +#include <IxNpeDl.h>
13594 +#include <IxCryptoAcc.h>
13595 +#include <IxQMgr.h>
13596 +#include <IxOsServices.h>
13597 +#include <IxOsCacheMMU.h>
13599 +#include <cryptodev.h>
13602 +#ifndef IX_MBUF_PRIV
13603 +#define IX_MBUF_PRIV(x) ((x)->priv)
13609 + struct list_head ixp_q_list;
13610 + struct ixp_data *ixp_q_data;
13611 + struct cryptop *ixp_q_crp;
13612 + struct cryptodesc *ixp_q_ccrd;
13613 + struct cryptodesc *ixp_q_acrd;
13614 + IX_MBUF ixp_q_mbuf;
13615 + UINT8 *ixp_hash_dest; /* Location for hash in client buffer */
13616 + UINT8 *ixp_hash_src; /* Location of hash in internal buffer */
13617 + unsigned char ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
13618 + unsigned char *ixp_q_iv;
13622 + int ixp_registered; /* is the context registered */
13623 + int ixp_crd_flags; /* detect direction changes */
13625 + int ixp_cipher_alg;
13626 + int ixp_auth_alg;
13628 + UINT32 ixp_ctx_id;
13629 + UINT32 ixp_hash_key_id; /* used when hashing */
13630 + IxCryptoAccCtx ixp_ctx;
13631 + IX_MBUF ixp_pri_mbuf;
13632 + IX_MBUF ixp_sec_mbuf;
13634 + struct work_struct ixp_pending_work;
13635 + struct work_struct ixp_registration_work;
13636 + struct list_head ixp_q; /* unprocessed requests */
13641 +#define MAX_IOP_SIZE 64 /* words */
13642 +#define MAX_OOP_SIZE 128
13644 +#define MAX_PARAMS 3
13647 + struct list_head pkq_list;
13648 + struct cryptkop *pkq_krp;
13650 + IxCryptoAccPkeEauInOperands pkq_op;
13651 + IxCryptoAccPkeEauOpResult pkq_result;
13653 + UINT32 pkq_ibuf0[MAX_IOP_SIZE];
13654 + UINT32 pkq_ibuf1[MAX_IOP_SIZE];
13655 + UINT32 pkq_ibuf2[MAX_IOP_SIZE];
13656 + UINT32 pkq_obuf[MAX_OOP_SIZE];
13659 +static LIST_HEAD(ixp_pkq); /* current PK wait list */
13660 +static struct ixp_pkq *ixp_pk_cur;
13661 +static spinlock_t ixp_pkq_lock;
13663 +#endif /* __ixp46X */
13665 +static int ixp_blocked = 0;
13667 +static int32_t ixp_id = -1;
13668 +static struct ixp_data **ixp_sessions = NULL;
13669 +static u_int32_t ixp_sesnum = 0;
13671 +static int ixp_process(device_t, struct cryptop *, int);
13672 +static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
13673 +static int ixp_freesession(device_t, u_int64_t);
13675 +static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
13678 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
13679 +static kmem_cache_t *qcache;
13681 +static struct kmem_cache *qcache;
13684 +#define debug ixp_debug
13685 +static int ixp_debug = 0;
13686 +module_param(ixp_debug, int, 0644);
13687 +MODULE_PARM_DESC(ixp_debug, "Enable debug");
13689 +static int ixp_init_crypto = 1;
13690 +module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
13691 +MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");
13693 +static void ixp_process_pending(void *arg);
13694 +static void ixp_registration(void *arg);
13695 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13696 +static void ixp_process_pending_wq(struct work_struct *work);
13697 +static void ixp_registration_wq(struct work_struct *work);
13701 + * dummy device structure
13705 + softc_device_decl sc_dev;
13708 +static device_method_t ixp_methods = {
13709 + /* crypto device methods */
13710 + DEVMETHOD(cryptodev_newsession, ixp_newsession),
13711 + DEVMETHOD(cryptodev_freesession,ixp_freesession),
13712 + DEVMETHOD(cryptodev_process, ixp_process),
13714 + DEVMETHOD(cryptodev_kprocess, ixp_kprocess),
13719 + * Generate a new software session.
13722 +ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
13724 + struct ixp_data *ixp;
13726 +#define AUTH_LEN(cri, def) \
13727 + (cri->cri_mlen ? cri->cri_mlen : (def))
13729 + dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
13730 + if (sid == NULL || cri == NULL) {
13731 + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
13735 + if (ixp_sessions) {
13736 + for (i = 1; i < ixp_sesnum; i++)
13737 + if (ixp_sessions[i] == NULL)
13740 + i = 1; /* NB: to silence compiler warning */
13742 + if (ixp_sessions == NULL || i == ixp_sesnum) {
13743 + struct ixp_data **ixpd;
13745 + if (ixp_sessions == NULL) {
13746 + i = 1; /* We leave ixp_sessions[0] empty */
13747 + ixp_sesnum = CRYPTO_SW_SESSIONS;
13751 + ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
13752 + if (ixpd == NULL) {
13753 + /* Reset session number */
13754 + if (ixp_sesnum == CRYPTO_SW_SESSIONS)
13758 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
13761 + memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
13763 + /* Copy existing sessions */
13764 + if (ixp_sessions) {
13765 + memcpy(ixpd, ixp_sessions,
13766 + (ixp_sesnum / 2) * sizeof(struct ixp_data *));
13767 + kfree(ixp_sessions);
13770 + ixp_sessions = ixpd;
13773 + ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
13775 + if (ixp_sessions[i] == NULL) {
13776 + ixp_freesession(NULL, i);
13777 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
13783 + ixp = ixp_sessions[i];
13784 + memset(ixp, 0, sizeof(*ixp));
13786 + ixp->ixp_cipher_alg = -1;
13787 + ixp->ixp_auth_alg = -1;
13788 + ixp->ixp_ctx_id = -1;
13789 + INIT_LIST_HEAD(&ixp->ixp_q);
13791 + ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
13794 + switch (cri->cri_alg) {
13795 + case CRYPTO_DES_CBC:
13796 + ixp->ixp_cipher_alg = cri->cri_alg;
13797 + ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
13798 + ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13799 + ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13800 + ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13801 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
13802 + IX_CRYPTO_ACC_DES_IV_64;
13803 + memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13804 + cri->cri_key, (cri->cri_klen + 7) / 8);
13807 + case CRYPTO_3DES_CBC:
13808 + ixp->ixp_cipher_alg = cri->cri_alg;
13809 + ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
13810 + ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13811 + ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13812 + ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13813 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
13814 + IX_CRYPTO_ACC_DES_IV_64;
13815 + memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13816 + cri->cri_key, (cri->cri_klen + 7) / 8);
13819 + case CRYPTO_RIJNDAEL128_CBC:
13820 + ixp->ixp_cipher_alg = cri->cri_alg;
13821 + ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
13822 + ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13823 + ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13824 + ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
13825 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
13826 + memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13827 + cri->cri_key, (cri->cri_klen + 7) / 8);
13831 + case CRYPTO_MD5_HMAC:
13832 + ixp->ixp_auth_alg = cri->cri_alg;
13833 + ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
13834 + ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
13835 + ixp->ixp_ctx.authCtx.aadLen = 0;
13836 + /* Only MD5_HMAC needs a key */
13837 + if (cri->cri_alg == CRYPTO_MD5_HMAC) {
13838 + ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
13839 + if (ixp->ixp_ctx.authCtx.authKeyLen >
13840 + sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
13842 + "ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
13844 + ixp_freesession(NULL, i);
13847 + memcpy(ixp->ixp_ctx.authCtx.key.authKey,
13848 + cri->cri_key, (cri->cri_klen + 7) / 8);
13852 + case CRYPTO_SHA1:
13853 + case CRYPTO_SHA1_HMAC:
13854 + ixp->ixp_auth_alg = cri->cri_alg;
13855 + ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
13856 + ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
13857 + ixp->ixp_ctx.authCtx.aadLen = 0;
13858 + /* Only SHA1_HMAC needs a key */
13859 + if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
13860 + ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
13861 + if (ixp->ixp_ctx.authCtx.authKeyLen >
13862 + sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
13864 + "ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
13866 + ixp_freesession(NULL, i);
13869 + memcpy(ixp->ixp_ctx.authCtx.key.authKey,
13870 + cri->cri_key, (cri->cri_klen + 7) / 8);
13875 + printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
13876 + ixp_freesession(NULL, i);
13879 + cri = cri->cri_next;
13882 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13883 + INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
13884 + INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
13886 + INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
13887 + INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
13895 + * Free a session.
13898 +ixp_freesession(device_t dev, u_int64_t tid)
13900 + u_int32_t sid = CRYPTO_SESID2LID(tid);
13902 + dprintk("%s()\n", __FUNCTION__);
13903 + if (sid > ixp_sesnum || ixp_sessions == NULL ||
13904 + ixp_sessions[sid] == NULL) {
13905 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
13909 + /* Silently accept and return */
13913 + if (ixp_sessions[sid]) {
13914 + if (ixp_sessions[sid]->ixp_ctx_id != -1) {
13915 + ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
13916 + ixp_sessions[sid]->ixp_ctx_id = -1;
13919 + flush_scheduled_work();
13921 + kfree(ixp_sessions[sid]);
13923 + ixp_sessions[sid] = NULL;
13924 + if (ixp_blocked) {
13926 + crypto_unblock(ixp_id, CRYPTO_SYMQ);
13933 + * callback for when hash processing is complete
13937 +ixp_hash_perform_cb(
13938 + UINT32 hash_key_id,
13940 + IxCryptoAccStatus status)
13944 + dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
13946 + if (bufp == NULL) {
13947 + printk("ixp: NULL buf in %s\n", __FUNCTION__);
13951 + q = IX_MBUF_PRIV(bufp);
13953 + printk("ixp: NULL priv in %s\n", __FUNCTION__);
13957 + if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
13958 + /* On success, need to copy hash back into original client buffer */
13959 + memcpy(q->ixp_hash_dest, q->ixp_hash_src,
13960 + (q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
13961 + SHA1_HASH_LEN : MD5_HASH_LEN);
13964 + printk("ixp: hash perform failed status=%d\n", status);
13965 + q->ixp_q_crp->crp_etype = EINVAL;
13968 + /* Free internal buffer used for hashing */
13969 + kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
13971 + crypto_done(q->ixp_q_crp);
13972 + kmem_cache_free(qcache, q);
13976 + * setup a request and perform it
13979 +ixp_q_process(struct ixp_q *q)
13981 + IxCryptoAccStatus status;
13982 + struct ixp_data *ixp = q->ixp_q_data;
13983 + int auth_off = 0;
13984 + int auth_len = 0;
13985 + int crypt_off = 0;
13986 + int crypt_len = 0;
13988 + char *crypt_func;
13990 + dprintk("%s(%p)\n", __FUNCTION__, q);
13992 + if (q->ixp_q_ccrd) {
13993 + if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
13994 + q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
13996 + q->ixp_q_iv = q->ixp_q_iv_data;
13997 + crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
13998 + q->ixp_q_ccrd->crd_inject,
13999 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
14000 + (caddr_t) q->ixp_q_iv);
14003 + if (q->ixp_q_acrd) {
14004 + auth_off = q->ixp_q_acrd->crd_skip;
14005 + auth_len = q->ixp_q_acrd->crd_len;
14006 + icv_off = q->ixp_q_acrd->crd_inject;
14009 + crypt_off = q->ixp_q_ccrd->crd_skip;
14010 + crypt_len = q->ixp_q_ccrd->crd_len;
14011 + } else { /* if (q->ixp_q_acrd) */
14012 + auth_off = q->ixp_q_acrd->crd_skip;
14013 + auth_len = q->ixp_q_acrd->crd_len;
14014 + icv_off = q->ixp_q_acrd->crd_inject;
14017 + if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
14018 + struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
14019 + if (skb_shinfo(skb)->nr_frags) {
14021 + * DAVIDM fix this limitation one day by using
14022 + * a buffer pool and chaining, it is not currently
14023 + * needed for current user/kernel space acceleration
14025 + printk("ixp: Cannot handle fragmented skb's yet !\n");
14026 + q->ixp_q_crp->crp_etype = ENOENT;
14029 + IX_MBUF_MLEN(&q->ixp_q_mbuf) =
14030 + IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = skb->len;
14031 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
14032 + } else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
14033 + struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
14034 + if (uiop->uio_iovcnt != 1) {
14036 + * DAVIDM fix this limitation one day by using
14037 + * a buffer pool and chaining, it is not currently
14038 + * needed for current user/kernel space acceleration
14040 + printk("ixp: Cannot handle more than 1 iovec yet !\n");
14041 + q->ixp_q_crp->crp_etype = ENOENT;
14044 + IX_MBUF_MLEN(&q->ixp_q_mbuf) =
14045 + IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
14046 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
14047 + } else /* contig buffer */ {
14048 + IX_MBUF_MLEN(&q->ixp_q_mbuf) =
14049 + IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
14050 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
14053 + IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
14055 + if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
14057 + * For SHA1 and MD5 hash, need to create an internal buffer that is big
14058 + * enough to hold the original data + the appropriate padding for the
14059 + * hash algorithm.
14061 + UINT8 *tbuf = NULL;
14063 + IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
14064 + ((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
14065 + tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
14067 + if (IX_MBUF_MDATA(&q->ixp_q_mbuf) == NULL) {
14068 + printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
14069 + IX_MBUF_MLEN(&q->ixp_q_mbuf));
14070 + q->ixp_q_crp->crp_etype = ENOMEM;
14073 + memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
14075 + /* Set location in client buffer to copy hash into */
14076 + q->ixp_hash_dest =
14077 + &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
14079 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
14081 + /* Set location in internal buffer for where hash starts */
14082 + q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
14084 + crypt_func = "ixCryptoAccHashPerform";
14085 + status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
14086 + &q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
14087 + &ixp->ixp_hash_key_id);
14090 + crypt_func = "ixCryptoAccAuthCryptPerform";
14091 + status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
14092 + NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
14096 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14099 + if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
14100 + q->ixp_q_crp->crp_etype = ENOMEM;
14104 + printk("ixp: %s failed %u\n", crypt_func, status);
14105 + q->ixp_q_crp->crp_etype = EINVAL;
14108 + crypto_done(q->ixp_q_crp);
14109 + kmem_cache_free(qcache, q);
14114 + * because we cannot process the Q from the Register callback
14115 + * we do it here on a task Q.
14119 +ixp_process_pending(void *arg)
14121 + struct ixp_data *ixp = arg;
14122 + struct ixp_q *q = NULL;
14124 + dprintk("%s(%p)\n", __FUNCTION__, arg);
14129 + while (!list_empty(&ixp->ixp_q)) {
14130 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14131 + list_del(&q->ixp_q_list);
14132 + ixp_q_process(q);
14136 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
14138 +ixp_process_pending_wq(struct work_struct *work)
14140 + struct ixp_data *ixp = container_of(work, struct ixp_data,
14141 + ixp_pending_work);
14142 + ixp_process_pending(ixp);
14147 + * callback for when context registration is complete
14151 +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
14154 + struct ixp_data *ixp;
14157 + dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);
14160 + * free any buffer passed in to this routine
14163 + IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
14164 + kfree(IX_MBUF_MDATA(bufp));
14165 + IX_MBUF_MDATA(bufp) = NULL;
14168 + for (i = 0; i < ixp_sesnum; i++) {
14169 + ixp = ixp_sessions[i];
14170 + if (ixp && ixp->ixp_ctx_id == ctx_id)
14173 + if (i >= ixp_sesnum) {
14174 + printk("ixp: invalid context id %d\n", ctx_id);
14178 + if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
14179 + /* this is normal to free the first of two buffers */
14180 + dprintk("ixp: register not finished yet.\n");
14184 + if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
14185 + printk("ixp: register failed 0x%x\n", status);
14186 + while (!list_empty(&ixp->ixp_q)) {
14187 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14188 + list_del(&q->ixp_q_list);
14189 + q->ixp_q_crp->crp_etype = EINVAL;
14190 + crypto_done(q->ixp_q_crp);
14191 + kmem_cache_free(qcache, q);
14197 + * we are now registered, we cannot start processing the Q here
14198 + * or we get strange errors with AES (DES/3DES seem to be ok).
14200 + ixp->ixp_registered = 1;
14201 + schedule_work(&ixp->ixp_pending_work);
14206 + * callback for when data processing is complete
14214 + IxCryptoAccStatus status)
14218 + dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
14221 + if (sbufp == NULL) {
14222 + printk("ixp: NULL sbuf in ixp_perform_cb\n");
14226 + q = IX_MBUF_PRIV(sbufp);
14228 + printk("ixp: NULL priv in ixp_perform_cb\n");
14232 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14233 + printk("ixp: perform failed status=%d\n", status);
14234 + q->ixp_q_crp->crp_etype = EINVAL;
14237 + crypto_done(q->ixp_q_crp);
14238 + kmem_cache_free(qcache, q);
14243 + * registration is not callable at IRQ time, so we defer
14244 + * to a task queue, this routine completes the registration for us
14245 + * when the task queue runs
14247 + * Unfortunately this means we cannot tell OCF that the driver is blocked,
14248 + * we do that on the next request.
14252 +ixp_registration(void *arg)
14254 + struct ixp_data *ixp = arg;
14255 + struct ixp_q *q = NULL;
14256 + IX_MBUF *pri = NULL, *sec = NULL;
14257 + int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
14260 + printk("ixp: ixp_registration with no arg\n");
14264 + if (ixp->ixp_ctx_id != -1) {
14265 + ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
14266 + ixp->ixp_ctx_id = -1;
14269 + if (list_empty(&ixp->ixp_q)) {
14270 + printk("ixp: ixp_registration with no Q\n");
14275 + * setup the primary and secondary buffers
14277 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14278 + if (q->ixp_q_acrd) {
14279 + pri = &ixp->ixp_pri_mbuf;
14280 + sec = &ixp->ixp_sec_mbuf;
14281 + IX_MBUF_MLEN(pri) = IX_MBUF_PKT_LEN(pri) = 128;
14282 + IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
14283 + IX_MBUF_MLEN(sec) = IX_MBUF_PKT_LEN(sec) = 128;
14284 + IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
14287 + /* Only need to register if a crypt op or HMAC op */
14288 + if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
14289 + ixp->ixp_auth_alg == CRYPTO_MD5)) {
14290 + status = ixCryptoAccCtxRegister(
14295 + &ixp->ixp_ctx_id);
14298 + /* Otherwise we start processing pending q */
14299 + schedule_work(&ixp->ixp_pending_work);
14302 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14305 + if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
14306 + printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
14308 + /* perhaps we should return EGAIN on queued ops ? */
14312 + printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
14313 + ixp->ixp_ctx_id = -1;
14316 + * everything waiting is toasted
14318 + while (!list_empty(&ixp->ixp_q)) {
14319 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14320 + list_del(&q->ixp_q_list);
14321 + q->ixp_q_crp->crp_etype = ENOENT;
14322 + crypto_done(q->ixp_q_crp);
14323 + kmem_cache_free(qcache, q);
14327 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
14329 +ixp_registration_wq(struct work_struct *work)
14331 + struct ixp_data *ixp = container_of(work, struct ixp_data,
14332 + ixp_registration_work);
14333 + ixp_registration(ixp);
14338 + * Process a request.
14341 +ixp_process(device_t dev, struct cryptop *crp, int hint)
14343 + struct ixp_data *ixp;
14344 + unsigned int lid;
14345 + struct ixp_q *q = NULL;
14348 + dprintk("%s()\n", __FUNCTION__);
14350 + /* Sanity check */
14351 + if (crp == NULL) {
14352 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
14356 + crp->crp_etype = 0;
14361 + if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
14362 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
14363 + crp->crp_etype = EINVAL;
14368 + * find the session we are using
14371 + lid = crp->crp_sid & 0xffffffff;
14372 + if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
14373 + ixp_sessions[lid] == NULL) {
14374 + crp->crp_etype = ENOENT;
14375 + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
14378 + ixp = ixp_sessions[lid];
14381 + * setup a new request ready for queuing
14383 + q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
14385 + dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
14386 + crp->crp_etype = ENOMEM;
14390 + * save some cycles by only zeroing the important bits
14392 + memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
14393 + q->ixp_q_ccrd = NULL;
14394 + q->ixp_q_acrd = NULL;
14395 + q->ixp_q_crp = crp;
14396 + q->ixp_q_data = ixp;
14399 + * point the cipher and auth descriptors appropriately
14400 + * check that we have something to do
14402 + if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
14403 + q->ixp_q_ccrd = crp->crp_desc;
14404 + else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
14405 + q->ixp_q_acrd = crp->crp_desc;
14407 + crp->crp_etype = ENOENT;
14408 + dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
14411 + if (crp->crp_desc->crd_next) {
14412 + if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
14413 + q->ixp_q_ccrd = crp->crp_desc->crd_next;
14414 + else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
14415 + q->ixp_q_acrd = crp->crp_desc->crd_next;
14417 + crp->crp_etype = ENOENT;
14418 + dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
14424 + * If there is a direction change for this context then we mark it as
14425 + * unregistered and re-register it for the new direction. This is not
14426 + * a very expensive operation and currently only tends to happen when
14427 + * user-space applications are doing benchmarks
14429 + * DM - we should be checking for pending requests before unregistering.
14431 + if (q->ixp_q_ccrd && ixp->ixp_registered &&
14432 + ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
14433 + dprintk("%s - detected direction change on session\n", __FUNCTION__);
14434 + ixp->ixp_registered = 0;
14438 + * if we are registered, call straight into the perform code
14440 + if (ixp->ixp_registered) {
14441 + ixp_q_process(q);
14446 + * the only part of the context not set in newsession is the direction
14447 + * dependent parts
14449 + if (q->ixp_q_ccrd) {
14450 + ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
14451 + if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
14452 + ixp->ixp_ctx.operation = q->ixp_q_acrd ?
14453 + IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
14455 + ixp->ixp_ctx.operation = q->ixp_q_acrd ?
14456 + IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
14459 + /* q->ixp_q_acrd must be set if we are here */
14460 + ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
14463 + status = list_empty(&ixp->ixp_q);
14464 + list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
14466 + schedule_work(&ixp->ixp_registration_work);
14471 + kmem_cache_free(qcache, q);
14472 + crypto_done(crp);
14479 + * key processing support for the ixp465
14484 + * copy a BN (LE) into a buffer (BE) and fill out the op appropriately
14485 + * assume zeroed and only copy bits that are significant
14489 +ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
14491 + unsigned char *src = (unsigned char *) p->crp_p;
14492 + unsigned char *dst;
14493 + int len, bits = p->crp_nbits;
14495 + dprintk("%s()\n", __FUNCTION__);
14497 + if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
14498 + dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
14499 + bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
14503 + len = (bits + 31) / 32; /* the number UINT32's needed */
14505 + dst = (unsigned char *) &buf[len];
14508 + while (bits > 0) {
14513 +#if 0 /* no need to zero remaining bits as it is done during request alloc */
14514 + while (dst > (unsigned char *) buf)
14519 + op->dataLen = len;
14524 + * copy out the result, be as forgiving as we can about small output buffers
14528 +ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
14530 + unsigned char *dst = (unsigned char *) p->crp_p;
14531 + unsigned char *src = (unsigned char *) buf;
14532 + int len, z, bits = p->crp_nbits;
14534 + dprintk("%s()\n", __FUNCTION__);
14536 + len = op->dataLen * sizeof(UINT32);
14538 + /* skip leading zeroes to be small buffer friendly */
14540 + while (z < len && src[z] == '\0')
14547 + while (len > 0 && bits > 0) {
14553 + while (bits > 0) {
14559 + dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
14560 + __FUNCTION__, len, z, p->crp_nbits / 8);
14569 + * the parameter offsets for exp_mod
14572 +#define IXP_PARAM_BASE 0
14573 +#define IXP_PARAM_EXP 1
14574 +#define IXP_PARAM_MOD 2
14575 +#define IXP_PARAM_RES 3
14578 + * key processing complete callback, is also used to start processing
14579 + * by passing a NULL for pResult
14584 + IxCryptoAccPkeEauOperation operation,
14585 + IxCryptoAccPkeEauOpResult *pResult,
14586 + BOOL carryOrBorrow,
14587 + IxCryptoAccStatus status)
14589 + struct ixp_pkq *q, *tmp;
14590 + unsigned long flags;
14592 + dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
14593 + carryOrBorrow, status);
14595 + /* handle a completed request */
14597 + if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
14599 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14600 + dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
14601 + q->pkq_krp->krp_status = ERANGE; /* could do better */
14603 + /* copy out the result */
14604 + if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
14605 + &q->pkq_result, q->pkq_obuf))
14606 + q->pkq_krp->krp_status = ERANGE;
14608 + crypto_kdone(q->pkq_krp);
14610 + ixp_pk_cur = NULL;
14612 + printk("%s - callback with invalid result pointer\n", __FUNCTION__);
14615 + spin_lock_irqsave(&ixp_pkq_lock, flags);
14616 + if (ixp_pk_cur || list_empty(&ixp_pkq)) {
14617 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14621 + list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
14623 + list_del(&q->pkq_list);
14626 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14628 + status = ixCryptoAccPkeEauPerform(
14629 + IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
14634 + if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
14635 + dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
14636 + return; /* callback will return here for callback */
14637 + } else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
14638 + printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
14640 + printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
14641 + __FUNCTION__, status);
14643 + q->pkq_krp->krp_status = ERANGE; /* could do better */
14644 + crypto_kdone(q->pkq_krp);
14646 + spin_lock_irqsave(&ixp_pkq_lock, flags);
14648 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14653 +ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
14655 + struct ixp_pkq *q;
14657 + unsigned long flags;
14659 + dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
14660 + krp->krp_param[IXP_PARAM_BASE].crp_nbits,
14661 + krp->krp_param[IXP_PARAM_EXP].crp_nbits,
14662 + krp->krp_param[IXP_PARAM_MOD].crp_nbits,
14663 + krp->krp_param[IXP_PARAM_RES].crp_nbits);
14666 + if (krp->krp_op != CRK_MOD_EXP) {
14667 + krp->krp_status = EOPNOTSUPP;
14671 + q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
14673 + krp->krp_status = ENOMEM;
14678 + * The PKE engine does not appear to zero the output buffer
14679 + * appropriately, so we need to do it all here.
14681 + memset(q, 0, sizeof(*q));
14683 + q->pkq_krp = krp;
14684 + INIT_LIST_HEAD(&q->pkq_list);
14686 + if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
14689 + if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
14690 + &q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
14692 + if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
14693 + &q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
14698 + krp->krp_status = ERANGE;
14702 + q->pkq_result.pData = q->pkq_obuf;
14703 + q->pkq_result.dataLen =
14704 + (krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
14706 + spin_lock_irqsave(&ixp_pkq_lock, flags);
14707 + list_add_tail(&q->pkq_list, &ixp_pkq);
14708 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14711 + ixp_kperform_cb(0, NULL, 0, 0);
14715 + crypto_kdone(krp);
14721 +#ifdef CONFIG_OCF_RANDOMHARVEST
14723 + * We run the random number generator output through SHA so that it
14724 + * is FIPS compliant.
14727 +static volatile int sha_done = 0;
14728 +static unsigned char sha_digest[20];
14731 +ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
14733 + dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
14734 + if (sha_digest != digest)
14735 + printk("digest error\n");
14736 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14739 + sha_done = -status;
14743 +ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
14745 + IxCryptoAccStatus status;
14748 + dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
14749 + memset(buf, 0, maxwords * sizeof(*buf));
14750 + status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
14751 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14752 + dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
14753 + __FUNCTION__, status);
14758 + * run the random data through SHA to make it look more random
14761 + n = sizeof(sha_digest); /* process digest bytes at a time */
14764 + for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
14765 + if ((maxwords - i) * sizeof(*buf) < n)
14766 + n = (maxwords - i) * sizeof(*buf);
14768 + status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
14769 + (UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
14770 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14771 + dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
14774 + while (!sha_done)
14776 + if (sha_done < 0) {
14777 + dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
14780 + memcpy(&buf[i], sha_digest, n);
14781 + rc += n / sizeof(*buf);;
14786 +#endif /* CONFIG_OCF_RANDOMHARVEST */
14788 +#endif /* __ixp46X */
14793 + * our driver startup and shutdown routines
14799 + dprintk("%s(%p)\n", __FUNCTION__, ixp_init);
14801 + if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
14802 + printk("ixCryptoAccInit failed, assuming already initialised!\n");
14804 + qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
14805 + SLAB_HWCACHE_ALIGN, NULL
14806 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
14811 + printk("failed to create Qcache\n");
14815 + memset(&ixpdev, 0, sizeof(ixpdev));
14816 + softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);
14818 + ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
14819 + CRYPTOCAP_F_HARDWARE);
14821 + panic("IXP/OCF crypto device cannot initialize!");
14823 +#define REGISTER(alg) \
14824 + crypto_register(ixp_id,alg,0,0)
14826 + REGISTER(CRYPTO_DES_CBC);
14827 + REGISTER(CRYPTO_3DES_CBC);
14828 + REGISTER(CRYPTO_RIJNDAEL128_CBC);
14829 +#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
14830 + REGISTER(CRYPTO_MD5);
14831 + REGISTER(CRYPTO_SHA1);
14833 + REGISTER(CRYPTO_MD5_HMAC);
14834 + REGISTER(CRYPTO_SHA1_HMAC);
14838 + spin_lock_init(&ixp_pkq_lock);
14840 + * we do not enable the go fast options here as they can potentially
14841 + * allow timing based attacks
14843 + * http://www.openssl.org/news/secadv_20030219.txt
14845 + ixCryptoAccPkeEauExpConfig(0, 0);
14846 + crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
14847 +#ifdef CONFIG_OCF_RANDOMHARVEST
14848 + crypto_rregister(ixp_id, ixp_read_random, NULL);
14858 + dprintk("%s()\n", __FUNCTION__);
14859 + crypto_unregister_all(ixp_id);
14861 + kmem_cache_destroy(qcache);
14865 +module_init(ixp_init);
14866 +module_exit(ixp_exit);
14868 +MODULE_LICENSE("Dual BSD/GPL");
14869 +MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
14870 +MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");
14872 +++ b/crypto/ocf/cryptodev.c
14874 +/* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */
14877 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
14878 + * Copyright (C) 2006-2007 David McCullough
14879 + * Copyright (C) 2004-2005 Intel Corporation.
14880 + * The license and original author are listed below.
14882 + * Copyright (c) 2001 Theo de Raadt
14883 + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
14885 + * Redistribution and use in source and binary forms, with or without
14886 + * modification, are permitted provided that the following conditions
14889 + * 1. Redistributions of source code must retain the above copyright
14890 + * notice, this list of conditions and the following disclaimer.
14891 + * 2. Redistributions in binary form must reproduce the above copyright
14892 + * notice, this list of conditions and the following disclaimer in the
14893 + * documentation and/or other materials provided with the distribution.
14894 + * 3. The name of the author may not be used to endorse or promote products
14895 + * derived from this software without specific prior written permission.
14897 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14898 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
14899 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
14900 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
14901 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
14902 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
14903 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
14904 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
14905 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
14906 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14908 + * Effort sponsored in part by the Defense Advanced Research Projects
14909 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
14910 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
14912 +__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $");
14915 +#ifndef AUTOCONF_INCLUDED
14916 +#include <linux/config.h>
14918 +#include <linux/types.h>
14919 +#include <linux/time.h>
14920 +#include <linux/delay.h>
14921 +#include <linux/list.h>
14922 +#include <linux/init.h>
14923 +#include <linux/sched.h>
14924 +#include <linux/unistd.h>
14925 +#include <linux/module.h>
14926 +#include <linux/wait.h>
14927 +#include <linux/slab.h>
14928 +#include <linux/fs.h>
14929 +#include <linux/dcache.h>
14930 +#include <linux/file.h>
14931 +#include <linux/mount.h>
14932 +#include <linux/miscdevice.h>
14933 +#include <linux/version.h>
14934 +#include <asm/uaccess.h>
14936 +#include <cryptodev.h>
14939 +extern asmlinkage long sys_dup(unsigned int fildes);
14941 +#define debug cryptodev_debug
14942 +int cryptodev_debug = 0;
14943 +module_param(cryptodev_debug, int, 0644);
14944 +MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug");
14946 +struct csession_info {
14947 + u_int16_t blocksize;
14948 + u_int16_t minkey, maxkey;
14950 + u_int16_t keysize;
14951 + /* u_int16_t hashsize; */
14952 + u_int16_t authsize;
14953 + /* u_int16_t ctxsize; */
14957 + struct list_head list;
14961 + wait_queue_head_t waitq;
14963 + u_int32_t cipher;
14969 + u_char tmp_iv[EALG_MAX_BLOCK_LEN];
14974 + struct csession_info info;
14976 + struct iovec iovec;
14982 + struct list_head csessions;
14986 +static struct csession *csefind(struct fcrypt *, u_int);
14987 +static int csedelete(struct fcrypt *, struct csession *);
14988 +static struct csession *cseadd(struct fcrypt *, struct csession *);
14989 +static struct csession *csecreate(struct fcrypt *, u_int64_t,
14990 + struct cryptoini *crie, struct cryptoini *cria, struct csession_info *);
14991 +static int csefree(struct csession *);
14993 +static int cryptodev_op(struct csession *, struct crypt_op *);
14994 +static int cryptodev_key(struct crypt_kop *);
14995 +static int cryptodev_find(struct crypt_find_op *);
14997 +static int cryptodev_cb(void *);
14998 +static int cryptodev_open(struct inode *inode, struct file *filp);
15001 + * Check a crypto identifier to see if it requested
15002 + * a valid crid and its capabilities match.
15005 +checkcrid(int crid)
15007 + int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
15008 + int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
15011 + /* if the user hasn't selected a driver, then just call newsession */
15012 + if (hid == 0 && typ != 0)
15015 + caps = crypto_getcaps(hid);
15017 + /* didn't find anything with capabilities */
15019 + dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
15023 + /* the user didn't specify SW or HW, so the driver is ok */
15027 + /* if the type specified didn't match */
15028 + if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) {
15029 + dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__,
15038 +cryptodev_op(struct csession *cse, struct crypt_op *cop)
15040 + struct cryptop *crp = NULL;
15041 + struct cryptodesc *crde = NULL, *crda = NULL;
15044 + dprintk("%s()\n", __FUNCTION__);
15045 + if (cop->len > CRYPTO_MAX_DATA_LEN) {
15046 + dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN);
15050 + if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) {
15051 + dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize,
15056 + cse->uio.uio_iov = &cse->iovec;
15057 + cse->uio.uio_iovcnt = 1;
15058 + cse->uio.uio_offset = 0;
15060 + cse->uio.uio_resid = cop->len;
15061 + cse->uio.uio_segflg = UIO_SYSSPACE;
15062 + cse->uio.uio_rw = UIO_WRITE;
15063 + cse->uio.uio_td = td;
15065 + cse->uio.uio_iov[0].iov_len = cop->len;
15066 + if (cse->info.authsize)
15067 + cse->uio.uio_iov[0].iov_len += cse->info.authsize;
15068 + cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len,
15071 + if (cse->uio.uio_iov[0].iov_base == NULL) {
15072 + dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__,
15073 + cse->uio.uio_iov[0].iov_len);
15077 + crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0));
15078 + if (crp == NULL) {
15079 + dprintk("%s: ENOMEM\n", __FUNCTION__);
15084 + if (cse->info.authsize) {
15085 + crda = crp->crp_desc;
15086 + if (cse->info.blocksize)
15087 + crde = crda->crd_next;
15089 + if (cse->info.blocksize)
15090 + crde = crp->crp_desc;
15092 + dprintk("%s: bad request\n", __FUNCTION__);
15098 + if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src,
15100 + dprintk("%s: bad copy\n", __FUNCTION__);
15105 + crda->crd_skip = 0;
15106 + crda->crd_len = cop->len;
15107 + crda->crd_inject = cop->len;
15109 + crda->crd_alg = cse->mac;
15110 + crda->crd_key = cse->mackey;
15111 + crda->crd_klen = cse->mackeylen * 8;
15115 + if (cop->op == COP_ENCRYPT)
15116 + crde->crd_flags |= CRD_F_ENCRYPT;
15118 + crde->crd_flags &= ~CRD_F_ENCRYPT;
15119 + crde->crd_len = cop->len;
15120 + crde->crd_inject = 0;
15122 + crde->crd_alg = cse->cipher;
15123 + crde->crd_key = cse->key;
15124 + crde->crd_klen = cse->keylen * 8;
15127 + crp->crp_ilen = cse->uio.uio_iov[0].iov_len;
15128 + crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
15129 + | (cop->flags & COP_F_BATCH);
15130 + crp->crp_buf = (caddr_t)&cse->uio;
15131 + crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
15132 + crp->crp_sid = cse->sid;
15133 + crp->crp_opaque = (void *)cse;
15136 + if (crde == NULL) {
15138 + dprintk("%s no crde\n", __FUNCTION__);
15141 + if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
15143 + dprintk("%s arc4 with IV\n", __FUNCTION__);
15146 + if ((error = copy_from_user(cse->tmp_iv, cop->iv,
15147 + cse->info.blocksize))) {
15148 + dprintk("%s bad iv copy\n", __FUNCTION__);
15151 + memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize);
15152 + crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
15153 + crde->crd_skip = 0;
15154 + } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
15155 + crde->crd_skip = 0;
15156 + } else if (crde) {
15157 + crde->crd_flags |= CRD_F_IV_PRESENT;
15158 + crde->crd_skip = cse->info.blocksize;
15159 + crde->crd_len -= cse->info.blocksize;
15162 + if (cop->mac && crda == NULL) {
15164 + dprintk("%s no crda\n", __FUNCTION__);
15169 + * Let the dispatch run unlocked, then, interlock against the
15170 + * callback before checking if the operation completed and going
15171 + * to sleep. This insures drivers don't inherit our lock which
15172 + * results in a lock order reversal between crypto_dispatch forced
15173 + * entry and the crypto_done callback into us.
15175 + error = crypto_dispatch(crp);
15176 + if (error == 0) {
15177 + dprintk("%s about to WAIT\n", __FUNCTION__);
15179 + * we really need to wait for driver to complete to maintain
15180 + * state, luckily interrupts will be remembered
15183 + error = wait_event_interruptible(crp->crp_waitq,
15184 + ((crp->crp_flags & CRYPTO_F_DONE) != 0));
15186 + * we can't break out of this loop or we will leave behind
15187 + * a huge mess, however, staying here means if your driver
15188 + * is broken user applications can hang and not be killed.
15189 + * The solution, fix your driver :-)
15195 + } while ((crp->crp_flags & CRYPTO_F_DONE) == 0);
15196 + dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
15199 + if (crp->crp_etype != 0) {
15200 + error = crp->crp_etype;
15201 + dprintk("%s error in crp processing\n", __FUNCTION__);
15205 + if (cse->error) {
15206 + error = cse->error;
15207 + dprintk("%s error in cse processing\n", __FUNCTION__);
15211 + if (cop->dst && (error = copy_to_user(cop->dst,
15212 + cse->uio.uio_iov[0].iov_base, cop->len))) {
15213 + dprintk("%s bad dst copy\n", __FUNCTION__);
15218 + (error=copy_to_user(cop->mac,
15219 + (caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
15220 + cse->info.authsize))) {
15221 + dprintk("%s bad mac copy\n", __FUNCTION__);
15227 + crypto_freereq(crp);
15228 + if (cse->uio.uio_iov[0].iov_base)
15229 + kfree(cse->uio.uio_iov[0].iov_base);
15235 +cryptodev_cb(void *op)
15237 + struct cryptop *crp = (struct cryptop *) op;
15238 + struct csession *cse = (struct csession *)crp->crp_opaque;
15241 + dprintk("%s()\n", __FUNCTION__);
15242 + error = crp->crp_etype;
15243 + if (error == EAGAIN) {
15244 + crp->crp_flags &= ~CRYPTO_F_DONE;
15247 + * DAVIDM I am fairly sure that we should turn this into a batch
15248 + * request to stop bad karma/lockup, revisit
15250 + crp->crp_flags |= CRYPTO_F_BATCH;
15252 + return crypto_dispatch(crp);
15254 + if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
15255 + cse->error = error;
15256 + wake_up_interruptible(&crp->crp_waitq);
15262 +cryptodevkey_cb(void *op)
15264 + struct cryptkop *krp = (struct cryptkop *) op;
15265 + dprintk("%s()\n", __FUNCTION__);
15266 + wake_up_interruptible(&krp->krp_waitq);
15271 +cryptodev_key(struct crypt_kop *kop)
15273 + struct cryptkop *krp = NULL;
15274 + int error = EINVAL;
15275 + int in, out, size, i;
15277 + dprintk("%s()\n", __FUNCTION__);
15278 + if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
15279 + dprintk("%s params too big\n", __FUNCTION__);
15283 + in = kop->crk_iparams;
15284 + out = kop->crk_oparams;
15285 + switch (kop->crk_op) {
15286 + case CRK_MOD_EXP:
15287 + if (in == 3 && out == 1)
15290 + case CRK_MOD_EXP_CRT:
15291 + if (in == 6 && out == 1)
15294 + case CRK_DSA_SIGN:
15295 + if (in == 5 && out == 2)
15298 + case CRK_DSA_VERIFY:
15299 + if (in == 7 && out == 0)
15302 + case CRK_DH_COMPUTE_KEY:
15303 + if (in == 3 && out == 1)
15310 + krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL);
15313 + bzero(krp, sizeof *krp);
15314 + krp->krp_op = kop->crk_op;
15315 + krp->krp_status = kop->crk_status;
15316 + krp->krp_iparams = kop->crk_iparams;
15317 + krp->krp_oparams = kop->crk_oparams;
15318 + krp->krp_crid = kop->crk_crid;
15319 + krp->krp_status = 0;
15320 + krp->krp_flags = CRYPTO_KF_CBIMM;
15321 + krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
15322 + init_waitqueue_head(&krp->krp_waitq);
15324 + for (i = 0; i < CRK_MAXPARAM; i++)
15325 + krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
15326 + for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
15327 + size = (krp->krp_param[i].crp_nbits + 7) / 8;
15330 + krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL);
15331 + if (i >= krp->krp_iparams)
15333 + error = copy_from_user(krp->krp_param[i].crp_p,
15334 + kop->crk_param[i].crp_p, size);
15339 + error = crypto_kdispatch(krp);
15344 + error = wait_event_interruptible(krp->krp_waitq,
15345 + ((krp->krp_flags & CRYPTO_KF_DONE) != 0));
15347 + * we can't break out of this loop or we will leave behind
15348 + * a huge mess, however, staying here means if your driver
15349 + * is broken user applications can hang and not be killed.
15350 + * The solution, fix your driver :-)
15356 + } while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
15358 + dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
15360 + kop->crk_crid = krp->krp_crid; /* device that did the work */
15361 + if (krp->krp_status != 0) {
15362 + error = krp->krp_status;
15366 + for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
15367 + size = (krp->krp_param[i].crp_nbits + 7) / 8;
15370 + error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p,
15378 + kop->crk_status = krp->krp_status;
15379 + for (i = 0; i < CRK_MAXPARAM; i++) {
15380 + if (krp->krp_param[i].crp_p)
15381 + kfree(krp->krp_param[i].crp_p);
15389 +cryptodev_find(struct crypt_find_op *find)
15393 + if (find->crid != -1) {
15394 + dev = crypto_find_device_byhid(find->crid);
15397 + strlcpy(find->name, device_get_nameunit(dev),
15398 + sizeof(find->name));
15400 + find->crid = crypto_find_driver(find->name);
15401 + if (find->crid == -1)
15407 +static struct csession *
15408 +csefind(struct fcrypt *fcr, u_int ses)
15410 + struct csession *cse;
15412 + dprintk("%s()\n", __FUNCTION__);
15413 + list_for_each_entry(cse, &fcr->csessions, list)
15414 + if (cse->ses == ses)
15420 +csedelete(struct fcrypt *fcr, struct csession *cse_del)
15422 + struct csession *cse;
15424 + dprintk("%s()\n", __FUNCTION__);
15425 + list_for_each_entry(cse, &fcr->csessions, list) {
15426 + if (cse == cse_del) {
15427 + list_del(&cse->list);
15434 +static struct csession *
15435 +cseadd(struct fcrypt *fcr, struct csession *cse)
15437 + dprintk("%s()\n", __FUNCTION__);
15438 + list_add_tail(&cse->list, &fcr->csessions);
15439 + cse->ses = fcr->sesn++;
15443 +static struct csession *
15444 +csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie,
15445 + struct cryptoini *cria, struct csession_info *info)
15447 + struct csession *cse;
15449 + dprintk("%s()\n", __FUNCTION__);
15450 + cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL);
15453 + memset(cse, 0, sizeof(struct csession));
15455 + INIT_LIST_HEAD(&cse->list);
15456 + init_waitqueue_head(&cse->waitq);
15458 + cse->key = crie->cri_key;
15459 + cse->keylen = crie->cri_klen/8;
15460 + cse->mackey = cria->cri_key;
15461 + cse->mackeylen = cria->cri_klen/8;
15463 + cse->cipher = crie->cri_alg;
15464 + cse->mac = cria->cri_alg;
15465 + cse->info = *info;
15466 + cseadd(fcr, cse);
15471 +csefree(struct csession *cse)
15475 + dprintk("%s()\n", __FUNCTION__);
15476 + error = crypto_freesession(cse->sid);
15480 + kfree(cse->mackey);
15487 + struct inode *inode,
15488 + struct file *filp,
15489 + unsigned int cmd,
15490 + unsigned long arg)
15492 + struct cryptoini cria, crie;
15493 + struct fcrypt *fcr = filp->private_data;
15494 + struct csession *cse;
15495 + struct csession_info info;
15496 + struct session2_op sop;
15497 + struct crypt_op cop;
15498 + struct crypt_kop kop;
15499 + struct crypt_find_op fop;
15502 + int feat, fd, error = 0, crid;
15505 + dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);
15510 + dprintk("%s(CRIOGET)\n", __FUNCTION__);
15512 + set_fs(get_ds());
15513 + for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
15514 + if (files_fdtable(current->files)->fd[fd] == filp)
15516 + fd = sys_dup(fd);
15518 + put_user(fd, (int *) arg);
15519 + return IS_ERR_VALUE(fd) ? fd : 0;
15522 +#define CIOCGSESSSTR (cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
15523 + case CIOCGSESSION:
15524 + case CIOCGSESSION2:
15525 + dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
15526 + memset(&crie, 0, sizeof(crie));
15527 + memset(&cria, 0, sizeof(cria));
15528 + memset(&info, 0, sizeof(info));
15529 + memset(&sop, 0, sizeof(sop));
15531 + if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
15532 + sizeof(struct session_op) : sizeof(sop))) {
15533 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15538 + switch (sop.cipher) {
15540 + dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
15542 + case CRYPTO_NULL_CBC:
15543 + info.blocksize = NULL_BLOCK_LEN;
15544 + info.minkey = NULL_MIN_KEY_LEN;
15545 + info.maxkey = NULL_MAX_KEY_LEN;
15547 + case CRYPTO_DES_CBC:
15548 + info.blocksize = DES_BLOCK_LEN;
15549 + info.minkey = DES_MIN_KEY_LEN;
15550 + info.maxkey = DES_MAX_KEY_LEN;
15552 + case CRYPTO_3DES_CBC:
15553 + info.blocksize = DES3_BLOCK_LEN;
15554 + info.minkey = DES3_MIN_KEY_LEN;
15555 + info.maxkey = DES3_MAX_KEY_LEN;
15557 + case CRYPTO_BLF_CBC:
15558 + info.blocksize = BLOWFISH_BLOCK_LEN;
15559 + info.minkey = BLOWFISH_MIN_KEY_LEN;
15560 + info.maxkey = BLOWFISH_MAX_KEY_LEN;
15562 + case CRYPTO_CAST_CBC:
15563 + info.blocksize = CAST128_BLOCK_LEN;
15564 + info.minkey = CAST128_MIN_KEY_LEN;
15565 + info.maxkey = CAST128_MAX_KEY_LEN;
15567 + case CRYPTO_SKIPJACK_CBC:
15568 + info.blocksize = SKIPJACK_BLOCK_LEN;
15569 + info.minkey = SKIPJACK_MIN_KEY_LEN;
15570 + info.maxkey = SKIPJACK_MAX_KEY_LEN;
15572 + case CRYPTO_AES_CBC:
15573 + info.blocksize = AES_BLOCK_LEN;
15574 + info.minkey = AES_MIN_KEY_LEN;
15575 + info.maxkey = AES_MAX_KEY_LEN;
15577 + case CRYPTO_ARC4:
15578 + info.blocksize = ARC4_BLOCK_LEN;
15579 + info.minkey = ARC4_MIN_KEY_LEN;
15580 + info.maxkey = ARC4_MAX_KEY_LEN;
15582 + case CRYPTO_CAMELLIA_CBC:
15583 + info.blocksize = CAMELLIA_BLOCK_LEN;
15584 + info.minkey = CAMELLIA_MIN_KEY_LEN;
15585 + info.maxkey = CAMELLIA_MAX_KEY_LEN;
15588 + dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
15593 + switch (sop.mac) {
15595 + dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
15597 + case CRYPTO_NULL_HMAC:
15598 + info.authsize = NULL_HASH_LEN;
15601 + info.authsize = MD5_HASH_LEN;
15603 + case CRYPTO_SHA1:
15604 + info.authsize = SHA1_HASH_LEN;
15606 + case CRYPTO_SHA2_256:
15607 + info.authsize = SHA2_256_HASH_LEN;
15609 + case CRYPTO_SHA2_384:
15610 + info.authsize = SHA2_384_HASH_LEN;
15612 + case CRYPTO_SHA2_512:
15613 + info.authsize = SHA2_512_HASH_LEN;
15615 + case CRYPTO_RIPEMD160:
15616 + info.authsize = RIPEMD160_HASH_LEN;
15618 + case CRYPTO_MD5_HMAC:
15619 + info.authsize = MD5_HASH_LEN;
15621 + case CRYPTO_SHA1_HMAC:
15622 + info.authsize = SHA1_HASH_LEN;
15624 + case CRYPTO_SHA2_256_HMAC:
15625 + info.authsize = SHA2_256_HASH_LEN;
15627 + case CRYPTO_SHA2_384_HMAC:
15628 + info.authsize = SHA2_384_HASH_LEN;
15630 + case CRYPTO_SHA2_512_HMAC:
15631 + info.authsize = SHA2_512_HASH_LEN;
15633 + case CRYPTO_RIPEMD160_HMAC:
15634 + info.authsize = RIPEMD160_HASH_LEN;
15637 + dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
15642 + if (info.blocksize) {
15643 + crie.cri_alg = sop.cipher;
15644 + crie.cri_klen = sop.keylen * 8;
15645 + if ((info.maxkey && sop.keylen > info.maxkey) ||
15646 + sop.keylen < info.minkey) {
15647 + dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
15652 + crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
15653 + if (copy_from_user(crie.cri_key, sop.key,
15654 + crie.cri_klen/8)) {
15655 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15659 + if (info.authsize)
15660 + crie.cri_next = &cria;
15663 + if (info.authsize) {
15664 + cria.cri_alg = sop.mac;
15665 + cria.cri_klen = sop.mackeylen * 8;
15666 + if ((info.maxkey && sop.mackeylen > info.maxkey) ||
15667 + sop.keylen < info.minkey) {
15668 + dprintk("%s(%s) - mackeylen %d\n", __FUNCTION__, CIOCGSESSSTR,
15674 + if (cria.cri_klen) {
15675 + cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
15676 + if (copy_from_user(cria.cri_key, sop.mackey,
15677 + cria.cri_klen / 8)) {
15678 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15685 + /* NB: CIOGSESSION2 has the crid */
15686 + if (cmd == CIOCGSESSION2) {
15688 + error = checkcrid(crid);
15690 + dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
15691 + CIOCGSESSSTR, error);
15695 + /* allow either HW or SW to be used */
15696 + crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
15698 + error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
15700 + dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
15704 + cse = csecreate(fcr, sid, &crie, &cria, &info);
15705 + if (cse == NULL) {
15706 + crypto_freesession(sid);
15708 + dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
15711 + sop.ses = cse->ses;
15713 + if (cmd == CIOCGSESSION2) {
15714 + /* return hardware/driver id */
15715 + sop.crid = CRYPTO_SESID2HID(cse->sid);
15718 + if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
15719 + sizeof(struct session_op) : sizeof(sop))) {
15720 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15725 + dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
15726 + if (crie.cri_key)
15727 + kfree(crie.cri_key);
15728 + if (cria.cri_key)
15729 + kfree(cria.cri_key);
15732 + case CIOCFSESSION:
15733 + dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
15734 + get_user(ses, (uint32_t*)arg);
15735 + cse = csefind(fcr, ses);
15736 + if (cse == NULL) {
15738 + dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
15741 + csedelete(fcr, cse);
15742 + error = csefree(cse);
15745 + dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
15746 + if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
15747 + dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
15751 + cse = csefind(fcr, cop.ses);
15752 + if (cse == NULL) {
15754 + dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
15757 + error = cryptodev_op(cse, &cop);
15758 + if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
15759 + dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
15766 + dprintk("%s(CIOCKEY)\n", __FUNCTION__);
15767 + if (!crypto_userasymcrypto)
15768 + return (EPERM); /* XXX compat? */
15769 + if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
15770 + dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
15774 + if (cmd == CIOCKEY) {
15775 + /* NB: crypto core enforces s/w driver use */
15777 + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
15779 + error = cryptodev_key(&kop);
15780 + if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
15781 + dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
15786 + case CIOCASYMFEAT:
15787 + dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
15788 + if (!crypto_userasymcrypto) {
15790 + * NB: if user asym crypto operations are
15791 + * not permitted return "no algorithms"
15792 + * so well-behaved applications will just
15793 + * fallback to doing them in software.
15797 + error = crypto_getfeat(&feat);
15799 + error = copy_to_user((void*)arg, &feat, sizeof(feat));
15802 + case CIOCFINDDEV:
15803 + if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
15804 + dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
15808 + error = cryptodev_find(&fop);
15809 + if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
15810 + dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
15816 + dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
15823 +#ifdef HAVE_UNLOCKED_IOCTL
15825 +cryptodev_unlocked_ioctl(
15826 + struct file *filp,
15827 + unsigned int cmd,
15828 + unsigned long arg)
15830 + return cryptodev_ioctl(NULL, filp, cmd, arg);
15835 +cryptodev_open(struct inode *inode, struct file *filp)
15837 + struct fcrypt *fcr;
15839 + dprintk("%s()\n", __FUNCTION__);
15840 + if (filp->private_data) {
15841 + printk("cryptodev: Private data already exists !\n");
15845 + fcr = kmalloc(sizeof(*fcr), GFP_KERNEL);
15847 + dprintk("%s() - malloc failed\n", __FUNCTION__);
15850 + memset(fcr, 0, sizeof(*fcr));
15852 + INIT_LIST_HEAD(&fcr->csessions);
15853 + filp->private_data = fcr;
15858 +cryptodev_release(struct inode *inode, struct file *filp)
15860 + struct fcrypt *fcr = filp->private_data;
15861 + struct csession *cse, *tmp;
15863 + dprintk("%s()\n", __FUNCTION__);
15865 + printk("cryptodev: No private data on release\n");
15869 + list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) {
15870 + list_del(&cse->list);
15871 + (void)csefree(cse);
15873 + filp->private_data = NULL;
15878 +static struct file_operations cryptodev_fops = {
15879 + .owner = THIS_MODULE,
15880 + .open = cryptodev_open,
15881 + .release = cryptodev_release,
15882 + .ioctl = cryptodev_ioctl,
15883 +#ifdef HAVE_UNLOCKED_IOCTL
15884 + .unlocked_ioctl = cryptodev_unlocked_ioctl,
15888 +static struct miscdevice cryptodev = {
15889 + .minor = CRYPTODEV_MINOR,
15890 + .name = "crypto",
15891 + .fops = &cryptodev_fops,
15895 +cryptodev_init(void)
15899 + dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init);
15900 + rc = misc_register(&cryptodev);
15902 + printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n");
15909 +static void __exit
15910 +cryptodev_exit(void)
15912 + dprintk("%s()\n", __FUNCTION__);
15913 + misc_deregister(&cryptodev);
15916 +module_init(cryptodev_init);
15917 +module_exit(cryptodev_exit);
15919 +MODULE_LICENSE("BSD");
15920 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
15921 +MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
15923 +++ b/crypto/ocf/cryptodev.h
15925 +/* $FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $ */
15926 +/* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */
15929 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
15930 + * Copyright (C) 2006-2007 David McCullough
15931 + * Copyright (C) 2004-2005 Intel Corporation.
15932 + * The license and original author are listed below.
15934 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
15935 + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
15937 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
15938 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
15939 + * supported the development of this code.
15941 + * Copyright (c) 2000 Angelos D. Keromytis
15943 + * Permission to use, copy, and modify this software with or without fee
15944 + * is hereby granted, provided that this entire notice is included in
15945 + * all source code copies of any software which is or includes a copy or
15946 + * modification of this software.
15948 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
15949 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
15950 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
15951 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
15954 + * Copyright (c) 2001 Theo de Raadt
15956 + * Redistribution and use in source and binary forms, with or without
15957 + * modification, are permitted provided that the following conditions
15960 + * 1. Redistributions of source code must retain the above copyright
15961 + * notice, this list of conditions and the following disclaimer.
15962 + * 2. Redistributions in binary form must reproduce the above copyright
15963 + * notice, this list of conditions and the following disclaimer in the
15964 + * documentation and/or other materials provided with the distribution.
15965 + * 3. The name of the author may not be used to endorse or promote products
15966 + * derived from this software without specific prior written permission.
15968 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15969 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15970 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
15971 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
15972 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
15973 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
15974 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
15975 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
15976 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
15977 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15979 + * Effort sponsored in part by the Defense Advanced Research Projects
15980 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
15981 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
15985 +#ifndef _CRYPTO_CRYPTO_H_
15986 +#define _CRYPTO_CRYPTO_H_
15988 +/* Some initial values */
15989 +#define CRYPTO_DRIVERS_INITIAL 4
15990 +#define CRYPTO_SW_SESSIONS 32
15993 +#define NULL_HASH_LEN 0
15994 +#define MD5_HASH_LEN 16
15995 +#define SHA1_HASH_LEN 20
15996 +#define RIPEMD160_HASH_LEN 20
15997 +#define SHA2_256_HASH_LEN 32
15998 +#define SHA2_384_HASH_LEN 48
15999 +#define SHA2_512_HASH_LEN 64
16000 +#define MD5_KPDK_HASH_LEN 16
16001 +#define SHA1_KPDK_HASH_LEN 20
16002 +/* Maximum hash algorithm result length */
16003 +#define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */
16006 +#define NULL_HMAC_BLOCK_LEN 1
16007 +#define MD5_HMAC_BLOCK_LEN 64
16008 +#define SHA1_HMAC_BLOCK_LEN 64
16009 +#define RIPEMD160_HMAC_BLOCK_LEN 64
16010 +#define SHA2_256_HMAC_BLOCK_LEN 64
16011 +#define SHA2_384_HMAC_BLOCK_LEN 128
16012 +#define SHA2_512_HMAC_BLOCK_LEN 128
16013 +/* Maximum HMAC block length */
16014 +#define HMAC_MAX_BLOCK_LEN SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
16015 +#define HMAC_IPAD_VAL 0x36
16016 +#define HMAC_OPAD_VAL 0x5C
16018 +/* Encryption algorithm block sizes */
16019 +#define NULL_BLOCK_LEN 1
16020 +#define DES_BLOCK_LEN 8
16021 +#define DES3_BLOCK_LEN 8
16022 +#define BLOWFISH_BLOCK_LEN 8
16023 +#define SKIPJACK_BLOCK_LEN 8
16024 +#define CAST128_BLOCK_LEN 8
16025 +#define RIJNDAEL128_BLOCK_LEN 16
16026 +#define AES_BLOCK_LEN RIJNDAEL128_BLOCK_LEN
16027 +#define CAMELLIA_BLOCK_LEN 16
16028 +#define ARC4_BLOCK_LEN 1
16029 +#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */
16031 +/* Encryption algorithm min and max key sizes */
16032 +#define NULL_MIN_KEY_LEN 0
16033 +#define NULL_MAX_KEY_LEN 0
16034 +#define DES_MIN_KEY_LEN 8
16035 +#define DES_MAX_KEY_LEN 8
16036 +#define DES3_MIN_KEY_LEN 24
16037 +#define DES3_MAX_KEY_LEN 24
16038 +#define BLOWFISH_MIN_KEY_LEN 4
16039 +#define BLOWFISH_MAX_KEY_LEN 56
16040 +#define SKIPJACK_MIN_KEY_LEN 10
16041 +#define SKIPJACK_MAX_KEY_LEN 10
16042 +#define CAST128_MIN_KEY_LEN 5
16043 +#define CAST128_MAX_KEY_LEN 16
16044 +#define RIJNDAEL128_MIN_KEY_LEN 16
16045 +#define RIJNDAEL128_MAX_KEY_LEN 32
16046 +#define AES_MIN_KEY_LEN RIJNDAEL128_MIN_KEY_LEN
16047 +#define AES_MAX_KEY_LEN RIJNDAEL128_MAX_KEY_LEN
16048 +#define CAMELLIA_MIN_KEY_LEN 16
16049 +#define CAMELLIA_MAX_KEY_LEN 32
16050 +#define ARC4_MIN_KEY_LEN 1
16051 +#define ARC4_MAX_KEY_LEN 256
16053 +/* Max size of data that can be processed */
16054 +#define CRYPTO_MAX_DATA_LEN 64*1024 - 1
16056 +#define CRYPTO_ALGORITHM_MIN 1
16057 +#define CRYPTO_DES_CBC 1
16058 +#define CRYPTO_3DES_CBC 2
16059 +#define CRYPTO_BLF_CBC 3
16060 +#define CRYPTO_CAST_CBC 4
16061 +#define CRYPTO_SKIPJACK_CBC 5
16062 +#define CRYPTO_MD5_HMAC 6
16063 +#define CRYPTO_SHA1_HMAC 7
16064 +#define CRYPTO_RIPEMD160_HMAC 8
16065 +#define CRYPTO_MD5_KPDK 9
16066 +#define CRYPTO_SHA1_KPDK 10
16067 +#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
16068 +#define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */
16069 +#define CRYPTO_ARC4 12
16070 +#define CRYPTO_MD5 13
16071 +#define CRYPTO_SHA1 14
16072 +#define CRYPTO_NULL_HMAC 15
16073 +#define CRYPTO_NULL_CBC 16
16074 +#define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */
16075 +#define CRYPTO_SHA2_256_HMAC 18
16076 +#define CRYPTO_SHA2_384_HMAC 19
16077 +#define CRYPTO_SHA2_512_HMAC 20
16078 +#define CRYPTO_CAMELLIA_CBC 21
16079 +#define CRYPTO_SHA2_256 22
16080 +#define CRYPTO_SHA2_384 23
16081 +#define CRYPTO_SHA2_512 24
16082 +#define CRYPTO_RIPEMD160 25
16083 +#define CRYPTO_ALGORITHM_MAX 25 /* Keep updated - see below */
16085 +/* Algorithm flags */
16086 +#define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */
16087 +#define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */
16088 +#define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */
16091 + * Crypto driver/device flags. They can set in the crid
16092 + * parameter when creating a session or submitting a key
16093 + * op to affect the device/driver assigned. If neither
16094 + * of these are specified then the crid is assumed to hold
16095 + * the driver id of an existing (and suitable) device that
16096 + * must be used to satisfy the request.
16098 +#define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */
16099 +#define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */
16101 +/* NB: deprecated */
16102 +struct session_op {
16103 + u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
16104 + u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
16106 + u_int32_t keylen; /* cipher key */
16108 + int mackeylen; /* mac key */
16111 + u_int32_t ses; /* returns: session # */
16114 +struct session2_op {
16115 + u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
16116 + u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
16118 + u_int32_t keylen; /* cipher key */
16120 + int mackeylen; /* mac key */
16123 + u_int32_t ses; /* returns: session # */
16124 + int crid; /* driver id + flags (rw) */
16125 + int pad[4]; /* for future expansion */
16130 + u_int16_t op; /* i.e. COP_ENCRYPT */
16131 +#define COP_NONE 0
16132 +#define COP_ENCRYPT 1
16133 +#define COP_DECRYPT 2
16135 +#define COP_F_BATCH 0x0008 /* Batch op if possible */
16137 + caddr_t src, dst; /* become iov[] inside kernel */
16138 + caddr_t mac; /* must be big enough for chosen MAC */
16143 + * Parameters for looking up a crypto driver/device by
16144 + * device name or by id. The latter are returned for
16145 + * created sessions (crid) and completed key operations.
16147 +struct crypt_find_op {
16148 + int crid; /* driver id + flags */
16149 + char name[32]; /* device/driver name */
16152 +/* bignum parameter, in packed bytes, ... */
16158 +#define CRK_MAXPARAM 8
16160 +struct crypt_kop {
16161 + u_int crk_op; /* ie. CRK_MOD_EXP or other */
16162 + u_int crk_status; /* return status */
16163 + u_short crk_iparams; /* # of input parameters */
16164 + u_short crk_oparams; /* # of output parameters */
16165 + u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */
16166 + struct crparam crk_param[CRK_MAXPARAM];
16168 +#define CRK_ALGORITM_MIN 0
16169 +#define CRK_MOD_EXP 0
16170 +#define CRK_MOD_EXP_CRT 1
16171 +#define CRK_DSA_SIGN 2
16172 +#define CRK_DSA_VERIFY 3
16173 +#define CRK_DH_COMPUTE_KEY 4
16174 +#define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */
16176 +#define CRF_MOD_EXP (1 << CRK_MOD_EXP)
16177 +#define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT)
16178 +#define CRF_DSA_SIGN (1 << CRK_DSA_SIGN)
16179 +#define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY)
16180 +#define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY)
16183 + * done against open of /dev/crypto, to get a cloned descriptor.
16184 + * Please use F_SETFD against the cloned descriptor.
16186 +#define CRIOGET _IOWR('c', 100, u_int32_t)
16187 +#define CRIOASYMFEAT CIOCASYMFEAT
16188 +#define CRIOFINDDEV CIOCFINDDEV
16190 +/* the following are done against the cloned descriptor */
16191 +#define CIOCGSESSION _IOWR('c', 101, struct session_op)
16192 +#define CIOCFSESSION _IOW('c', 102, u_int32_t)
16193 +#define CIOCCRYPT _IOWR('c', 103, struct crypt_op)
16194 +#define CIOCKEY _IOWR('c', 104, struct crypt_kop)
16195 +#define CIOCASYMFEAT _IOR('c', 105, u_int32_t)
16196 +#define CIOCGSESSION2 _IOWR('c', 106, struct session2_op)
16197 +#define CIOCKEY2 _IOWR('c', 107, struct crypt_kop)
16198 +#define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op)
16200 +struct cryptotstat {
16201 + struct timespec acc; /* total accumulated time */
16202 + struct timespec min; /* min time */
16203 + struct timespec max; /* max time */
16204 + u_int32_t count; /* number of observations */
16207 +struct cryptostats {
16208 + u_int32_t cs_ops; /* symmetric crypto ops submitted */
16209 + u_int32_t cs_errs; /* symmetric crypto ops that failed */
16210 +	u_int32_t	cs_kops;	/* asymmetric/key ops submitted */
16211 +	u_int32_t	cs_kerrs;	/* asymmetric/key ops that failed */
16212 + u_int32_t cs_intrs; /* crypto swi thread activations */
16213 + u_int32_t cs_rets; /* crypto return thread activations */
16214 + u_int32_t cs_blocks; /* symmetric op driver block */
16215 +	u_int32_t	cs_kblocks;	/* asymmetric op driver block */
16217 + * When CRYPTO_TIMING is defined at compile time and the
16218 + * sysctl debug.crypto is set to 1, the crypto system will
16219 + * accumulate statistics about how long it takes to process
16220 + * crypto requests at various points during processing.
16222 +	struct cryptotstat cs_invoke;	/* crypto_dispatch -> crypto_invoke */
16223 + struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */
16224 + struct cryptotstat cs_cb; /* crypto_done -> callback */
16225 + struct cryptotstat cs_finis; /* callback -> callback return */
16227 + u_int32_t cs_drops; /* crypto ops dropped due to congestion */
16232 +/* Standard initialization structure beginning */
16233 +struct cryptoini {
16234 + int cri_alg; /* Algorithm to use */
16235 + int cri_klen; /* Key length, in bits */
16236 + int cri_mlen; /* Number of bytes we want from the
16237 + entire hash. 0 means all. */
16238 + caddr_t cri_key; /* key to use */
16239 + u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */
16240 + struct cryptoini *cri_next;
16243 +/* Describe boundaries of a single crypto operation */
16244 +struct cryptodesc {
16245 + int crd_skip; /* How many bytes to ignore from start */
16246 + int crd_len; /* How many bytes to process */
16247 + int crd_inject; /* Where to inject results, if applicable */
16250 +#define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */
16251 +#define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in
16252 + place, so don't copy. */
16253 +#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */
16254 +#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */
16255 +#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */
16256 +#define CRD_F_COMP 0x0f /* Set when doing compression */
16258 + struct cryptoini CRD_INI; /* Initialization/context data */
16259 +#define crd_iv CRD_INI.cri_iv
16260 +#define crd_key CRD_INI.cri_key
16261 +#define crd_alg CRD_INI.cri_alg
16262 +#define crd_klen CRD_INI.cri_klen
16264 + struct cryptodesc *crd_next;
16267 +/* Structure describing complete operation */
16269 + struct list_head crp_next;
16270 + wait_queue_head_t crp_waitq;
16272 + u_int64_t crp_sid; /* Session ID */
16273 + int crp_ilen; /* Input data total length */
16274 + int crp_olen; /* Result total length */
16276 + int crp_etype; /*
16277 + * Error type (zero means no error).
16278 + * All error codes except EAGAIN
16279 + * indicate possible data corruption (as in,
16280 + * the data have been touched). On all
16281 + * errors, the crp_sid may have changed
16282 + * (reset to a new one), so the caller
16283 + * should always check and use the new
16284 + * value on future requests.
16288 +#define CRYPTO_F_SKBUF 0x0001 /* Input/output are skbuf chains */
16289 +#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
16290 +#define CRYPTO_F_REL 0x0004 /* Must return data in same place */
16291 +#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
16292 +#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
16293 +#define CRYPTO_F_DONE 0x0020 /* Operation completed */
16294 +#define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */
16296 + caddr_t crp_buf; /* Data to be processed */
16297 + caddr_t crp_opaque; /* Opaque pointer, passed along */
16298 + struct cryptodesc *crp_desc; /* Linked list of processing descriptors */
16300 + int (*crp_callback)(struct cryptop *); /* Callback function */
16303 +#define CRYPTO_BUF_CONTIG 0x0
16304 +#define CRYPTO_BUF_IOV 0x1
16305 +#define CRYPTO_BUF_SKBUF 0x2
16307 +#define CRYPTO_OP_DECRYPT 0x0
16308 +#define CRYPTO_OP_ENCRYPT 0x1
16311 + * Hints passed to process methods.
16313 +#define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */
16316 + struct list_head krp_next;
16317 + wait_queue_head_t krp_waitq;
16320 +#define CRYPTO_KF_DONE 0x0001 /* Operation completed */
16321 +#define CRYPTO_KF_CBIMM 0x0002 /* Do callback immediately */
16323 + u_int krp_op; /* ie. CRK_MOD_EXP or other */
16324 + u_int krp_status; /* return status */
16325 + u_short krp_iparams; /* # of input parameters */
16326 + u_short krp_oparams; /* # of output parameters */
16327 + u_int krp_crid; /* desired device, etc. */
16328 + u_int32_t krp_hid;
16329 + struct crparam krp_param[CRK_MAXPARAM]; /* kvm */
16330 + int (*krp_callback)(struct cryptkop *);
16333 +#include <ocf-compat.h>
16336 + * Session ids are 64 bits. The lower 32 bits contain a "local id" which
16337 + * is a driver-private session identifier. The upper 32 bits contain a
16338 + * "hardware id" used by the core crypto code to identify the driver and
16339 + * a copy of the driver's capabilities that can be used by client code to
16340 + * optimize operation.
16342 +#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff)
16343 +#define CRYPTO_SESID2CAPS(_sid) (((_sid) >> 32) & 0xff000000)
16344 +#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
16346 +extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
16347 +extern int crypto_freesession(u_int64_t sid);
16348 +#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
16349 +#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
16350 +#define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */
16351 +extern int32_t crypto_get_driverid(device_t dev, int flags);
16352 +extern int crypto_find_driver(const char *);
16353 +extern device_t crypto_find_device_byhid(int hid);
16354 +extern int crypto_getcaps(int hid);
16355 +extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
16356 + u_int32_t flags);
16357 +extern int crypto_kregister(u_int32_t, int, u_int32_t);
16358 +extern int crypto_unregister(u_int32_t driverid, int alg);
16359 +extern int crypto_unregister_all(u_int32_t driverid);
16360 +extern int crypto_dispatch(struct cryptop *crp);
16361 +extern int crypto_kdispatch(struct cryptkop *);
16362 +#define CRYPTO_SYMQ 0x1
16363 +#define CRYPTO_ASYMQ 0x2
16364 +extern int crypto_unblock(u_int32_t, int);
16365 +extern void crypto_done(struct cryptop *crp);
16366 +extern void crypto_kdone(struct cryptkop *);
16367 +extern int crypto_getfeat(int *);
16369 +extern void crypto_freereq(struct cryptop *crp);
16370 +extern struct cryptop *crypto_getreq(int num);
16372 +extern int crypto_usercrypto; /* userland may do crypto requests */
16373 +extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */
16374 +extern int crypto_devallowsoft; /* only use hardware crypto */
16377 + * random number support, crypto_unregister_all will unregister
16379 +extern int crypto_rregister(u_int32_t driverid,
16380 + int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
16381 +extern int crypto_runregister_all(u_int32_t driverid);
16384 + * Crypto-related utility routines used mainly by drivers.
16386 + * XXX these don't really belong here; but for now they're
16387 + * kept apart from the rest of the system.
16390 +extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
16391 +extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
16392 +extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
16394 +extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
16396 +extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
16398 +extern int crypto_apply(int flags, caddr_t buf, int off, int len,
16399 + int (*f)(void *, void *, u_int), void *arg);
16401 +#endif /* __KERNEL__ */
16402 +#endif /* _CRYPTO_CRYPTO_H_ */
16404 +++ b/crypto/ocf/ocfnull/ocfnull.c
16407 + * An OCF module for determining the cost of crypto versus the cost of
16408 + * IPSec processing outside of OCF. This module gives us the effect of
16409 + * zero cost encryption, of course you will need to run it at both ends
16410 + * since it does no crypto at all.
16412 + * Written by David McCullough <david_mccullough@securecomputing.com>
16413 + * Copyright (C) 2006-2007 David McCullough
16417 + * The free distribution and use of this software in both source and binary
16418 + * form is allowed (with or without changes) provided that:
16420 + * 1. distributions of this source code include the above copyright
16421 + * notice, this list of conditions and the following disclaimer;
16423 + * 2. distributions in binary form include the above copyright
16424 + * notice, this list of conditions and the following disclaimer
16425 + * in the documentation and/or other associated materials;
16427 + * 3. the copyright holder's name is not used to endorse products
16428 + * built using this software without specific written permission.
16430 + * ALTERNATIVELY, provided that this notice is retained in full, this product
16431 + * may be distributed under the terms of the GNU General Public License (GPL),
16432 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
16436 + * This software is provided 'as is' with no explicit or implied warranties
16437 + * in respect of its properties, including, but not limited to, correctness
16438 + * and/or fitness for purpose.
16441 +#ifndef AUTOCONF_INCLUDED
16442 +#include <linux/config.h>
16444 +#include <linux/module.h>
16445 +#include <linux/init.h>
16446 +#include <linux/list.h>
16447 +#include <linux/slab.h>
16448 +#include <linux/sched.h>
16449 +#include <linux/wait.h>
16450 +#include <linux/crypto.h>
16451 +#include <linux/interrupt.h>
16453 +#include <cryptodev.h>
16456 +static int32_t null_id = -1;
16457 +static u_int32_t null_sesnum = 0;
16459 +static int null_process(device_t, struct cryptop *, int);
16460 +static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
16461 +static int null_freesession(device_t, u_int64_t);
16463 +#define debug ocfnull_debug
16464 +int ocfnull_debug = 0;
16465 +module_param(ocfnull_debug, int, 0644);
16466 +MODULE_PARM_DESC(ocfnull_debug, "Enable debug");
16469 + * dummy device structure
16473 + softc_device_decl sc_dev;
16476 +static device_method_t null_methods = {
16477 + /* crypto device methods */
16478 + DEVMETHOD(cryptodev_newsession, null_newsession),
16479 + DEVMETHOD(cryptodev_freesession,null_freesession),
16480 + DEVMETHOD(cryptodev_process, null_process),
16484 + * Generate a new software session.
16487 +null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri)
16489 + dprintk("%s()\n", __FUNCTION__);
16490 + if (sid == NULL || cri == NULL) {
16491 + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
16495 + if (null_sesnum == 0)
16497 + *sid = null_sesnum++;
16503 + * Free a session.
16506 +null_freesession(device_t arg, u_int64_t tid)
16508 + u_int32_t sid = CRYPTO_SESID2LID(tid);
16510 + dprintk("%s()\n", __FUNCTION__);
16511 + if (sid > null_sesnum) {
16512 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16516 + /* Silently accept and return */
16524 + * Process a request.
16527 +null_process(device_t arg, struct cryptop *crp, int hint)
16529 + unsigned int lid;
16531 + dprintk("%s()\n", __FUNCTION__);
16533 + /* Sanity check */
16534 + if (crp == NULL) {
16535 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16539 + crp->crp_etype = 0;
16541 + if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
16542 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16543 + crp->crp_etype = EINVAL;
16548 + * find the session we are using
16551 + lid = crp->crp_sid & 0xffffffff;
16552 + if (lid >= null_sesnum || lid == 0) {
16553 + crp->crp_etype = ENOENT;
16554 + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
16559 + crypto_done(crp);
16565 + * our driver startup and shutdown routines
16571 + dprintk("%s(%p)\n", __FUNCTION__, null_init);
16573 + memset(&nulldev, 0, sizeof(nulldev));
16574 + softc_device_init(&nulldev, "ocfnull", 0, null_methods);
16576 + null_id = crypto_get_driverid(softc_get_device(&nulldev),
16577 + CRYPTOCAP_F_HARDWARE);
16579 + panic("ocfnull: crypto device cannot initialize!");
16581 +#define REGISTER(alg) \
16582 + crypto_register(null_id,alg,0,0)
16583 + REGISTER(CRYPTO_DES_CBC);
16584 + REGISTER(CRYPTO_3DES_CBC);
16585 + REGISTER(CRYPTO_RIJNDAEL128_CBC);
16586 + REGISTER(CRYPTO_MD5);
16587 + REGISTER(CRYPTO_SHA1);
16588 + REGISTER(CRYPTO_MD5_HMAC);
16589 + REGISTER(CRYPTO_SHA1_HMAC);
16598 + dprintk("%s()\n", __FUNCTION__);
16599 + crypto_unregister_all(null_id);
16603 +module_init(null_init);
16604 +module_exit(null_exit);
16606 +MODULE_LICENSE("Dual BSD/GPL");
16607 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
16608 +MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
16610 +++ b/crypto/ocf/cryptosoft.c
16613 + * An OCF module that uses the linux kernel cryptoapi, based on the
16614 + * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
16615 + * but is mostly unrecognisable,
16617 + * Written by David McCullough <david_mccullough@securecomputing.com>
16618 + * Copyright (C) 2004-2007 David McCullough
16619 + * Copyright (C) 2004-2005 Intel Corporation.
16623 + * The free distribution and use of this software in both source and binary
16624 + * form is allowed (with or without changes) provided that:
16626 + * 1. distributions of this source code include the above copyright
16627 + * notice, this list of conditions and the following disclaimer;
16629 + * 2. distributions in binary form include the above copyright
16630 + * notice, this list of conditions and the following disclaimer
16631 + * in the documentation and/or other associated materials;
16633 + * 3. the copyright holder's name is not used to endorse products
16634 + * built using this software without specific written permission.
16636 + * ALTERNATIVELY, provided that this notice is retained in full, this product
16637 + * may be distributed under the terms of the GNU General Public License (GPL),
16638 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
16642 + * This software is provided 'as is' with no explicit or implied warranties
16643 + * in respect of its properties, including, but not limited to, correctness
16644 + * and/or fitness for purpose.
16645 + * ---------------------------------------------------------------------------
16648 +#ifndef AUTOCONF_INCLUDED
16649 +#include <linux/config.h>
16651 +#include <linux/module.h>
16652 +#include <linux/init.h>
16653 +#include <linux/list.h>
16654 +#include <linux/slab.h>
16655 +#include <linux/sched.h>
16656 +#include <linux/wait.h>
16657 +#include <linux/crypto.h>
16658 +#include <linux/mm.h>
16659 +#include <linux/skbuff.h>
16660 +#include <linux/random.h>
16661 +#include <asm/scatterlist.h>
16663 +#include <cryptodev.h>
16667 + softc_device_decl sc_dev;
16670 +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
16672 +/* Software session entry */
16674 +#define SW_TYPE_CIPHER 0
16675 +#define SW_TYPE_HMAC 1
16676 +#define SW_TYPE_AUTH2 2
16677 +#define SW_TYPE_HASH 3
16678 +#define SW_TYPE_COMP 4
16679 +#define SW_TYPE_BLKCIPHER 5
16681 +struct swcr_data {
16684 + struct crypto_tfm *sw_tfm;
16691 + void *sw_comp_buf;
16693 + struct swcr_data *sw_next;
16696 +#ifndef CRYPTO_TFM_MODE_CBC
16698 + * As of linux-2.6.21 this is no longer defined, and presumably no longer
16699 + * needed to be passed into the crypto core code.
16701 +#define CRYPTO_TFM_MODE_CBC 0
16702 +#define CRYPTO_TFM_MODE_ECB 0
16705 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
16707 + * Linux 2.6.19 introduced a new Crypto API, setup macro's to convert new
16708 + * API into old API.
16711 + /* Symmetric/Block Cipher */
16712 + struct blkcipher_desc
16714 + struct crypto_tfm *tfm;
16717 + #define ecb(X) #X
16718 + #define cbc(X) #X
16719 + #define crypto_has_blkcipher(X, Y, Z) crypto_alg_available(X, 0)
16720 + #define crypto_blkcipher_cast(X) X
16721 + #define crypto_blkcipher_tfm(X) X
16722 + #define crypto_alloc_blkcipher(X, Y, Z) crypto_alloc_tfm(X, mode)
16723 + #define crypto_blkcipher_ivsize(X) crypto_tfm_alg_ivsize(X)
16724 + #define crypto_blkcipher_blocksize(X) crypto_tfm_alg_blocksize(X)
16725 + #define crypto_blkcipher_setkey(X, Y, Z) crypto_cipher_setkey(X, Y, Z)
16726 + #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
16727 + crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
16728 + #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
16729 + crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
16731 + /* Hash/HMAC/Digest */
16734 + struct crypto_tfm *tfm;
16736 + #define hmac(X) #X
16737 + #define crypto_has_hash(X, Y, Z) crypto_alg_available(X, 0)
16738 + #define crypto_hash_cast(X) X
16739 + #define crypto_hash_tfm(X) X
16740 + #define crypto_alloc_hash(X, Y, Z) crypto_alloc_tfm(X, mode)
16741 + #define crypto_hash_digestsize(X) crypto_tfm_alg_digestsize(X)
16742 + #define crypto_hash_digest(W, X, Y, Z) \
16743 + crypto_digest_digest((W)->tfm, X, sg_num, Z)
16745 + /* Asymmetric Cipher */
16746 + #define crypto_has_cipher(X, Y, Z) crypto_alg_available(X, 0)
16748 + /* Compression */
16749 + #define crypto_has_comp(X, Y, Z) crypto_alg_available(X, 0)
16750 + #define crypto_comp_tfm(X) X
16751 + #define crypto_comp_cast(X) X
16752 + #define crypto_alloc_comp(X, Y, Z) crypto_alloc_tfm(X, mode)
16754 + #define ecb(X) "ecb(" #X ")"
16755 + #define cbc(X) "cbc(" #X ")"
16756 + #define hmac(X) "hmac(" #X ")"
16757 +#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
16759 +struct crypto_details
16767 + * This needs to be kept updated with CRYPTO_xxx list (cryptodev.h).
16768 + * If the Algorithm is not supported, then insert a {NULL, 0, 0} entry.
16770 + * IMPORTANT: The index to the array IS CRYPTO_xxx.
16772 +static struct crypto_details crypto_details[CRYPTO_ALGORITHM_MAX + 1] = {
16774 + /* CRYPTO_xxx index starts at 1 */
16775 + { cbc(des), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16776 + { cbc(des3_ede), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16777 + { cbc(blowfish), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16778 + { cbc(cast5), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16779 + { cbc(skipjack), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16780 + { hmac(md5), 0, SW_TYPE_HMAC },
16781 + { hmac(sha1), 0, SW_TYPE_HMAC },
16782 + { hmac(ripemd160), 0, SW_TYPE_HMAC },
16783 + { "md5-kpdk??", 0, SW_TYPE_HASH },
16784 + { "sha1-kpdk??", 0, SW_TYPE_HASH },
16785 + { cbc(aes), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16786 + { ecb(arc4), CRYPTO_TFM_MODE_ECB, SW_TYPE_BLKCIPHER },
16787 + { "md5", 0, SW_TYPE_HASH },
16788 + { "sha1", 0, SW_TYPE_HASH },
16789 + { hmac(digest_null), 0, SW_TYPE_HMAC },
16790 + { cbc(cipher_null), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16791 + { "deflate", 0, SW_TYPE_COMP },
16792 + { hmac(sha256), 0, SW_TYPE_HMAC },
16793 + { hmac(sha384), 0, SW_TYPE_HMAC },
16794 + { hmac(sha512), 0, SW_TYPE_HMAC },
16795 + { cbc(camellia), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16796 + { "sha256", 0, SW_TYPE_HASH },
16797 + { "sha384", 0, SW_TYPE_HASH },
16798 + { "sha512", 0, SW_TYPE_HASH },
16799 + { "ripemd160", 0, SW_TYPE_HASH },
16802 +int32_t swcr_id = -1;
16803 +module_param(swcr_id, int, 0444);
16804 +MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
16806 +int swcr_fail_if_compression_grows = 1;
16807 +module_param(swcr_fail_if_compression_grows, int, 0644);
16808 +MODULE_PARM_DESC(swcr_fail_if_compression_grows,
16809 + "Treat compression that results in more data as a failure");
16811 +static struct swcr_data **swcr_sessions = NULL;
16812 +static u_int32_t swcr_sesnum = 0;
16814 +static int swcr_process(device_t, struct cryptop *, int);
16815 +static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
16816 +static int swcr_freesession(device_t, u_int64_t);
16818 +static device_method_t swcr_methods = {
16819 + /* crypto device methods */
16820 + DEVMETHOD(cryptodev_newsession, swcr_newsession),
16821 + DEVMETHOD(cryptodev_freesession,swcr_freesession),
16822 + DEVMETHOD(cryptodev_process, swcr_process),
16825 +#define debug swcr_debug
16826 +int swcr_debug = 0;
16827 +module_param(swcr_debug, int, 0644);
16828 +MODULE_PARM_DESC(swcr_debug, "Enable debug");
16831 + * Generate a new software session.
16834 +swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
16836 + struct swcr_data **swd;
16840 + int mode, sw_type;
16842 + dprintk("%s()\n", __FUNCTION__);
16843 + if (sid == NULL || cri == NULL) {
16844 + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
16848 + if (swcr_sessions) {
16849 + for (i = 1; i < swcr_sesnum; i++)
16850 + if (swcr_sessions[i] == NULL)
16853 + i = 1; /* NB: to silence compiler warning */
16855 + if (swcr_sessions == NULL || i == swcr_sesnum) {
16856 + if (swcr_sessions == NULL) {
16857 + i = 1; /* We leave swcr_sessions[0] empty */
16858 + swcr_sesnum = CRYPTO_SW_SESSIONS;
16860 + swcr_sesnum *= 2;
16862 + swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
16863 + if (swd == NULL) {
16864 + /* Reset session number */
16865 + if (swcr_sesnum == CRYPTO_SW_SESSIONS)
16868 + swcr_sesnum /= 2;
16869 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16872 + memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
16874 + /* Copy existing sessions */
16875 + if (swcr_sessions) {
16876 + memcpy(swd, swcr_sessions,
16877 + (swcr_sesnum / 2) * sizeof(struct swcr_data *));
16878 + kfree(swcr_sessions);
16881 + swcr_sessions = swd;
16884 + swd = &swcr_sessions[i];
16888 + *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
16890 + if (*swd == NULL) {
16891 + swcr_freesession(NULL, i);
16892 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16895 + memset(*swd, 0, sizeof(struct swcr_data));
16897 + if (cri->cri_alg > CRYPTO_ALGORITHM_MAX) {
16898 + printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
16899 + swcr_freesession(NULL, i);
16903 + algo = crypto_details[cri->cri_alg].alg_name;
16904 + if (!algo || !*algo) {
16905 + printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
16906 + swcr_freesession(NULL, i);
16910 + mode = crypto_details[cri->cri_alg].mode;
16911 + sw_type = crypto_details[cri->cri_alg].sw_type;
16913 + /* Algorithm specific configuration */
16914 + switch (cri->cri_alg) {
16915 + case CRYPTO_NULL_CBC:
16916 + cri->cri_klen = 0; /* make it work with crypto API */
16922 + if (sw_type == SW_TYPE_BLKCIPHER) {
16923 + dprintk("%s crypto_alloc_blkcipher(%s, 0x%x)\n", __FUNCTION__,
16926 + (*swd)->sw_tfm = crypto_blkcipher_tfm(
16927 + crypto_alloc_blkcipher(algo, 0,
16928 + CRYPTO_ALG_ASYNC));
16929 + if (!(*swd)->sw_tfm) {
16930 + dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s,0x%x)\n",
16932 + swcr_freesession(NULL, i);
16937 + dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
16938 + __FUNCTION__,cri->cri_klen,(cri->cri_klen + 7)/8);
16939 + for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
16941 + dprintk("%s0x%x", (i % 8) ? " " : "\n ",cri->cri_key[i]);
16945 + error = crypto_blkcipher_setkey(
16946 + crypto_blkcipher_cast((*swd)->sw_tfm), cri->cri_key,
16947 + (cri->cri_klen + 7) / 8);
16949 + printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
16950 + (*swd)->sw_tfm->crt_flags);
16951 + swcr_freesession(NULL, i);
16954 + } else if (sw_type == SW_TYPE_HMAC || sw_type == SW_TYPE_HASH) {
16955 + dprintk("%s crypto_alloc_hash(%s, 0x%x)\n", __FUNCTION__,
16958 + (*swd)->sw_tfm = crypto_hash_tfm(
16959 + crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
16961 + if (!(*swd)->sw_tfm) {
16962 + dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
16964 + swcr_freesession(NULL, i);
16968 + (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
16969 + (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
16971 + if ((*swd)->u.hmac.sw_key == NULL) {
16972 + swcr_freesession(NULL, i);
16973 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16976 + memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
16977 + if (cri->cri_mlen) {
16978 + (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
16980 + (*swd)->u.hmac.sw_mlen =
16981 + crypto_hash_digestsize(
16982 + crypto_hash_cast((*swd)->sw_tfm));
16984 + } else if (sw_type == SW_TYPE_COMP) {
16985 + (*swd)->sw_tfm = crypto_comp_tfm(
16986 + crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
16987 + if (!(*swd)->sw_tfm) {
16988 + dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
16990 + swcr_freesession(NULL, i);
16993 + (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
16994 + if ((*swd)->u.sw_comp_buf == NULL) {
16995 + swcr_freesession(NULL, i);
16996 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
17000 + printk("cryptosoft: Unhandled sw_type %d\n", sw_type);
17001 + swcr_freesession(NULL, i);
17005 + (*swd)->sw_alg = cri->cri_alg;
17006 + (*swd)->sw_type = sw_type;
17008 + cri = cri->cri_next;
17009 + swd = &((*swd)->sw_next);
17015 + * Free a session.
17018 +swcr_freesession(device_t dev, u_int64_t tid)
17020 + struct swcr_data *swd;
17021 + u_int32_t sid = CRYPTO_SESID2LID(tid);
17023 + dprintk("%s()\n", __FUNCTION__);
17024 + if (sid > swcr_sesnum || swcr_sessions == NULL ||
17025 + swcr_sessions[sid] == NULL) {
17026 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17030 + /* Silently accept and return */
17034 + while ((swd = swcr_sessions[sid]) != NULL) {
17035 + swcr_sessions[sid] = swd->sw_next;
17037 + crypto_free_tfm(swd->sw_tfm);
17038 + if (swd->sw_type == SW_TYPE_COMP) {
17039 + if (swd->u.sw_comp_buf)
17040 + kfree(swd->u.sw_comp_buf);
17042 + if (swd->u.hmac.sw_key)
17043 + kfree(swd->u.hmac.sw_key);
17051 + * Process a software request.
17054 +swcr_process(device_t dev, struct cryptop *crp, int hint)
17056 + struct cryptodesc *crd;
17057 + struct swcr_data *sw;
17059 +#define SCATTERLIST_MAX 16
17060 + struct scatterlist sg[SCATTERLIST_MAX];
17061 + int sg_num, sg_len, skip;
17062 + struct sk_buff *skb = NULL;
17063 + struct uio *uiop = NULL;
17065 + dprintk("%s()\n", __FUNCTION__);
17066 + /* Sanity check */
17067 + if (crp == NULL) {
17068 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17072 + crp->crp_etype = 0;
17074 + if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
17075 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17076 + crp->crp_etype = EINVAL;
17080 + lid = crp->crp_sid & 0xffffffff;
17081 + if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
17082 + swcr_sessions[lid] == NULL) {
17083 + crp->crp_etype = ENOENT;
17084 + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
17089 + * do some error checking outside of the loop for SKB and IOV processing
17090 + * this leaves us with valid skb or uiop pointers for later
17092 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
17093 + skb = (struct sk_buff *) crp->crp_buf;
17094 + if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
17095 + printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
17096 + skb_shinfo(skb)->nr_frags);
17099 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
17100 + uiop = (struct uio *) crp->crp_buf;
17101 + if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
17102 + printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
17103 + uiop->uio_iovcnt);
17108 + /* Go through crypto descriptors, processing as we go */
17109 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
17111 + * Find the crypto context.
17113 + * XXX Note that the logic here prevents us from having
17114 + * XXX the same algorithm multiple times in a session
17115 + * XXX (or rather, we can but it won't give us the right
17116 + * XXX results). To do that, we'd need some way of differentiating
17117 + * XXX between the various instances of an algorithm (so we can
17118 + * XXX locate the correct crypto context).
17120 + for (sw = swcr_sessions[lid]; sw && sw->sw_alg != crd->crd_alg;
17121 + sw = sw->sw_next)
17124 + /* No such context ? */
17125 + if (sw == NULL) {
17126 + crp->crp_etype = EINVAL;
17127 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17131 + skip = crd->crd_skip;
17134 + * setup the SG list skip from the start of the buffer
17136 + memset(sg, 0, sizeof(sg));
17137 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
17143 + if (skip < skb_headlen(skb)) {
17144 + len = skb_headlen(skb) - skip;
17145 + if (len + sg_len > crd->crd_len)
17146 + len = crd->crd_len - sg_len;
17147 + sg_set_page(&sg[sg_num],
17148 + virt_to_page(skb->data + skip), len,
17149 + offset_in_page(skb->data + skip));
17154 + skip -= skb_headlen(skb);
17156 + for (i = 0; sg_len < crd->crd_len &&
17157 + i < skb_shinfo(skb)->nr_frags &&
17158 + sg_num < SCATTERLIST_MAX; i++) {
17159 + if (skip < skb_shinfo(skb)->frags[i].size) {
17160 + len = skb_shinfo(skb)->frags[i].size - skip;
17161 + if (len + sg_len > crd->crd_len)
17162 + len = crd->crd_len - sg_len;
17163 + sg_set_page(&sg[sg_num],
17164 + skb_shinfo(skb)->frags[i].page,
17166 + skb_shinfo(skb)->frags[i].page_offset + skip);
17171 + skip -= skb_shinfo(skb)->frags[i].size;
17173 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
17177 + for (sg_num = 0; sg_len <= crd->crd_len &&
17178 + sg_num < uiop->uio_iovcnt &&
17179 + sg_num < SCATTERLIST_MAX; sg_num++) {
17180 + if (skip <= uiop->uio_iov[sg_num].iov_len) {
17181 + len = uiop->uio_iov[sg_num].iov_len - skip;
17182 + if (len + sg_len > crd->crd_len)
17183 + len = crd->crd_len - sg_len;
17184 + sg_set_page(&sg[sg_num],
17185 + virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
17187 + offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
17191 + skip -= uiop->uio_iov[sg_num].iov_len;
17194 + sg_len = (crp->crp_ilen - skip);
17195 + if (sg_len > crd->crd_len)
17196 + sg_len = crd->crd_len;
17197 + sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip),
17198 + sg_len, offset_in_page(crp->crp_buf + skip));
17203 + switch (sw->sw_type) {
17204 + case SW_TYPE_BLKCIPHER: {
17205 + unsigned char iv[EALG_MAX_BLOCK_LEN];
17206 + unsigned char *ivp = iv;
17208 + crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
17209 + struct blkcipher_desc desc;
17211 + if (sg_len < crypto_blkcipher_blocksize(
17212 + crypto_blkcipher_cast(sw->sw_tfm))) {
17213 + crp->crp_etype = EINVAL;
17214 + dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
17215 + sg_len, crypto_blkcipher_blocksize(
17216 + crypto_blkcipher_cast(sw->sw_tfm)));
17220 + if (ivsize > sizeof(iv)) {
17221 + crp->crp_etype = EINVAL;
17222 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17226 + if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
17230 + dprintk("%s key:", __FUNCTION__);
17231 + for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
17232 + dprintk("%s0x%x", (i % 8) ? " " : "\n ",
17233 + crd->crd_key[i]);
17236 + error = crypto_blkcipher_setkey(
17237 + crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
17238 + (crd->crd_klen + 7) / 8);
17240 + dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
17241 + error, sw->sw_tfm->crt_flags);
17242 + crp->crp_etype = -error;
17246 + memset(&desc, 0, sizeof(desc));
17247 + desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
17249 + if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
17251 + if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
17252 + ivp = crd->crd_iv;
17254 + get_random_bytes(ivp, ivsize);
17257 + * do we have to copy the IV back to the buffer ?
17259 + if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
17260 + crypto_copyback(crp->crp_flags, crp->crp_buf,
17261 + crd->crd_inject, ivsize, (caddr_t)ivp);
17264 + crypto_blkcipher_encrypt_iv(&desc, sg, sg, sg_len);
17266 + } else { /*decrypt */
17268 + if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
17269 + ivp = crd->crd_iv;
17271 + crypto_copydata(crp->crp_flags, crp->crp_buf,
17272 + crd->crd_inject, ivsize, (caddr_t)ivp);
17275 + crypto_blkcipher_decrypt_iv(&desc, sg, sg, sg_len);
17278 + case SW_TYPE_HMAC:
17279 + case SW_TYPE_HASH:
17281 + char result[HASH_MAX_LEN];
17282 + struct hash_desc desc;
17284 + /* check we have room for the result */
17285 + if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
17287 + "cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d digestsize=%d\n",
17288 + crp->crp_ilen, crd->crd_skip + sg_len, crd->crd_inject,
17289 + sw->u.hmac.sw_mlen);
17290 + crp->crp_etype = EINVAL;
17294 + memset(&desc, 0, sizeof(desc));
17295 + desc.tfm = crypto_hash_cast(sw->sw_tfm);
17297 + memset(result, 0, sizeof(result));
17299 + if (sw->sw_type == SW_TYPE_HMAC) {
17300 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
17301 + crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
17302 + sg, sg_num, result);
17304 + crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
17305 + sw->u.hmac.sw_klen);
17306 + crypto_hash_digest(&desc, sg, sg_len, result);
17307 +#endif /* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
17309 + } else { /* SW_TYPE_HASH */
17310 + crypto_hash_digest(&desc, sg, sg_len, result);
17313 + crypto_copyback(crp->crp_flags, crp->crp_buf,
17314 + crd->crd_inject, sw->u.hmac.sw_mlen, result);
17318 + case SW_TYPE_COMP: {
17319 + void *ibuf = NULL;
17320 + void *obuf = sw->u.sw_comp_buf;
17321 + int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
17325 + * we need to use an additional copy if there is more than one
17326 + * input chunk since the kernel comp routines do not handle
17327 + * SG yet. Otherwise we just use the input buffer as is.
17328 + * Rather than allocate another buffer we just split the tmp
17329 + * buffer we already have.
17330 + * Perhaps we should just use zlib directly ?
17332 + if (sg_num > 1) {
17336 + for (blk = 0; blk < sg_num; blk++) {
17337 + memcpy(obuf, sg_virt(&sg[blk]),
17339 + obuf += sg[blk].length;
17343 + ibuf = sg_virt(&sg[0]);
17345 + if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
17346 + ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
17347 + ibuf, ilen, obuf, &olen);
17348 + if (!ret && olen > crd->crd_len) {
17349 + dprintk("cryptosoft: ERANGE compress %d into %d\n",
17350 + crd->crd_len, olen);
17351 + if (swcr_fail_if_compression_grows)
17354 + } else { /* decompress */
17355 + ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
17356 + ibuf, ilen, obuf, &olen);
17357 + if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
17358 + dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
17359 + "space for %d,at offset %d\n",
17360 + crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
17365 + dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
17368 + * on success copy result back,
17369 + * linux crypto API returns -errno, we need to fix that
17371 + crp->crp_etype = ret < 0 ? -ret : ret;
17373 + /* copy back the result and return its size */
17374 + crypto_copyback(crp->crp_flags, crp->crp_buf,
17375 + crd->crd_inject, olen, obuf);
17376 + crp->crp_olen = olen;
17383 + /* Unknown/unsupported algorithm */
17384 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17385 + crp->crp_etype = EINVAL;
17391 + crypto_done(crp);
17396 +cryptosoft_init(void)
17398 + int i, sw_type, mode;
17401 + dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
17403 + softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
17405 + swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
17406 + CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
17407 + if (swcr_id < 0) {
17408 + printk("Software crypto device cannot initialize!");
17412 +#define REGISTER(alg) \
17413 + crypto_register(swcr_id, alg, 0,0);
17415 + for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; ++i)
17418 + algo = crypto_details[i].alg_name;
17419 + if (!algo || !*algo)
17421 + dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
17425 + mode = crypto_details[i].mode;
17426 + sw_type = crypto_details[i].sw_type;
17430 + case SW_TYPE_CIPHER:
17431 + if (crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC))
17437 + dprintk("%s:CIPHER algorithm %d:'%s' not supported\n",
17438 + __FUNCTION__, i, algo);
17441 + case SW_TYPE_HMAC:
17442 + if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
17448 + dprintk("%s:HMAC algorithm %d:'%s' not supported\n",
17449 + __FUNCTION__, i, algo);
17452 + case SW_TYPE_HASH:
17453 + if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
17459 + dprintk("%s:HASH algorithm %d:'%s' not supported\n",
17460 + __FUNCTION__, i, algo);
17463 + case SW_TYPE_COMP:
17464 + if (crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC))
17470 + dprintk("%s:COMP algorithm %d:'%s' not supported\n",
17471 + __FUNCTION__, i, algo);
17474 + case SW_TYPE_BLKCIPHER:
17475 + if (crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC))
17481 + dprintk("%s:BLKCIPHER algorithm %d:'%s' not supported\n",
17482 + __FUNCTION__, i, algo);
17487 + "%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
17488 + __FUNCTION__, sw_type, i, algo);
17497 +cryptosoft_exit(void)
17499 + dprintk("%s()\n", __FUNCTION__);
17500 + crypto_unregister_all(swcr_id);
17504 +module_init(cryptosoft_init);
17505 +module_exit(cryptosoft_exit);
17507 +MODULE_LICENSE("Dual BSD/GPL");
17508 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
17509 +MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
17511 +++ b/crypto/ocf/rndtest.c
17516 + * OCF/Linux port done by David McCullough <david_mccullough@securecomputing.com>
17517 + * Copyright (C) 2006-2007 David McCullough
17518 + * Copyright (C) 2004-2005 Intel Corporation.
17519 + * The license and original author are listed below.
17521 + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
17522 + * All rights reserved.
17524 + * Redistribution and use in source and binary forms, with or without
17525 + * modification, are permitted provided that the following conditions
17527 + * 1. Redistributions of source code must retain the above copyright
17528 + * notice, this list of conditions and the following disclaimer.
17529 + * 2. Redistributions in binary form must reproduce the above copyright
17530 + * notice, this list of conditions and the following disclaimer in the
17531 + * documentation and/or other materials provided with the distribution.
17532 + * 3. All advertising materials mentioning features or use of this software
17533 + * must display the following acknowledgement:
17534 + * This product includes software developed by Jason L. Wright
17535 + * 4. The name of the author may not be used to endorse or promote products
17536 + * derived from this software without specific prior written permission.
17538 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17539 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17540 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17541 + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
17542 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17543 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17544 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
17545 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
17546 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
17547 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17548 + * POSSIBILITY OF SUCH DAMAGE.
17551 +#ifndef AUTOCONF_INCLUDED
17552 +#include <linux/config.h>
17554 +#include <linux/module.h>
17555 +#include <linux/list.h>
17556 +#include <linux/wait.h>
17557 +#include <linux/time.h>
17558 +#include <linux/version.h>
17559 +#include <linux/unistd.h>
17560 +#include <linux/kernel.h>
17561 +#include <linux/string.h>
17562 +#include <linux/time.h>
17563 +#include <cryptodev.h>
17564 +#include "rndtest.h"
17566 +static struct rndtest_stats rndstats;
17568 +static void rndtest_test(struct rndtest_state *);
17570 +/* The tests themselves */
17571 +static int rndtest_monobit(struct rndtest_state *);
17572 +static int rndtest_runs(struct rndtest_state *);
17573 +static int rndtest_longruns(struct rndtest_state *);
17574 +static int rndtest_chi_4(struct rndtest_state *);
17576 +static int rndtest_runs_check(struct rndtest_state *, int, int *);
17577 +static void rndtest_runs_record(struct rndtest_state *, int, int *);
17579 +static const struct rndtest_testfunc {
17580 + int (*test)(struct rndtest_state *);
17581 +} rndtest_funcs[] = {
17582 + { rndtest_monobit },
17583 + { rndtest_runs },
17584 + { rndtest_chi_4 },
17585 + { rndtest_longruns },
17588 +#define RNDTEST_NTESTS (sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
17591 +rndtest_test(struct rndtest_state *rsp)
17595 + rndstats.rst_tests++;
17596 + for (i = 0; i < RNDTEST_NTESTS; i++)
17597 + rv |= (*rndtest_funcs[i].test)(rsp);
17598 + rsp->rs_discard = (rv != 0);
17602 +extern int crypto_debug;
17603 +#define rndtest_verbose 2
17604 +#define rndtest_report(rsp, failure, fmt, a...) \
17605 + { if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }
17607 +#define RNDTEST_MONOBIT_MINONES 9725
17608 +#define RNDTEST_MONOBIT_MAXONES 10275
17611 +rndtest_monobit(struct rndtest_state *rsp)
17613 + int i, ones = 0, j;
17616 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17617 + r = rsp->rs_buf[i];
17618 + for (j = 0; j < 8; j++, r <<= 1)
17622 + if (ones > RNDTEST_MONOBIT_MINONES &&
17623 + ones < RNDTEST_MONOBIT_MAXONES) {
17624 + if (rndtest_verbose > 1)
17625 + rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)",
17626 + RNDTEST_MONOBIT_MINONES, ones,
17627 + RNDTEST_MONOBIT_MAXONES);
17630 + if (rndtest_verbose)
17631 + rndtest_report(rsp, 1,
17632 + "monobit failed (%d ones)", ones);
17633 + rndstats.rst_monobit++;
17638 +#define RNDTEST_RUNS_NINTERVAL 6
17640 +static const struct rndtest_runs_tabs {
17641 + u_int16_t min, max;
17642 +} rndtest_runs_tab[] = {
17652 +rndtest_runs(struct rndtest_state *rsp)
17654 + int i, j, ones, zeros, rv = 0;
17655 + int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL];
17658 + bzero(onei, sizeof(onei));
17659 + bzero(zeroi, sizeof(zeroi));
17660 + ones = zeros = 0;
17661 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17662 + c = rsp->rs_buf[i];
17663 + for (j = 0; j < 8; j++, c <<= 1) {
17666 + rndtest_runs_record(rsp, zeros, zeroi);
17670 + rndtest_runs_record(rsp, ones, onei);
17675 + rndtest_runs_record(rsp, ones, onei);
17676 + rndtest_runs_record(rsp, zeros, zeroi);
17678 + rv |= rndtest_runs_check(rsp, 0, zeroi);
17679 + rv |= rndtest_runs_check(rsp, 1, onei);
17682 + rndstats.rst_runs++;
17688 +rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv)
17692 + if (len > RNDTEST_RUNS_NINTERVAL)
17693 + len = RNDTEST_RUNS_NINTERVAL;
17699 +rndtest_runs_check(struct rndtest_state *rsp, int val, int *src)
17703 + for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) {
17704 + if (src[i] < rndtest_runs_tab[i].min ||
17705 + src[i] > rndtest_runs_tab[i].max) {
17706 + rndtest_report(rsp, 1,
17707 + "%s interval %d failed (%d, %d-%d)",
17708 + val ? "ones" : "zeros",
17709 + i + 1, src[i], rndtest_runs_tab[i].min,
17710 + rndtest_runs_tab[i].max);
17713 + rndtest_report(rsp, 0,
17714 + "runs pass %s interval %d (%d < %d < %d)",
17715 + val ? "ones" : "zeros",
17716 + i + 1, rndtest_runs_tab[i].min, src[i],
17717 + rndtest_runs_tab[i].max);
17724 +rndtest_longruns(struct rndtest_state *rsp)
17726 + int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0;
17729 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17730 + c = rsp->rs_buf[i];
17731 + for (j = 0; j < 8; j++, c <<= 1) {
17735 + if (ones > maxones)
17740 + if (zeros > maxzeros)
17741 + maxzeros = zeros;
17746 + if (maxones < 26 && maxzeros < 26) {
17747 + rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)",
17748 + maxones, maxzeros);
17751 + rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)",
17752 + maxones, maxzeros);
17753 + rndstats.rst_longruns++;
17759 + * chi^2 test over 4 bits: (this is called the poker test in FIPS 140-2,
17760 + * but it is really the chi^2 test over 4 bits (the poker test as described
17761 + * by Knuth vol 2 is something different, and I take him as authoritative
17762 + * on nomenclature over NIST).
17764 +#define RNDTEST_CHI4_K 16
17765 +#define RNDTEST_CHI4_K_MASK (RNDTEST_CHI4_K - 1)
17768 + * The unnormalized values are used so that we don't have to worry about
17769 + * fractional precision. The "real" value is found by:
17770 + * (V - 1562500) * (16 / 5000) = Vn (where V is the unnormalized value)
17772 +#define RNDTEST_CHI4_VMIN 1563181 /* 2.1792 */
17773 +#define RNDTEST_CHI4_VMAX 1576929 /* 46.1728 */
17776 +rndtest_chi_4(struct rndtest_state *rsp)
17778 + unsigned int freq[RNDTEST_CHI4_K], i, sum;
17780 + for (i = 0; i < RNDTEST_CHI4_K; i++)
17783 + /* Get number of occurrences of each 4 bit pattern */
17784 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17785 + freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++;
17786 + freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++;
17789 + for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++)
17790 + sum += freq[i] * freq[i];
17792 + if (sum >= 1563181 && sum <= 1576929) {
17793 + rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum);
17796 + rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum);
17797 + rndstats.rst_chi++;
17803 +rndtest_buf(unsigned char *buf)
17805 + struct rndtest_state rsp;
17807 + memset(&rsp, 0, sizeof(rsp));
17808 + rsp.rs_buf = buf;
17809 + rndtest_test(&rsp);
17810 + return(rsp.rs_discard);
17814 +++ b/crypto/ocf/rndtest.h
17816 +/* $FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $ */
17820 + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
17821 + * All rights reserved.
17823 + * Redistribution and use in source and binary forms, with or without
17824 + * modification, are permitted provided that the following conditions
17826 + * 1. Redistributions of source code must retain the above copyright
17827 + * notice, this list of conditions and the following disclaimer.
17828 + * 2. Redistributions in binary form must reproduce the above copyright
17829 + * notice, this list of conditions and the following disclaimer in the
17830 + * documentation and/or other materials provided with the distribution.
17831 + * 3. All advertising materials mentioning features or use of this software
17832 + * must display the following acknowledgement:
17833 + * This product includes software developed by Jason L. Wright
17834 + * 4. The name of the author may not be used to endorse or promote products
17835 + * derived from this software without specific prior written permission.
17837 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17838 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17839 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17840 + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
17841 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17842 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17843 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
17844 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
17845 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
17846 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17847 + * POSSIBILITY OF SUCH DAMAGE.
17851 +/* Some of the tests depend on these values */
17852 +#define RNDTEST_NBYTES 2500
17853 +#define RNDTEST_NBITS (8 * RNDTEST_NBYTES)
17855 +struct rndtest_state {
17856 + int rs_discard; /* discard/accept random data */
17857 + u_int8_t *rs_buf;
17860 +struct rndtest_stats {
17861 + u_int32_t rst_discard; /* number of bytes discarded */
17862 + u_int32_t rst_tests; /* number of test runs */
17863 + u_int32_t rst_monobit; /* monobit test failures */
17864 + u_int32_t rst_runs; /* 0/1 runs failures */
17865 + u_int32_t rst_longruns; /* longruns failures */
17866 + u_int32_t rst_chi; /* chi^2 failures */
17869 +extern int rndtest_buf(unsigned char *buf);
17871 +++ b/crypto/ocf/ocf-compat.h
17873 +#ifndef _BSD_COMPAT_H_
17874 +#define _BSD_COMPAT_H_ 1
17875 +/****************************************************************************/
17877 + * Provide compat routines for older linux kernels and BSD kernels
17879 + * Written by David McCullough <david_mccullough@securecomputing.com>
17880 + * Copyright (C) 2007 David McCullough <david_mccullough@securecomputing.com>
17884 + * The free distribution and use of this software in both source and binary
17885 + * form is allowed (with or without changes) provided that:
17887 + * 1. distributions of this source code include the above copyright
17888 + * notice, this list of conditions and the following disclaimer;
17890 + * 2. distributions in binary form include the above copyright
17891 + * notice, this list of conditions and the following disclaimer
17892 + * in the documentation and/or other associated materials;
17894 + * 3. the copyright holder's name is not used to endorse products
17895 + * built using this software without specific written permission.
17897 + * ALTERNATIVELY, provided that this notice is retained in full, this file
17898 + * may be distributed under the terms of the GNU General Public License (GPL),
17899 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
17903 + * This software is provided 'as is' with no explicit or implied warranties
17904 + * in respect of its properties, including, but not limited to, correctness
17905 + * and/or fitness for purpose.
17907 +/****************************************************************************/
17910 + * fake some BSD driver interface stuff specifically for OCF use
17913 +typedef struct ocf_device *device_t;
17916 + int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
17917 + int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
17918 + int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
17919 + int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
17920 +} device_method_t;
17921 +#define DEVMETHOD(id, func) id: func
17923 +struct ocf_device {
17924 + char name[32]; /* the driver name */
17925 + char nameunit[32]; /* the driver name + HW instance */
17927 + device_method_t methods;
17931 +#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
17932 + ((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
17933 +#define CRYPTODEV_FREESESSION(dev, sid) \
17934 + ((*(dev)->methods.cryptodev_freesession)(dev, sid))
17935 +#define CRYPTODEV_PROCESS(dev, crp, hint) \
17936 + ((*(dev)->methods.cryptodev_process)(dev, crp, hint))
17937 +#define CRYPTODEV_KPROCESS(dev, krp, hint) \
17938 + ((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
17940 +#define device_get_name(dev) ((dev)->name)
17941 +#define device_get_nameunit(dev) ((dev)->nameunit)
17942 +#define device_get_unit(dev) ((dev)->unit)
17943 +#define device_get_softc(dev) ((dev)->softc)
17945 +#define softc_device_decl \
17946 + struct ocf_device _device; \
17949 +#define softc_device_init(_sc, _name, _unit, _methods) \
17951 + strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
17952 + snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.name), "%s%d", _name, _unit); \
17953 + (_sc)->_device.unit = _unit; \
17954 + (_sc)->_device.methods = _methods; \
17955 + (_sc)->_device.softc = (void *) _sc; \
17956 + *(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
17959 +#define softc_get_device(_sc) (&(_sc)->_device)
17962 + * iomem support for 2.4 and 2.6 kernels
17964 +#include <linux/version.h>
17965 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17966 +#define ocf_iomem_t unsigned long
17969 + * implement simple workqueue like support for older kernels
17972 +#include <linux/tqueue.h>
17974 +#define work_struct tq_struct
17976 +#define INIT_WORK(wp, fp, ap) \
17978 + (wp)->sync = 0; \
17979 + (wp)->routine = (fp); \
17980 + (wp)->data = (ap); \
17983 +#define schedule_work(wp) \
17985 + queue_task((wp), &tq_immediate); \
17986 + mark_bh(IMMEDIATE_BH); \
17989 +#define flush_scheduled_work() run_task_queue(&tq_immediate)
17992 +#define ocf_iomem_t void __iomem *
17994 +#include <linux/workqueue.h>
17998 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
17999 +#include <linux/fdtable.h>
18000 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
18001 +#define files_fdtable(files) (files)
18004 +#ifdef MODULE_PARM
18005 +#undef module_param /* just in case */
18006 +#define module_param(a,b,c) MODULE_PARM(a,"i")
18009 +#define bzero(s,l) memset(s,0,l)
18010 +#define bcopy(s,d,l) memcpy(d,s,l)
18011 +#define bcmp(x, y, l) memcmp(x,y,l)
18013 +#define MIN(x,y) ((x) < (y) ? (x) : (y))
18015 +#define device_printf(dev, a...) ({ \
18016 + printk("%s: ", device_get_nameunit(dev)); printk(a); \
18020 +#define printf(fmt...) printk(fmt)
18022 +#define KASSERT(c,p) if (!(c)) { printk p ; } else
18024 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
18025 +#define ocf_daemonize(str) \
18027 + spin_lock_irq(&current->sigmask_lock); \
18028 + sigemptyset(&current->blocked); \
18029 + recalc_sigpending(current); \
18030 + spin_unlock_irq(&current->sigmask_lock); \
18031 + sprintf(current->comm, str);
18033 +#define ocf_daemonize(str) daemonize(str);
18036 +#define TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
18037 +#define TAILQ_EMPTY(q) list_empty(q)
18038 +#define TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
18040 +#define read_random(p,l) get_random_bytes(p,l)
18042 +#define DELAY(x) ((x) > 2000 ? mdelay((x)/1000) : udelay(x))
18043 +#define strtoul simple_strtoul
18045 +#define pci_get_vendor(dev) ((dev)->vendor)
18046 +#define pci_get_device(dev) ((dev)->device)
18048 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
18049 +#define pci_set_consistent_dma_mask(dev, mask) (0)
18051 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
18052 +#define pci_dma_sync_single_for_cpu pci_dma_sync_single
18055 +#ifndef DMA_32BIT_MASK
18056 +#define DMA_32BIT_MASK 0x00000000ffffffffULL
18059 +#define htole32(x) cpu_to_le32(x)
18060 +#define htobe32(x) cpu_to_be32(x)
18061 +#define htole16(x) cpu_to_le16(x)
18062 +#define htobe16(x) cpu_to_be16(x)
18064 +/* older kernels don't have these */
18068 +#define IRQ_HANDLED
18069 +#define irqreturn_t void
18071 +#ifndef IRQF_SHARED
18072 +#define IRQF_SHARED SA_SHIRQ
18075 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
18076 +# define strlcpy(dest,src,len) \
18077 + ({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
18081 +#define MAX_ERRNO 4095
18083 +#ifndef IS_ERR_VALUE
18084 +#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
18088 + * common debug for all
18091 +#define dprintk(a...) do { if (debug) printk(a); } while(0)
18093 +#define dprintk(a...)
18096 +#ifndef SLAB_ATOMIC
18097 +/* Changed in 2.6.20, must use GFP_ATOMIC now */
18098 +#define SLAB_ATOMIC GFP_ATOMIC
18102 + * need some additional support for older kernels */
18103 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
18104 +#define pci_register_driver_compat(driver, rc) \
18106 + if ((rc) > 0) { \
18108 + } else if (rc == 0) { \
18109 + (rc) = -ENODEV; \
18111 + pci_unregister_driver(driver); \
18114 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
18115 +#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
18117 +#define pci_register_driver_compat(driver,rc)
18120 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
18122 +#include <asm/scatterlist.h>
18124 +static inline void sg_set_page(struct scatterlist *sg, struct page *page,
18125 + unsigned int len, unsigned int offset)
18128 + sg->offset = offset;
18129 + sg->length = len;
18132 +static inline void *sg_virt(struct scatterlist *sg)
18134 + return page_address(sg->page) + sg->offset;
18139 +#endif /* __KERNEL__ */
18141 +/****************************************************************************/
18142 +#endif /* _BSD_COMPAT_H_ */
18144 +++ b/crypto/ocf/ep80579/icp_asym.c
18146 +/***************************************************************************
18148 + * This file is provided under a dual BSD/GPLv2 license. When using or
18149 + * redistributing this file, you may do so under either license.
18151 + * GPL LICENSE SUMMARY
18153 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
18155 + * This program is free software; you can redistribute it and/or modify
18156 + * it under the terms of version 2 of the GNU General Public License as
18157 + * published by the Free Software Foundation.
18159 + * This program is distributed in the hope that it will be useful, but
18160 + * WITHOUT ANY WARRANTY; without even the implied warranty of
18161 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18162 + * General Public License for more details.
18164 + * You should have received a copy of the GNU General Public License
18165 + * along with this program; if not, write to the Free Software
18166 + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18167 + * The full GNU General Public License is included in this distribution
18168 + * in the file called LICENSE.GPL.
18170 + * Contact Information:
18171 + * Intel Corporation
18175 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
18176 + * All rights reserved.
18178 + * Redistribution and use in source and binary forms, with or without
18179 + * modification, are permitted provided that the following conditions
18182 + * * Redistributions of source code must retain the above copyright
18183 + * notice, this list of conditions and the following disclaimer.
18184 + * * Redistributions in binary form must reproduce the above copyright
18185 + * notice, this list of conditions and the following disclaimer in
18186 + * the documentation and/or other materials provided with the
18188 + * * Neither the name of Intel Corporation nor the names of its
18189 + * contributors may be used to endorse or promote products derived
18190 + * from this software without specific prior written permission.
18192 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18193 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18194 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
18195 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
18196 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
18197 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
18198 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
18199 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
18200 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
18201 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
18202 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
18205 + * version: Security.L.1.0.130
18207 + ***************************************************************************/
18209 +#include "icp_ocf.h"
18211 +/*The following define values (containing the word 'INDEX') are used to find
18212 +the index of each input buffer of the crypto_kop struct (see OCF cryptodev.h).
18213 +These values were found through analysis of the OCF OpenSSL patch. If the
18214 +calling program uses different input buffer positions, these defines will have
18217 +/*DIFFIE HELLMAN buffer index values*/
18218 +#define ICP_DH_KRP_PARAM_PRIME_INDEX (0)
18219 +#define ICP_DH_KRP_PARAM_BASE_INDEX (1)
18220 +#define ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX (2)
18221 +#define ICP_DH_KRP_PARAM_RESULT_INDEX (3)
18223 +/*MOD EXP buffer index values*/
18224 +#define ICP_MOD_EXP_KRP_PARAM_BASE_INDEX (0)
18225 +#define ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX (1)
18226 +#define ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX (2)
18227 +#define ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX (3)
18229 +#define SINGLE_BYTE_VALUE (4)
18231 +/*MOD EXP CRT buffer index values*/
18232 +#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX (0)
18233 +#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX (1)
18234 +#define ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX (2)
18235 +#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX (3)
18236 +#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX (4)
18237 +#define ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX (5)
18238 +#define ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX (6)
18240 +/*DSA sign buffer index values*/
18241 +#define ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX (0)
18242 +#define ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX (1)
18243 +#define ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX (2)
18244 +#define ICP_DSA_SIGN_KRP_PARAM_G_INDEX (3)
18245 +#define ICP_DSA_SIGN_KRP_PARAM_X_INDEX (4)
18246 +#define ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX (5)
18247 +#define ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX (6)
18249 +/*DSA verify buffer index values*/
18250 +#define ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX (0)
18251 +#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX (1)
18252 +#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX (2)
18253 +#define ICP_DSA_VERIFY_KRP_PARAM_G_INDEX (3)
18254 +#define ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX (4)
18255 +#define ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX (5)
18256 +#define ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX (6)
18258 +/*DSA sign prime Q vs random number K size check values*/
18259 +#define DONT_RUN_LESS_THAN_CHECK (0)
18260 +#define FAIL_A_IS_GREATER_THAN_B (1)
18261 +#define FAIL_A_IS_EQUAL_TO_B (1)
18262 +#define SUCCESS_A_IS_LESS_THAN_B (0)
18263 +#define DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS (500)
18265 +/* We need to set a cryptokp success value just in case it is set or allocated
18266 + and not set to zero outside of this module */
18267 +#define CRYPTO_OP_SUCCESS (0)
18269 +static int icp_ocfDrvDHComputeKey(struct cryptkop *krp);
18271 +static int icp_ocfDrvModExp(struct cryptkop *krp);
18273 +static int icp_ocfDrvModExpCRT(struct cryptkop *krp);
18276 +icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck);
18278 +static int icp_ocfDrvDsaSign(struct cryptkop *krp);
18280 +static int icp_ocfDrvDsaVerify(struct cryptkop *krp);
18283 +icp_ocfDrvDhP1CallBack(void *callbackTag,
18284 + CpaStatus status,
18285 + void *pOpData, CpaFlatBuffer * pLocalOctetStringPV);
18288 +icp_ocfDrvModExpCallBack(void *callbackTag,
18289 + CpaStatus status,
18290 + void *pOpData, CpaFlatBuffer * pResult);
18293 +icp_ocfDrvModExpCRTCallBack(void *callbackTag,
18294 + CpaStatus status,
18295 + void *pOpData, CpaFlatBuffer * pOutputData);
18298 +icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
18299 + CpaStatus status,
18300 + void *pOpData, CpaBoolean verifyStatus);
18303 +icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
18304 + CpaStatus status,
18306 + CpaBoolean protocolStatus,
18307 + CpaFlatBuffer * pR, CpaFlatBuffer * pS);
/*
 * icp_ocfDrvPkeProcess() - OCF entry point for asymmetric (PKE) requests.
 *
 * Dispatches the cryptkop to the matching LAC-backed helper based on
 * krp->krp_op.  Rejects NULL requests, cancels requests arriving while
 * the driver is shutting down (icp_ocfDrvIsExiting), and cancels the
 * request (krp_status = ECANCELED) whenever the chosen helper fails.
 * Unsupported krp_op values yield EOPNOTSUPP.
 *
 * NOTE(review): extraction gaps — the closing braces / break statements
 * between the switch cases and the `default:` label are missing here;
 * presumably each case ends with `break;` before the next — confirm
 * against the original patch.
 */
18309 +/* Name : icp_ocfDrvPkeProcess
18311 + * Description : This function will choose which PKE process to follow
18312 + * based on the input arguments
18314 +int icp_ocfDrvPkeProcess(device_t dev, struct cryptkop *krp, int hint)
18316 +	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
18318 +	if (NULL == krp) {
18319 +		DPRINTK("%s(): Invalid input parameters, cryptkop = %p\n",
18320 +			__FUNCTION__, krp);
	/* Driver unloading: fail fast rather than submit new LAC work */
18324 +	if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
18325 +		krp->krp_status = ECANCELED;
18326 +		return ECANCELED;
18329 +	switch (krp->krp_op) {
18330 +	case CRK_DH_COMPUTE_KEY:
18331 +		DPRINTK("%s() doing DH_COMPUTE_KEY\n", __FUNCTION__);
18332 +		lacStatus = icp_ocfDrvDHComputeKey(krp);
18333 +		if (CPA_STATUS_SUCCESS != lacStatus) {
18334 +			EPRINTK("%s(): icp_ocfDrvDHComputeKey failed "
18335 +				"(%d).\n", __FUNCTION__, lacStatus);
18336 +			krp->krp_status = ECANCELED;
18337 +			return ECANCELED;
18342 +	case CRK_MOD_EXP:
18343 +		DPRINTK("%s() doing MOD_EXP \n", __FUNCTION__);
18344 +		lacStatus = icp_ocfDrvModExp(krp);
18345 +		if (CPA_STATUS_SUCCESS != lacStatus) {
18346 +			EPRINTK("%s(): icp_ocfDrvModExp failed (%d).\n",
18347 +				__FUNCTION__, lacStatus);
18348 +			krp->krp_status = ECANCELED;
18349 +			return ECANCELED;
18354 +	case CRK_MOD_EXP_CRT:
18355 +		DPRINTK("%s() doing MOD_EXP_CRT \n", __FUNCTION__);
18356 +		lacStatus = icp_ocfDrvModExpCRT(krp);
18357 +		if (CPA_STATUS_SUCCESS != lacStatus) {
18358 +			EPRINTK("%s(): icp_ocfDrvModExpCRT "
18359 +				"failed (%d).\n", __FUNCTION__, lacStatus);
18360 +			krp->krp_status = ECANCELED;
18361 +			return ECANCELED;
18366 +	case CRK_DSA_SIGN:
18367 +		DPRINTK("%s() doing DSA_SIGN \n", __FUNCTION__);
18368 +		lacStatus = icp_ocfDrvDsaSign(krp);
18369 +		if (CPA_STATUS_SUCCESS != lacStatus) {
18370 +			EPRINTK("%s(): icp_ocfDrvDsaSign "
18371 +				"failed (%d).\n", __FUNCTION__, lacStatus);
18372 +			krp->krp_status = ECANCELED;
18373 +			return ECANCELED;
18378 +	case CRK_DSA_VERIFY:
18379 +		DPRINTK("%s() doing DSA_VERIFY \n", __FUNCTION__);
18380 +		lacStatus = icp_ocfDrvDsaVerify(krp);
18381 +		if (CPA_STATUS_SUCCESS != lacStatus) {
18382 +			EPRINTK("%s(): icp_ocfDrvDsaVerify "
18383 +				"failed (%d).\n", __FUNCTION__, lacStatus);
18384 +			krp->krp_status = ECANCELED;
18385 +			return ECANCELED;
	/* default: unknown asymmetric operation */
18391 +		EPRINTK("%s(): Asymettric function not "
18392 +			"supported (%d).\n", __FUNCTION__, krp->krp_op);
18393 +		krp->krp_status = EOPNOTSUPP;
18394 +		return EOPNOTSUPP;
18397 +	return ICP_OCF_DRV_STATUS_SUCCESS;
/*
 * icp_ocfDrvSwapBytes() - reverse the byte order of a buffer in place.
 *
 * Walks pointers inward from both ends, swapping bytes, to convert between
 * the little-endian buffers OCF supplies and the big-endian buffers LAC
 * expects.  buff_len_bytes is halved so each pair is swapped exactly once.
 *
 * NOTE(review): extraction gap — the statements that load *num into
 * hold_val, copy *end_ptr to *num, and advance/retreat the two pointers
 * (original lines 18417-18419, 18421) are missing here; only the final
 * `*end_ptr = hold_val;` survived.  Confirm against the original patch.
 */
18406 +static void inline
18407 +icp_ocfDrvSwapBytes(u_int8_t * num, u_int32_t buff_len_bytes)
18411 +	u_int8_t *end_ptr;
18412 +	u_int8_t hold_val;
18414 +	end_ptr = num + (buff_len_bytes - 1);
18415 +	buff_len_bytes = buff_len_bytes >> 1;
18416 +	for (i = 0; i < buff_len_bytes; i++) {
18420 +		*end_ptr = hold_val;
/*
 * icp_ocfDrvDHComputeKey() - map an OCF Diffie-Hellman request onto
 * cpaCyDhKeyGenPhase1().
 *
 * Both DH phases reduce to a modular exponentiation, so a single LAC
 * phase-1 call serves both.  Validates that the prime length is a
 * multiple of 8 bits and that the result buffer matches the prime size,
 * allocates op-data and a flat buffer from the driver's kmem caches,
 * byte-swaps the (little-endian) OCF inputs into LAC's big-endian form,
 * and submits asynchronously; icp_ocfDrvDhP1CallBack() completes the krp.
 *
 * Returns the LAC status; on allocation/validation failure sets
 * krp->krp_status (EINVAL/ENOMEM).  On LAC failure both allocations are
 * freed here.  NOTE(review): the `return` statements after several
 * krp_status assignments are missing from this extraction.
 */
18425 +/* Name : icp_ocfDrvDHComputeKey
18427 + * Description : This function will map Diffie Hellman calls from OCF
18428 + * to the LAC API. OCF uses this function for Diffie Hellman Phase1 and
18429 + * Phase2. LAC has a separate Diffie Hellman Phase2 call, however both phases
18430 + * break down to a modular exponentiation.
18432 +static int icp_ocfDrvDHComputeKey(struct cryptkop *krp)
18434 +	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
18435 +	void *callbackTag = NULL;
18436 +	CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
18437 +	CpaFlatBuffer *pLocalOctetStringPV = NULL;
18438 +	uint32_t dh_prime_len_bytes = 0, dh_prime_len_bits = 0;
18440 +	/* Input checks - check prime is a multiple of 8 bits to allow for
18441 +	   allocation later */
18442 +	dh_prime_len_bits =
18443 +	    (krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_nbits);
18445 +	/* LAC can reject prime lengths based on prime key sizes, we just
18446 +	   need to make sure we can allocate space for the base and
18447 +	   exponent buffers correctly */
18448 +	if ((dh_prime_len_bits % NUM_BITS_IN_BYTE) != 0) {
18449 +		APRINTK("%s(): Warning Prime number buffer size is not a "
18450 +			"multiple of 8 bits\n", __FUNCTION__);
18453 +	/* Result storage space should be the same size as the prime as this
18454 +	   value can take up the same amount of storage space */
18455 +	if (dh_prime_len_bits !=
18456 +	    krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits) {
18457 +		DPRINTK("%s(): Return Buffer must be the same size "
18458 +			"as the Prime buffer\n", __FUNCTION__);
18459 +		krp->krp_status = EINVAL;
18462 +	/* Switch to size in bytes */
18463 +	BITS_TO_BYTES(dh_prime_len_bytes, dh_prime_len_bits);
	/* krp is recovered from the tag in the completion callback */
18465 +	callbackTag = krp;
18467 +	pPhase1OpData = kmem_cache_zalloc(drvDH_zone, GFP_KERNEL);
18468 +	if (NULL == pPhase1OpData) {
18469 +		APRINTK("%s():Failed to get memory for key gen data\n",
18471 +		krp->krp_status = ENOMEM;
18475 +	pLocalOctetStringPV = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
18476 +	if (NULL == pLocalOctetStringPV) {
18477 +		APRINTK("%s():Failed to get memory for pLocalOctetStringPV\n",
18479 +		kmem_cache_free(drvDH_zone, pPhase1OpData);
18480 +		krp->krp_status = ENOMEM;
18484 +	/* Link parameters */
	/* Inputs point directly at the OCF-supplied buffers; the swaps below
	 * therefore mutate the caller's data in place. */
18485 +	pPhase1OpData->primeP.pData =
18486 +	    krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_p;
18488 +	pPhase1OpData->primeP.dataLenInBytes = dh_prime_len_bytes;
18490 +	icp_ocfDrvSwapBytes(pPhase1OpData->primeP.pData, dh_prime_len_bytes);
18492 +	pPhase1OpData->baseG.pData =
18493 +	    krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_p;
18495 +	BITS_TO_BYTES(pPhase1OpData->baseG.dataLenInBytes,
18496 +		      krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_nbits);
18498 +	icp_ocfDrvSwapBytes(pPhase1OpData->baseG.pData,
18499 +			    pPhase1OpData->baseG.dataLenInBytes);
18501 +	pPhase1OpData->privateValueX.pData =
18502 +	    krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].crp_p;
18504 +	BITS_TO_BYTES(pPhase1OpData->privateValueX.dataLenInBytes,
18505 +		      krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].
18508 +	icp_ocfDrvSwapBytes(pPhase1OpData->privateValueX.pData,
18509 +			    pPhase1OpData->privateValueX.dataLenInBytes);
18511 +	/* Output parameters */
18512 +	pLocalOctetStringPV->pData =
18513 +	    krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_p;
18515 +	BITS_TO_BYTES(pLocalOctetStringPV->dataLenInBytes,
18516 +		      krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits);
	/* Asynchronous submit: on success, ownership of both allocations
	 * passes to icp_ocfDrvDhP1CallBack(). */
18518 +	lacStatus = cpaCyDhKeyGenPhase1(CPA_INSTANCE_HANDLE_SINGLE,
18519 +					icp_ocfDrvDhP1CallBack,
18520 +					callbackTag, pPhase1OpData,
18521 +					pLocalOctetStringPV);
18523 +	if (CPA_STATUS_SUCCESS != lacStatus) {
18524 +		EPRINTK("%s(): DH Phase 1 Key Gen failed (%d).\n",
18525 +			__FUNCTION__, lacStatus);
18526 +		icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
18527 +		kmem_cache_free(drvDH_zone, pPhase1OpData);
18530 +	return lacStatus;
/*
 * icp_ocfDrvModExp() - map an OCF modular-exponentiation request onto
 * cpaCyLnModExp().
 *
 * Validates the modulus size (warns if not byte-aligned; result buffer
 * must be at least modulus-sized), allocates op-data and a result flat
 * buffer, byte-swaps the OCF little-endian inputs to big-endian, and
 * submits asynchronously; icp_ocfDrvModExpCallBack() completes the krp.
 *
 * Special case: bases smaller than one byte (e.g. the base value 2 sent
 * by the Openswan Pluto OCF patch) are taken from the crp_nbits field
 * itself, converted to network byte order via htonl(); the callback
 * converts it back with ntohl().
 *
 * NOTE(review): extraction gaps — several `return` statements after
 * krp_status assignments are missing, and the line at "18633" below
 * appears to be the tail of a lost `pResult->pData =` assignment
 * (original line 18632).  Confirm against the original patch.
 */
18533 +/* Name : icp_ocfDrvModExp
18535 + * Description : This function will map ordinary Modular Exponentiation calls
18536 + * from OCF to the LAC API.
18539 +static int icp_ocfDrvModExp(struct cryptkop *krp)
18541 +	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
18542 +	void *callbackTag = NULL;
18543 +	CpaCyLnModExpOpData *pModExpOpData = NULL;
18544 +	CpaFlatBuffer *pResult = NULL;
18546 +	if ((krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits %
18547 +	     NUM_BITS_IN_BYTE) != 0) {
18548 +		DPRINTK("%s(): Warning - modulus buffer size (%d) is not a "
18549 +			"multiple of 8 bits\n", __FUNCTION__,
18550 +			krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
18554 +	/* Result storage space should be the same size as the prime as this
18555 +	   value can take up the same amount of storage space */
18556 +	if (krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits >
18557 +	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_nbits) {
18558 +		APRINTK("%s(): Return Buffer size must be the same or"
18559 +			" greater than the Modulus buffer\n", __FUNCTION__);
18560 +		krp->krp_status = EINVAL;
18564 +	callbackTag = krp;
18566 +	pModExpOpData = kmem_cache_zalloc(drvLnModExp_zone, GFP_KERNEL);
18567 +	if (NULL == pModExpOpData) {
18568 +		APRINTK("%s():Failed to get memory for key gen data\n",
18570 +		krp->krp_status = ENOMEM;
18574 +	pResult = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
18575 +	if (NULL == pResult) {
18576 +		APRINTK("%s():Failed to get memory for ModExp result\n",
18578 +		kmem_cache_free(drvLnModExp_zone, pModExpOpData);
18579 +		krp->krp_status = ENOMEM;
18583 +	/* Link parameters */
18584 +	pModExpOpData->modulus.pData =
18585 +	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_p;
18586 +	BITS_TO_BYTES(pModExpOpData->modulus.dataLenInBytes,
18587 +		      krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
18590 +	icp_ocfDrvSwapBytes(pModExpOpData->modulus.pData,
18591 +			    pModExpOpData->modulus.dataLenInBytes);
18593 +	/*OCF patch to Openswan Pluto regularly sends the base value as 2
18594 +	   bits in size. In this case, it has been found it is better to
18595 +	   use the base size memory space as the input buffer (if the number
18596 +	   is in bits is less than a byte, the number of bits is the input
18598 +	if (krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits <
18599 +	    NUM_BITS_IN_BYTE) {
18600 +		DPRINTK("%s : base is small (%d)\n", __FUNCTION__, krp->
18601 +			krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits);
18602 +		pModExpOpData->base.dataLenInBytes = SINGLE_BYTE_VALUE;
		/* Point base.pData at the crp_nbits storage itself and make
		 * it big-endian in place; undone by ntohl() in the callback */
18603 +		pModExpOpData->base.pData =
18604 +		    (uint8_t *) & (krp->
18605 +				   krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
18607 +		*((uint32_t *) pModExpOpData->base.pData) =
18608 +		    htonl(*((uint32_t *) pModExpOpData->base.pData));
18612 +		DPRINTK("%s : base is big (%d)\n", __FUNCTION__, krp->
18613 +			krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits);
18614 +		pModExpOpData->base.pData =
18615 +		    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_p;
18616 +		BITS_TO_BYTES(pModExpOpData->base.dataLenInBytes,
18617 +			      krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
18619 +		icp_ocfDrvSwapBytes(pModExpOpData->base.pData,
18620 +				    pModExpOpData->base.dataLenInBytes);
18623 +	pModExpOpData->exponent.pData =
18624 +	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].crp_p;
18625 +	BITS_TO_BYTES(pModExpOpData->exponent.dataLenInBytes,
18626 +		      krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].
18629 +	icp_ocfDrvSwapBytes(pModExpOpData->exponent.pData,
18630 +			    pModExpOpData->exponent.dataLenInBytes);
18631 +	/* Output parameters */
	/* NOTE(review): truncated — presumably `pResult->pData =` was on the
	 * lost line 18632 above this assignment's right-hand side. */
18633 +	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_p,
18634 +	BITS_TO_BYTES(pResult->dataLenInBytes,
18635 +		      krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].
18638 +	lacStatus = cpaCyLnModExp(CPA_INSTANCE_HANDLE_SINGLE,
18639 +				  icp_ocfDrvModExpCallBack,
18640 +				  callbackTag, pModExpOpData, pResult);
18642 +	if (CPA_STATUS_SUCCESS != lacStatus) {
18643 +		EPRINTK("%s(): Mod Exp Operation failed (%d).\n",
18644 +			__FUNCTION__, lacStatus);
18645 +		krp->krp_status = ECANCELED;
18646 +		icp_ocfDrvFreeFlatBuffer(pResult);
18647 +		kmem_cache_free(drvLnModExp_zone, pModExpOpData);
18650 +	return lacStatus;
/*
 * icp_ocfDrvModExpCRT() - map an OCF Mod-Exp-CRT request onto LAC's RSA
 * type-2 decrypt, cpaCyRsaDecrypt().
 *
 * Builds a CpaCyRsaDecryptOpData with a rep-2 private key (P, Q, dP, dQ,
 * qInv) from the krp parameters, byte-swapping each input in place, and
 * submits asynchronously; icp_ocfDrvModExpCRTCallBack() completes the krp.
 * Because this rides the RSA decrypt path, P and Q must genuinely be
 * prime (per the original header comment, primality is the caller's
 * responsibility beyond LAC's basic checks).
 *
 * NOTE(review): the version/privateKeyRepType fields are assigned twice
 * (original lines 18693-18696 and again 18709-18712) — redundant but
 * harmless; a cleanup candidate once the full patch is in view.
 * NOTE(review): `return` statements after the ENOMEM assignments are
 * missing from this extraction.
 */
18653 +/* Name : icp_ocfDrvModExpCRT
18655 + * Description : This function will map ordinary Modular Exponentiation Chinese
18656 + * Remainder Theorem implementaion calls from OCF to the LAC API.
18658 + * Note : Mod Exp CRT for this driver is accelerated through LAC RSA type 2
18659 + * decrypt operation. Therefore P and Q input values must always be prime
18660 + * numbers. Although basic primality checks are done in LAC, it is up to the
18661 + * user to do any correct prime number checking before passing the inputs.
18664 +static int icp_ocfDrvModExpCRT(struct cryptkop *krp)
18666 +	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
18667 +	CpaCyRsaDecryptOpData *rsaDecryptOpData = NULL;
18668 +	void *callbackTag = NULL;
18669 +	CpaFlatBuffer *pOutputData = NULL;
18671 +	/*Parameter input checks are all done by LAC, no need to repeat
18673 +	callbackTag = krp;
18675 +	rsaDecryptOpData = kmem_cache_zalloc(drvRSADecrypt_zone, GFP_KERNEL);
18676 +	if (NULL == rsaDecryptOpData) {
18677 +		APRINTK("%s():Failed to get memory"
18678 +			" for MOD EXP CRT Op data struct\n", __FUNCTION__);
18679 +		krp->krp_status = ENOMEM;
18683 +	rsaDecryptOpData->pRecipientPrivateKey
18684 +	    = kmem_cache_zalloc(drvRSAPrivateKey_zone, GFP_KERNEL);
18685 +	if (NULL == rsaDecryptOpData->pRecipientPrivateKey) {
18686 +		APRINTK("%s():Failed to get memory for MOD EXP CRT"
18687 +			" private key values struct\n", __FUNCTION__);
18688 +		kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
18689 +		krp->krp_status = ENOMEM;
	/* First (of two, see NOTE above) rep-2 key-type initialisation */
18693 +	rsaDecryptOpData->pRecipientPrivateKey->
18694 +	    version = CPA_CY_RSA_VERSION_TWO_PRIME;
18695 +	rsaDecryptOpData->pRecipientPrivateKey->
18696 +	    privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
18698 +	pOutputData = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
18699 +	if (NULL == pOutputData) {
18700 +		APRINTK("%s():Failed to get memory"
18701 +			" for MOD EXP CRT output data\n", __FUNCTION__);
18702 +		kmem_cache_free(drvRSAPrivateKey_zone,
18703 +				rsaDecryptOpData->pRecipientPrivateKey);
18704 +		kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
18705 +		krp->krp_status = ENOMEM;
	/* Duplicate of the initialisation at 18693-18696 above */
18709 +	rsaDecryptOpData->pRecipientPrivateKey->
18710 +	    version = CPA_CY_RSA_VERSION_TWO_PRIME;
18711 +	rsaDecryptOpData->pRecipientPrivateKey->
18712 +	    privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
18714 +	/* Link parameters */
18715 +	rsaDecryptOpData->inputData.pData =
18716 +	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].crp_p;
18717 +	BITS_TO_BYTES(rsaDecryptOpData->inputData.dataLenInBytes,
18718 +		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].
18721 +	icp_ocfDrvSwapBytes(rsaDecryptOpData->inputData.pData,
18722 +			    rsaDecryptOpData->inputData.dataLenInBytes);
18724 +	rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime1P.pData =
18725 +	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].crp_p;
18726 +	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
18727 +		      prime1P.dataLenInBytes,
18728 +		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].
18731 +	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
18732 +			    privateKeyRep2.prime1P.pData,
18733 +			    rsaDecryptOpData->pRecipientPrivateKey->
18734 +			    privateKeyRep2.prime1P.dataLenInBytes);
18736 +	rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime2Q.pData =
18737 +	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].crp_p;
18738 +	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
18739 +		      prime2Q.dataLenInBytes,
18740 +		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].
18743 +	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
18744 +			    privateKeyRep2.prime2Q.pData,
18745 +			    rsaDecryptOpData->pRecipientPrivateKey->
18746 +			    privateKeyRep2.prime2Q.dataLenInBytes);
18748 +	rsaDecryptOpData->pRecipientPrivateKey->
18749 +	    privateKeyRep2.exponent1Dp.pData =
18750 +	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].crp_p;
18751 +	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
18752 +		      exponent1Dp.dataLenInBytes,
18754 +		      krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].
18757 +	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
18758 +			    privateKeyRep2.exponent1Dp.pData,
18759 +			    rsaDecryptOpData->pRecipientPrivateKey->
18760 +			    privateKeyRep2.exponent1Dp.dataLenInBytes);
18762 +	rsaDecryptOpData->pRecipientPrivateKey->
18763 +	    privateKeyRep2.exponent2Dq.pData =
18764 +	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].crp_p;
18765 +	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
18766 +		      privateKeyRep2.exponent2Dq.dataLenInBytes,
18768 +		      krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].
18771 +	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
18772 +			    privateKeyRep2.exponent2Dq.pData,
18773 +			    rsaDecryptOpData->pRecipientPrivateKey->
18774 +			    privateKeyRep2.exponent2Dq.dataLenInBytes);
18776 +	rsaDecryptOpData->pRecipientPrivateKey->
18777 +	    privateKeyRep2.coefficientQInv.pData =
18778 +	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].crp_p;
18779 +	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
18780 +		      privateKeyRep2.coefficientQInv.dataLenInBytes,
18782 +		      krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].
18785 +	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
18786 +			    privateKeyRep2.coefficientQInv.pData,
18787 +			    rsaDecryptOpData->pRecipientPrivateKey->
18788 +			    privateKeyRep2.coefficientQInv.dataLenInBytes);
18790 +	/* Output Parameter */
18791 +	pOutputData->pData =
18792 +	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].crp_p;
18793 +	BITS_TO_BYTES(pOutputData->dataLenInBytes,
18794 +		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].
18797 +	lacStatus = cpaCyRsaDecrypt(CPA_INSTANCE_HANDLE_SINGLE,
18798 +				    icp_ocfDrvModExpCRTCallBack,
18799 +				    callbackTag, rsaDecryptOpData, pOutputData);
18801 +	if (CPA_STATUS_SUCCESS != lacStatus) {
18802 +		EPRINTK("%s(): Mod Exp CRT Operation failed (%d).\n",
18803 +			__FUNCTION__, lacStatus);
18804 +		krp->krp_status = ECANCELED;
18805 +		icp_ocfDrvFreeFlatBuffer(pOutputData);
18806 +		kmem_cache_free(drvRSAPrivateKey_zone,
18807 +				rsaDecryptOpData->pRecipientPrivateKey);
18808 +		kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
18811 +	return lacStatus;
/*
 * icp_ocfDrvCheckALessThanB() - big-endian magnitude compare of two
 * equal-length buffers (A = K, B = Q), most-significant byte first.
 *
 * Used by DSA sign to verify the random K is strictly less than prime Q.
 * Returns SUCCESS_A_IS_LESS_THAN_B, FAIL_A_IS_GREATER_THAN_B, or
 * FAIL_A_IS_EQUAL_TO_B.  When *doCheck equals DONT_RUN_LESS_THAN_CHECK
 * the comparison is skipped and "greater" is returned (forcing the
 * caller's retry loop to regenerate K — see icp_ocfDrvDsaSign()).
 *
 * NOTE(review): extraction gaps — the statements advancing MSB_K/MSB_Q
 * inside the while loop (and presumably the *doCheck bookkeeping) are
 * missing; confirm against the original patch.
 */
18814 +/* Name : icp_ocfDrvCheckALessThanB
18816 + * Description : This function will check whether the first argument is less
18817 + * than the second. It is used to check whether the DSA RS sign Random K
18818 + * value is less than the Prime Q value (as defined in the specification)
18822 +icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck)
18825 +	uint8_t *MSB_K = pK->pData;
18826 +	uint8_t *MSB_Q = pQ->pData;
18827 +	uint32_t buffer_lengths_in_bytes = pQ->dataLenInBytes;
18829 +	if (DONT_RUN_LESS_THAN_CHECK == *doCheck) {
18830 +		return FAIL_A_IS_GREATER_THAN_B;
18834 +if A == B, check next MSB
18835 +if A > B, return A_IS_GREATER_THAN_B
18836 +if A < B, return A_IS_LESS_THAN_B (success)
18838 +	while (*MSB_K == *MSB_Q) {
18842 +		buffer_lengths_in_bytes--;
18843 +		if (0 == buffer_lengths_in_bytes) {
18844 +			DPRINTK("%s() Buffers have equal value!!\n",
18846 +			return FAIL_A_IS_EQUAL_TO_B;
18851 +	if (*MSB_K < *MSB_Q) {
18852 +		return SUCCESS_A_IS_LESS_THAN_B;
18854 +	return FAIL_A_IS_GREATER_THAN_B;
/*
 * icp_ocfDrvDsaSign() - map an OCF DSA sign request onto cpaCyDsaSignRS().
 *
 * OCF expects the driver to generate the per-signature random K itself:
 * K is drawn from the LAC RNG (cpaCyRandGen) at the same byte length as
 * prime Q and regenerated until icp_ocfDrvCheckALessThanB() confirms
 * K < Q, bounded by DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS (EAGAIN
 * on exhaustion).  K is discarded in the completion callback and never
 * returned to the user.
 *
 * Enforces the FIPS 20-byte prime-Q size (EDOM otherwise), allocates the
 * op-data, K buffer and R/S output flat buffers from kmem caches
 * (freeing everything acquired so far on each failure path), byte-swaps
 * P, Q, G, X and the digest M in place, and submits asynchronously;
 * icp_ocfDrvDsaRSSignCallBack() completes the krp.
 *
 * NOTE(review): extraction gaps — `return` statements after several
 * krp_status assignments, the declaration/initialisation of `doCheck`,
 * the &randData argument tail, and loop-control lines are missing from
 * this view; confirm against the original patch.
 */
18859 +/* Name : icp_ocfDrvDsaSign
18861 + * Description : This function will map DSA RS Sign from OCF to the LAC API.
18863 + * NOTE: From looking at OCF patch to OpenSSL and even the number of input
18864 + * parameters, OCF expects us to generate the random seed value. This value
18865 + * is generated and passed to LAC, however the number is discared in the
18866 + * callback and not returned to the user.
18868 +static int icp_ocfDrvDsaSign(struct cryptkop *krp)
18870 +	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
18871 +	CpaCyDsaRSSignOpData *dsaRsSignOpData = NULL;
18872 +	void *callbackTag = NULL;
18873 +	CpaCyRandGenOpData randGenOpData;
18874 +	int primeQSizeInBytes = 0;
18876 +	CpaFlatBuffer randData;
18877 +	CpaBoolean protocolStatus = CPA_FALSE;
18878 +	CpaFlatBuffer *pR = NULL;
18879 +	CpaFlatBuffer *pS = NULL;
18881 +	callbackTag = krp;
18883 +	BITS_TO_BYTES(primeQSizeInBytes,
18884 +		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
18887 +	if (DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES != primeQSizeInBytes) {
18888 +		APRINTK("%s(): DSA PRIME Q size not equal to the "
18889 +			"FIPS defined 20bytes, = %d\n",
18890 +			__FUNCTION__, primeQSizeInBytes);
18891 +		krp->krp_status = EDOM;
18895 +	dsaRsSignOpData = kmem_cache_zalloc(drvDSARSSign_zone, GFP_KERNEL);
18896 +	if (NULL == dsaRsSignOpData) {
18897 +		APRINTK("%s():Failed to get memory"
18898 +			" for DSA RS Sign Op data struct\n", __FUNCTION__);
18899 +		krp->krp_status = ENOMEM;
	/* K gets its own cache allocation — it is driver-generated, not a
	 * caller buffer, and is freed on every error path below. */
18903 +	dsaRsSignOpData->K.pData =
18904 +	    kmem_cache_alloc(drvDSARSSignKValue_zone, GFP_ATOMIC);
18906 +	if (NULL == dsaRsSignOpData->K.pData) {
18907 +		APRINTK("%s():Failed to get memory"
18908 +			" for DSA RS Sign Op Random value\n", __FUNCTION__);
18909 +		kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
18910 +		krp->krp_status = ENOMEM;
18914 +	pR = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
18915 +	if (NULL == pR) {
18916 +		APRINTK("%s():Failed to get memory"
18917 +			" for DSA signature R\n", __FUNCTION__);
18918 +		kmem_cache_free(drvDSARSSignKValue_zone,
18919 +				dsaRsSignOpData->K.pData);
18920 +		kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
18921 +		krp->krp_status = ENOMEM;
18925 +	pS = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
18926 +	if (NULL == pS) {
18927 +		APRINTK("%s():Failed to get memory"
18928 +			" for DSA signature S\n", __FUNCTION__);
18929 +		icp_ocfDrvFreeFlatBuffer(pR);
18930 +		kmem_cache_free(drvDSARSSignKValue_zone,
18931 +				dsaRsSignOpData->K.pData);
18932 +		kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
18933 +		krp->krp_status = ENOMEM;
18937 +	/*link prime number parameter for ease of processing */
18938 +	dsaRsSignOpData->P.pData =
18939 +	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].crp_p;
18940 +	BITS_TO_BYTES(dsaRsSignOpData->P.dataLenInBytes,
18941 +		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].
18944 +	icp_ocfDrvSwapBytes(dsaRsSignOpData->P.pData,
18945 +			    dsaRsSignOpData->P.dataLenInBytes);
18947 +	dsaRsSignOpData->Q.pData =
18948 +	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].crp_p;
18949 +	BITS_TO_BYTES(dsaRsSignOpData->Q.dataLenInBytes,
18950 +		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
18953 +	icp_ocfDrvSwapBytes(dsaRsSignOpData->Q.pData,
18954 +			    dsaRsSignOpData->Q.dataLenInBytes);
18956 +	/*generate random number with equal buffer size to Prime value Q,
18957 +	   but value less than Q */
18958 +	dsaRsSignOpData->K.dataLenInBytes = dsaRsSignOpData->Q.dataLenInBytes;
18960 +	randGenOpData.generateBits = CPA_TRUE;
18961 +	randGenOpData.lenInBytes = dsaRsSignOpData->K.dataLenInBytes;
18963 +	icp_ocfDrvPtrAndLenToFlatBuffer(dsaRsSignOpData->K.pData,
18964 +					dsaRsSignOpData->K.dataLenInBytes,
	/* Retry loop: draw a fresh K from the LAC RNG until K < Q, or until
	 * the iteration cap (checked just below) is reached. */
18968 +	while (icp_ocfDrvCheckALessThanB(&(dsaRsSignOpData->K),
18969 +					 &(dsaRsSignOpData->Q), &doCheck)) {
18971 +		if (CPA_STATUS_SUCCESS
18972 +		    != cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
18973 +				    NULL, NULL, &randGenOpData, &randData)) {
18974 +			APRINTK("%s(): ERROR - Failed to generate DSA RS Sign K"
18975 +				"value\n", __FUNCTION__);
18976 +			icp_ocfDrvFreeFlatBuffer(pS);
18977 +			icp_ocfDrvFreeFlatBuffer(pR);
18978 +			kmem_cache_free(drvDSARSSignKValue_zone,
18979 +					dsaRsSignOpData->K.pData);
18980 +			kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
18981 +			krp->krp_status = EAGAIN;
18986 +	if (DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS == doCheck) {
18987 +		APRINTK("%s(): ERROR - Failed to find DSA RS Sign K "
18988 +			"value less than Q value\n", __FUNCTION__);
18989 +		icp_ocfDrvFreeFlatBuffer(pS);
18990 +		icp_ocfDrvFreeFlatBuffer(pR);
18991 +		kmem_cache_free(drvDSARSSignKValue_zone,
18992 +				dsaRsSignOpData->K.pData);
18993 +		kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
18994 +		krp->krp_status = EAGAIN;
18999 +	/*Rand Data - no need to swap bytes for pK */
19001 +	/* Link parameters */
19002 +	dsaRsSignOpData->G.pData =
19003 +	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_p;
19004 +	BITS_TO_BYTES(dsaRsSignOpData->G.dataLenInBytes,
19005 +		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_nbits);
19007 +	icp_ocfDrvSwapBytes(dsaRsSignOpData->G.pData,
19008 +			    dsaRsSignOpData->G.dataLenInBytes);
19010 +	dsaRsSignOpData->X.pData =
19011 +	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_p;
19012 +	BITS_TO_BYTES(dsaRsSignOpData->X.dataLenInBytes,
19013 +		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_nbits);
19014 +	icp_ocfDrvSwapBytes(dsaRsSignOpData->X.pData,
19015 +			    dsaRsSignOpData->X.dataLenInBytes);
19017 +	dsaRsSignOpData->M.pData =
19018 +	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].crp_p;
19019 +	BITS_TO_BYTES(dsaRsSignOpData->M.dataLenInBytes,
19020 +		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].
19022 +	icp_ocfDrvSwapBytes(dsaRsSignOpData->M.pData,
19023 +			    dsaRsSignOpData->M.dataLenInBytes);
19025 +	/* Output Parameters */
19026 +	pS->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].crp_p;
19027 +	BITS_TO_BYTES(pS->dataLenInBytes,
19028 +		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].
19031 +	pR->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].crp_p;
19032 +	BITS_TO_BYTES(pR->dataLenInBytes,
19033 +		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].
19036 +	lacStatus = cpaCyDsaSignRS(CPA_INSTANCE_HANDLE_SINGLE,
19037 +				   icp_ocfDrvDsaRSSignCallBack,
19038 +				   callbackTag, dsaRsSignOpData,
19039 +				   &protocolStatus, pR, pS);
19041 +	if (CPA_STATUS_SUCCESS != lacStatus) {
19042 +		EPRINTK("%s(): DSA RS Sign Operation failed (%d).\n",
19043 +			__FUNCTION__, lacStatus);
19044 +		krp->krp_status = ECANCELED;
19045 +		icp_ocfDrvFreeFlatBuffer(pS);
19046 +		icp_ocfDrvFreeFlatBuffer(pR);
19047 +		kmem_cache_free(drvDSARSSignKValue_zone,
19048 +				dsaRsSignOpData->K.pData);
19049 +		kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
19052 +	return lacStatus;
/*
 * icp_ocfDrvDsaVerify() - map an OCF DSA verify request onto
 * cpaCyDsaVerify().
 *
 * Links P, Q, G, public key Y, digest M and signature components R, S
 * from the krp parameters into a CpaCyDsaVerifyOpData, byte-swapping
 * each buffer in place (OCF little-endian -> LAC big-endian), then
 * submits asynchronously; icp_ocfDrvDsaVerifyCallBack() completes the
 * krp with the verification result.  On allocation failure krp_status
 * is set to ENOMEM; on LAC failure the op-data is freed and krp_status
 * set to ECANCELED.  Returns the LAC status.
 */
19055 +/* Name : icp_ocfDrvDsaVerify
19057 + * Description : This function will map DSA RS Verify from OCF to the LAC API.
19060 +static int icp_ocfDrvDsaVerify(struct cryptkop *krp)
19062 +	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
19063 +	CpaCyDsaVerifyOpData *dsaVerifyOpData = NULL;
19064 +	void *callbackTag = NULL;
19065 +	CpaBoolean verifyStatus = CPA_FALSE;
19067 +	callbackTag = krp;
19069 +	dsaVerifyOpData = kmem_cache_zalloc(drvDSAVerify_zone, GFP_KERNEL);
19070 +	if (NULL == dsaVerifyOpData) {
19071 +		APRINTK("%s():Failed to get memory"
19072 +			" for DSA Verify Op data struct\n", __FUNCTION__);
19073 +		krp->krp_status = ENOMEM;
19077 +	/* Link parameters */
19078 +	dsaVerifyOpData->P.pData =
19079 +	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].crp_p;
19080 +	BITS_TO_BYTES(dsaVerifyOpData->P.dataLenInBytes,
19081 +		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].
19083 +	icp_ocfDrvSwapBytes(dsaVerifyOpData->P.pData,
19084 +			    dsaVerifyOpData->P.dataLenInBytes);
19086 +	dsaVerifyOpData->Q.pData =
19087 +	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].crp_p;
19088 +	BITS_TO_BYTES(dsaVerifyOpData->Q.dataLenInBytes,
19089 +		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].
19091 +	icp_ocfDrvSwapBytes(dsaVerifyOpData->Q.pData,
19092 +			    dsaVerifyOpData->Q.dataLenInBytes);
19094 +	dsaVerifyOpData->G.pData =
19095 +	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].crp_p;
19096 +	BITS_TO_BYTES(dsaVerifyOpData->G.dataLenInBytes,
19097 +		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].
19099 +	icp_ocfDrvSwapBytes(dsaVerifyOpData->G.pData,
19100 +			    dsaVerifyOpData->G.dataLenInBytes);
19102 +	dsaVerifyOpData->Y.pData =
19103 +	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].crp_p;
19104 +	BITS_TO_BYTES(dsaVerifyOpData->Y.dataLenInBytes,
19105 +		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].
19107 +	icp_ocfDrvSwapBytes(dsaVerifyOpData->Y.pData,
19108 +			    dsaVerifyOpData->Y.dataLenInBytes);
19110 +	dsaVerifyOpData->M.pData =
19111 +	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].crp_p;
19112 +	BITS_TO_BYTES(dsaVerifyOpData->M.dataLenInBytes,
19113 +		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].
19115 +	icp_ocfDrvSwapBytes(dsaVerifyOpData->M.pData,
19116 +			    dsaVerifyOpData->M.dataLenInBytes);
19118 +	dsaVerifyOpData->R.pData =
19119 +	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].crp_p;
19120 +	BITS_TO_BYTES(dsaVerifyOpData->R.dataLenInBytes,
19121 +		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].
19123 +	icp_ocfDrvSwapBytes(dsaVerifyOpData->R.pData,
19124 +			    dsaVerifyOpData->R.dataLenInBytes);
19126 +	dsaVerifyOpData->S.pData =
19127 +	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].crp_p;
19128 +	BITS_TO_BYTES(dsaVerifyOpData->S.dataLenInBytes,
19129 +		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].
19131 +	icp_ocfDrvSwapBytes(dsaVerifyOpData->S.pData,
19132 +			    dsaVerifyOpData->S.dataLenInBytes);
19134 +	lacStatus = cpaCyDsaVerify(CPA_INSTANCE_HANDLE_SINGLE,
19135 +				   icp_ocfDrvDsaVerifyCallBack,
19136 +				   callbackTag, dsaVerifyOpData, &verifyStatus);
19138 +	if (CPA_STATUS_SUCCESS != lacStatus) {
19139 +		EPRINTK("%s(): DSA Verify Operation failed (%d).\n",
19140 +			__FUNCTION__, lacStatus);
19141 +		kmem_cache_free(drvDSAVerify_zone, dsaVerifyOpData);
19142 +		krp->krp_status = ECANCELED;
19145 +	return lacStatus;
/*
 * icp_ocfDrvReadRandom() - OCF RNG hook backed by cpaCyRandGen().
 *
 * Fills `buf` with maxwords 32-bit words of hardware random data via a
 * synchronous LAC call (NULL callback).  Returns the number of 32-bit
 * words generated on success, or RETURN_RAND_NUM_GEN_FAILED on LAC
 * failure; a NULL buf is rejected (return value on that path lost in
 * extraction).  `arg` is unused here.
 */
19148 +/* Name : icp_ocfDrvReadRandom
19150 + * Description : This function will map RNG functionality calls from OCF
19151 + * to the LAC API.
19153 +int icp_ocfDrvReadRandom(void *arg, uint32_t * buf, int maxwords)
19155 +	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
19156 +	CpaCyRandGenOpData randGenOpData;
19157 +	CpaFlatBuffer randData;
19159 +	if (NULL == buf) {
19160 +		APRINTK("%s(): Invalid input parameters\n", __FUNCTION__);
19164 +	/* maxwords here is number of integers to generate data for */
19165 +	randGenOpData.generateBits = CPA_TRUE;
19167 +	randGenOpData.lenInBytes = maxwords * sizeof(uint32_t);
19169 +	icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *) buf,
19170 +					randGenOpData.lenInBytes, &randData);
19172 +	lacStatus = cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
19173 +				 NULL, NULL, &randGenOpData, &randData);
19174 +	if (CPA_STATUS_SUCCESS != lacStatus) {
19175 +		EPRINTK("%s(): icp_LacSymRandGen failed (%d). \n",
19176 +			__FUNCTION__, lacStatus);
19177 +		return RETURN_RAND_NUM_GEN_FAILED;
19180 +	return randGenOpData.lenInBytes / sizeof(uint32_t);
/*
 * icp_ocfDrvDhP1CallBack() - LAC completion callback for DH phase-1.
 *
 * Recovers the cryptkop from callbackTag, validates the op-data and
 * result buffer (cancelling the request on NULL), byte-swaps the
 * big-endian LAC result back to the caller's little-endian form, zeroes
 * and frees the op-data (it holds private-key material), frees the flat
 * buffer wrapper, and signals OCF completion via crypto_kdone().
 */
19183 +/* Name : icp_ocfDrvDhP1Callback
19185 + * Description : When this function returns it signifies that the LAC
19186 + * component has completed the DH operation.
19189 +icp_ocfDrvDhP1CallBack(void *callbackTag,
19190 +		       CpaStatus status,
19191 +		       void *pOpData, CpaFlatBuffer * pLocalOctetStringPV)
19193 +	struct cryptkop *krp = NULL;
19194 +	CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
19196 +	if (NULL == callbackTag) {
19197 +		DPRINTK("%s(): Invalid input parameters - "
19198 +			"callbackTag data is NULL\n", __FUNCTION__);
19201 +	krp = (struct cryptkop *)callbackTag;
19203 +	if (NULL == pOpData) {
19204 +		DPRINTK("%s(): Invalid input parameters - "
19205 +			"Operation Data is NULL\n", __FUNCTION__);
19206 +		krp->krp_status = ECANCELED;
19207 +		crypto_kdone(krp);
19210 +	pPhase1OpData = (CpaCyDhPhase1KeyGenOpData *) pOpData;
19212 +	if (NULL == pLocalOctetStringPV) {
19213 +		DPRINTK("%s(): Invalid input parameters - "
19214 +			"pLocalOctetStringPV Data is NULL\n", __FUNCTION__);
		/* Zeroise before freeing: op-data holds the private value X */
19215 +		memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
19216 +		kmem_cache_free(drvDH_zone, pPhase1OpData);
19217 +		krp->krp_status = ECANCELED;
19218 +		crypto_kdone(krp);
19222 +	if (CPA_STATUS_SUCCESS == status) {
19223 +		krp->krp_status = CRYPTO_OP_SUCCESS;
19225 +		APRINTK("%s(): Diffie Hellman Phase1 Key Gen failed - "
19226 +			"Operation Status = %d\n", __FUNCTION__, status);
19227 +		krp->krp_status = ECANCELED;
	/* Convert the result back to the byte order OCF callers expect */
19230 +	icp_ocfDrvSwapBytes(pLocalOctetStringPV->pData,
19231 +			    pLocalOctetStringPV->dataLenInBytes);
19233 +	icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
19234 +	memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
19235 +	kmem_cache_free(drvDH_zone, pPhase1OpData);
19237 +	crypto_kdone(krp);
/*
 * icp_ocfDrvModExpCallBack() - LAC completion callback for mod-exp.
 *
 * Recovers the cryptkop from callbackTag, validates op-data and result
 * (cancelling on NULL), byte-swaps the result back to little-endian,
 * and — mirroring the small-base special case in icp_ocfDrvModExp() —
 * restores the in-place htonl()'d base value with ntohl() when base.pData
 * points into the krp_param crp_nbits storage.  Finally zeroes and frees
 * the op-data, frees the result wrapper, and calls crypto_kdone().
 */
19242 +/* Name : icp_ocfDrvModExpCallBack
19244 + * Description : When this function returns it signifies that the LAC
19245 + * component has completed the Mod Exp operation.
19248 +icp_ocfDrvModExpCallBack(void *callbackTag,
19249 +			 CpaStatus status,
19250 +			 void *pOpdata, CpaFlatBuffer * pResult)
19252 +	struct cryptkop *krp = NULL;
19253 +	CpaCyLnModExpOpData *pLnModExpOpData = NULL;
19255 +	if (NULL == callbackTag) {
19256 +		DPRINTK("%s(): Invalid input parameters - "
19257 +			"callbackTag data is NULL\n", __FUNCTION__);
19260 +	krp = (struct cryptkop *)callbackTag;
19262 +	if (NULL == pOpdata) {
19263 +		DPRINTK("%s(): Invalid Mod Exp input parameters - "
19264 +			"Operation Data is NULL\n", __FUNCTION__);
19265 +		krp->krp_status = ECANCELED;
19266 +		crypto_kdone(krp);
19269 +	pLnModExpOpData = (CpaCyLnModExpOpData *) pOpdata;
19271 +	if (NULL == pResult) {
19272 +		DPRINTK("%s(): Invalid input parameters - "
19273 +			"pResult data is NULL\n", __FUNCTION__);
19274 +		krp->krp_status = ECANCELED;
19275 +		memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
19276 +		kmem_cache_free(drvLnModExp_zone, pLnModExpOpData);
19277 +		crypto_kdone(krp);
19281 +	if (CPA_STATUS_SUCCESS == status) {
19282 +		krp->krp_status = CRYPTO_OP_SUCCESS;
19284 +		APRINTK("%s(): LAC Mod Exp Operation failed - "
19285 +			"Operation Status = %d\n", __FUNCTION__, status);
19286 +		krp->krp_status = ECANCELED;
19289 +	icp_ocfDrvSwapBytes(pResult->pData, pResult->dataLenInBytes);
19291 +	/*switch base size value back to original */
	/* Only applies when the small-base path aliased base.pData to the
	 * krp_param storage (see icp_ocfDrvModExp) */
19292 +	if (pLnModExpOpData->base.pData ==
19293 +	    (uint8_t *) & (krp->
19294 +			   krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
19296 +		*((uint32_t *) pLnModExpOpData->base.pData) =
19297 +		    ntohl(*((uint32_t *) pLnModExpOpData->base.pData));
19299 +	icp_ocfDrvFreeFlatBuffer(pResult);
19300 +	memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
19301 +	kmem_cache_free(drvLnModExp_zone, pLnModExpOpData);
19303 +	crypto_kdone(krp);
19309 +/* Name : icp_ocfDrvModExpCRTCallBack
19311 + * Description : When this function returns it signifies that the LAC
19312 + * component has completed the Mod Exp CRT operation.
19315 +icp_ocfDrvModExpCRTCallBack(void *callbackTag,
19316 + CpaStatus status,
19317 + void *pOpData, CpaFlatBuffer * pOutputData)
19319 + struct cryptkop *krp = NULL;
19320 + CpaCyRsaDecryptOpData *pDecryptData = NULL;
19322 + if (NULL == callbackTag) {
19323 + DPRINTK("%s(): Invalid input parameters - "
19324 + "callbackTag data is NULL\n", __FUNCTION__);
19328 + krp = (struct cryptkop *)callbackTag;
19330 + if (NULL == pOpData) {
19331 + DPRINTK("%s(): Invalid input parameters - "
19332 + "Operation Data is NULL\n", __FUNCTION__);
19333 + krp->krp_status = ECANCELED;
19334 + crypto_kdone(krp);
19337 + pDecryptData = (CpaCyRsaDecryptOpData *) pOpData;
19339 + if (NULL == pOutputData) {
19340 + DPRINTK("%s(): Invalid input parameter - "
19341 + "pOutputData is NULL\n", __FUNCTION__);
19342 + memset(pDecryptData->pRecipientPrivateKey, 0,
19343 + sizeof(CpaCyRsaPrivateKey));
19344 + kmem_cache_free(drvRSAPrivateKey_zone,
19345 + pDecryptData->pRecipientPrivateKey);
19346 + memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
19347 + kmem_cache_free(drvRSADecrypt_zone, pDecryptData);
19348 + krp->krp_status = ECANCELED;
19349 + crypto_kdone(krp);
19353 + if (CPA_STATUS_SUCCESS == status) {
19354 + krp->krp_status = CRYPTO_OP_SUCCESS;
19356 + APRINTK("%s(): LAC Mod Exp CRT operation failed - "
19357 + "Operation Status = %d\n", __FUNCTION__, status);
19358 + krp->krp_status = ECANCELED;
19361 + icp_ocfDrvSwapBytes(pOutputData->pData, pOutputData->dataLenInBytes);
19363 + icp_ocfDrvFreeFlatBuffer(pOutputData);
19364 + memset(pDecryptData->pRecipientPrivateKey, 0,
19365 + sizeof(CpaCyRsaPrivateKey));
19366 + kmem_cache_free(drvRSAPrivateKey_zone,
19367 + pDecryptData->pRecipientPrivateKey);
19368 + memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
19369 + kmem_cache_free(drvRSADecrypt_zone, pDecryptData);
19371 + crypto_kdone(krp);
19376 +/* Name : icp_ocfDrvDsaRSSignCallBack
19378 + * Description : When this function returns it signifies that the LAC
19379 + * component has completed the DSA RS sign operation.
19382 +icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
19383 + CpaStatus status,
19385 + CpaBoolean protocolStatus,
19386 + CpaFlatBuffer * pR, CpaFlatBuffer * pS)
19388 + struct cryptkop *krp = NULL;
19389 + CpaCyDsaRSSignOpData *pSignData = NULL;
19391 + if (NULL == callbackTag) {
19392 + DPRINTK("%s(): Invalid input parameters - "
19393 + "callbackTag data is NULL\n", __FUNCTION__);
19397 + krp = (struct cryptkop *)callbackTag;
19399 + if (NULL == pOpData) {
19400 + DPRINTK("%s(): Invalid input parameters - "
19401 + "Operation Data is NULL\n", __FUNCTION__);
19402 + krp->krp_status = ECANCELED;
19403 + crypto_kdone(krp);
19406 + pSignData = (CpaCyDsaRSSignOpData *) pOpData;
19408 + if (NULL == pR) {
19409 + DPRINTK("%s(): Invalid input parameter - "
19410 + "pR sign is NULL\n", __FUNCTION__);
19411 + icp_ocfDrvFreeFlatBuffer(pS);
19412 + kmem_cache_free(drvDSARSSign_zone, pSignData);
19413 + krp->krp_status = ECANCELED;
19414 + crypto_kdone(krp);
19418 + if (NULL == pS) {
19419 + DPRINTK("%s(): Invalid input parameter - "
19420 + "pS sign is NULL\n", __FUNCTION__);
19421 + icp_ocfDrvFreeFlatBuffer(pR);
19422 + kmem_cache_free(drvDSARSSign_zone, pSignData);
19423 + krp->krp_status = ECANCELED;
19424 + crypto_kdone(krp);
19428 + if (CPA_STATUS_SUCCESS != status) {
19429 + APRINTK("%s(): LAC DSA RS Sign operation failed - "
19430 + "Operation Status = %d\n", __FUNCTION__, status);
19431 + krp->krp_status = ECANCELED;
19433 + krp->krp_status = CRYPTO_OP_SUCCESS;
19435 + if (CPA_TRUE != protocolStatus) {
19436 + DPRINTK("%s(): LAC DSA RS Sign operation failed due "
19437 + "to protocol error\n", __FUNCTION__);
19438 + krp->krp_status = EIO;
19442 + /* Swap bytes only when the callback status is successful and
19443 + protocolStatus is set to true */
19444 + if (CPA_STATUS_SUCCESS == status && CPA_TRUE == protocolStatus) {
19445 + icp_ocfDrvSwapBytes(pR->pData, pR->dataLenInBytes);
19446 + icp_ocfDrvSwapBytes(pS->pData, pS->dataLenInBytes);
19449 + icp_ocfDrvFreeFlatBuffer(pR);
19450 + icp_ocfDrvFreeFlatBuffer(pS);
19451 + memset(pSignData->K.pData, 0, pSignData->K.dataLenInBytes);
19452 + kmem_cache_free(drvDSARSSignKValue_zone, pSignData->K.pData);
19453 + memset(pSignData, 0, sizeof(CpaCyDsaRSSignOpData));
19454 + kmem_cache_free(drvDSARSSign_zone, pSignData);
19455 + crypto_kdone(krp);
19460 +/* Name : icp_ocfDrvDsaVerifyCallback
19462 + * Description : When this function returns it signifies that the LAC
19463 + * component has completed the DSA Verify operation.
19466 +icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
19467 + CpaStatus status,
19468 + void *pOpData, CpaBoolean verifyStatus)
19471 + struct cryptkop *krp = NULL;
19472 + CpaCyDsaVerifyOpData *pVerData = NULL;
19474 + if (NULL == callbackTag) {
19475 + DPRINTK("%s(): Invalid input parameters - "
19476 + "callbackTag data is NULL\n", __FUNCTION__);
19480 + krp = (struct cryptkop *)callbackTag;
19482 + if (NULL == pOpData) {
19483 + DPRINTK("%s(): Invalid input parameters - "
19484 + "Operation Data is NULL\n", __FUNCTION__);
19485 + krp->krp_status = ECANCELED;
19486 + crypto_kdone(krp);
19489 + pVerData = (CpaCyDsaVerifyOpData *) pOpData;
19491 + if (CPA_STATUS_SUCCESS != status) {
19492 + APRINTK("%s(): LAC DSA Verify operation failed - "
19493 + "Operation Status = %d\n", __FUNCTION__, status);
19494 + krp->krp_status = ECANCELED;
19496 + krp->krp_status = CRYPTO_OP_SUCCESS;
19498 + if (CPA_TRUE != verifyStatus) {
19499 + DPRINTK("%s(): DSA signature invalid\n", __FUNCTION__);
19500 + krp->krp_status = EIO;
19504 + /* Swap bytes only when the callback status is successful and
19505 + verifyStatus is set to true */
19506 + /*Just swapping back the key values for now. Possibly all
19507 + swapped buffers need to be reverted */
19508 + if (CPA_STATUS_SUCCESS == status && CPA_TRUE == verifyStatus) {
19509 + icp_ocfDrvSwapBytes(pVerData->R.pData,
19510 + pVerData->R.dataLenInBytes);
19511 + icp_ocfDrvSwapBytes(pVerData->S.pData,
19512 + pVerData->S.dataLenInBytes);
19515 + memset(pVerData, 0, sizeof(CpaCyDsaVerifyOpData));
19516 + kmem_cache_free(drvDSAVerify_zone, pVerData);
19517 + crypto_kdone(krp);
19522 +++ b/crypto/ocf/ep80579/icp_common.c
19524 +/***************************************************************************
19526 + * This file is provided under a dual BSD/GPLv2 license. When using or
19527 + * redistributing this file, you may do so under either license.
19529 + * GPL LICENSE SUMMARY
19531 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
19533 + * This program is free software; you can redistribute it and/or modify
19534 + * it under the terms of version 2 of the GNU General Public License as
19535 + * published by the Free Software Foundation.
19537 + * This program is distributed in the hope that it will be useful, but
19538 + * WITHOUT ANY WARRANTY; without even the implied warranty of
19539 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19540 + * General Public License for more details.
19542 + * You should have received a copy of the GNU General Public License
19543 + * along with this program; if not, write to the Free Software
19544 + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19545 + * The full GNU General Public License is included in this distribution
19546 + * in the file called LICENSE.GPL.
19548 + * Contact Information:
19549 + * Intel Corporation
19553 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
19554 + * All rights reserved.
19556 + * Redistribution and use in source and binary forms, with or without
19557 + * modification, are permitted provided that the following conditions
19560 + * * Redistributions of source code must retain the above copyright
19561 + * notice, this list of conditions and the following disclaimer.
19562 + * * Redistributions in binary form must reproduce the above copyright
19563 + * notice, this list of conditions and the following disclaimer in
19564 + * the documentation and/or other materials provided with the
19566 + * * Neither the name of Intel Corporation nor the names of its
19567 + * contributors may be used to endorse or promote products derived
19568 + * from this software without specific prior written permission.
19570 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19571 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19572 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19573 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19574 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19575 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
19576 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19577 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
19578 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19579 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
19580 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19583 + * version: Security.L.1.0.130
19585 + ***************************************************************************/
19588 + * An OCF module that uses Intel® QuickAssist Integrated Accelerator to do the
19591 + * This driver requires the ICP Access Library that is available from Intel in
19592 + * order to operate.
19595 +#include "icp_ocf.h"
19597 +#define ICP_OCF_COMP_NAME "ICP_OCF"
19598 +#define ICP_OCF_VER_MAIN (2)
19599 +#define ICP_OCF_VER_MJR (0)
19600 +#define ICP_OCF_VER_MNR (0)
19602 +#define MAX_DEREG_RETRIES (100)
19603 +#define DEFAULT_DEREG_RETRIES (10)
19604 +#define DEFAULT_DEREG_DELAY_IN_JIFFIES (10)
19606 +/* This defines the maximum number of sessions possible between OCF
19607 + and the OCF Tolapai Driver. If set to zero, there is no limit. */
19608 +#define DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT (0)
19609 +#define NUM_SUPPORTED_CAPABILITIES (21)
19612 +struct kmem_cache *drvSessionData_zone = NULL;
19613 +struct kmem_cache *drvOpData_zone = NULL;
19614 +struct kmem_cache *drvDH_zone = NULL;
19615 +struct kmem_cache *drvLnModExp_zone = NULL;
19616 +struct kmem_cache *drvRSADecrypt_zone = NULL;
19617 +struct kmem_cache *drvRSAPrivateKey_zone = NULL;
19618 +struct kmem_cache *drvDSARSSign_zone = NULL;
19619 +struct kmem_cache *drvDSARSSignKValue_zone = NULL;
19620 +struct kmem_cache *drvDSAVerify_zone = NULL;
19622 +/*Slab zones for flatbuffers and bufferlist*/
19623 +struct kmem_cache *drvFlatBuffer_zone = NULL;
19625 +static int icp_ocfDrvInit(void);
19626 +static void icp_ocfDrvExit(void);
19627 +static void icp_ocfDrvFreeCaches(void);
19628 +static void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg);
19630 +int32_t icp_ocfDrvDriverId = INVALID_DRIVER_ID;
19632 +/* Module parameter - gives the number of times LAC deregistration shall be
19634 +int num_dereg_retries = DEFAULT_DEREG_RETRIES;
19636 +/* Module parameter - gives the delay time in jiffies before a LAC session
19637 + shall be attempted to be deregistered again */
19638 +int dereg_retry_delay_in_jiffies = DEFAULT_DEREG_DELAY_IN_JIFFIES;
19640 +/* Module parameter - gives the maximum number of sessions possible between
19641 + OCF and the OCF Tolapai Driver. If set to zero, there is no limit.*/
19642 +int max_sessions = DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT;
19644 +/* This is set when the module is removed from the system, no further
19645 + processing can take place if this is set */
19646 +atomic_t icp_ocfDrvIsExiting = ATOMIC_INIT(0);
19648 +/* This is used to show how many lac sessions were not deregistered*/
19649 +atomic_t lac_session_failed_dereg_count = ATOMIC_INIT(0);
19651 +/* This is used to track the number of registered sessions between OCF and
19652 + * and the OCF Tolapai driver, when max_session is set to value other than
19653 + * zero. This ensures that the max_session set for the OCF and the driver
19654 + * is equal to the LAC registered sessions */
19655 +atomic_t num_ocf_to_drv_registered_sessions = ATOMIC_INIT(0);
19657 +/* Head of linked list used to store session data */
19658 +struct list_head icp_ocfDrvGlobalSymListHead;
19659 +struct list_head icp_ocfDrvGlobalSymListHead_FreeMemList;
19661 +spinlock_t icp_ocfDrvSymSessInfoListSpinlock = SPIN_LOCK_UNLOCKED;
19662 +rwlock_t icp_kmem_cache_destroy_alloc_lock = RW_LOCK_UNLOCKED;
19664 +struct workqueue_struct *icp_ocfDrvFreeLacSessionWorkQ;
19666 +struct icp_drvBuffListInfo defBuffListInfo;
19669 + softc_device_decl sc_dev;
19672 +static device_method_t icp_methods = {
19673 + /* crypto device methods */
19674 + DEVMETHOD(cryptodev_newsession, icp_ocfDrvNewSession),
19675 + DEVMETHOD(cryptodev_freesession, icp_ocfDrvFreeLACSession),
19676 + DEVMETHOD(cryptodev_process, icp_ocfDrvSymProcess),
19677 + DEVMETHOD(cryptodev_kprocess, icp_ocfDrvPkeProcess),
19680 +module_param(num_dereg_retries, int, S_IRUGO);
19681 +module_param(dereg_retry_delay_in_jiffies, int, S_IRUGO);
19682 +module_param(max_sessions, int, S_IRUGO);
19684 +MODULE_PARM_DESC(num_dereg_retries,
19685 + "Number of times to retry LAC Sym Session Deregistration. "
19686 + "Default 10, Max 100");
19687 +MODULE_PARM_DESC(dereg_retry_delay_in_jiffies, "Delay in jiffies "
19688 + "(added to a schedule() function call) before a LAC Sym "
19689 + "Session Dereg is retried. Default 10");
19690 +MODULE_PARM_DESC(max_sessions, "This sets the maximum number of sessions "
19691 + "between OCF and this driver. If this value is set to zero, "
19692 + "max session count checking is disabled. Default is zero(0)");
19694 +/* Name : icp_ocfDrvInit
19696 + * Description : This function will register all the symmetric and asymmetric
19697 + * functionality that will be accelerated by the hardware. It will also
19698 + * get a unique driver ID from the OCF and initialise all slab caches
19700 +static int __init icp_ocfDrvInit(void)
19702 + int ocfStatus = 0;
19704 + IPRINTK("=== %s ver %d.%d.%d ===\n", ICP_OCF_COMP_NAME,
19705 + ICP_OCF_VER_MAIN, ICP_OCF_VER_MJR, ICP_OCF_VER_MNR);
19707 + if (MAX_DEREG_RETRIES < num_dereg_retries) {
19708 + EPRINTK("Session deregistration retry count set to greater "
19709 + "than %d", MAX_DEREG_RETRIES);
19713 + /* Initialize and Start the Cryptographic component */
19714 + if (CPA_STATUS_SUCCESS !=
19715 + cpaCyStartInstance(CPA_INSTANCE_HANDLE_SINGLE)) {
19716 + EPRINTK("Failed to initialize and start the instance "
19717 + "of the Cryptographic component.\n");
19721 + /* Set the default size of BufferList to allocate */
19722 + memset(&defBuffListInfo, 0, sizeof(struct icp_drvBuffListInfo));
19723 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
19724 + icp_ocfDrvBufferListMemInfo(ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS,
19725 + &defBuffListInfo)) {
19726 + EPRINTK("Failed to get bufferlist memory info.\n");
19730 + /*Register OCF Tolapai Driver with OCF */
19731 + memset(&icpDev, 0, sizeof(icpDev));
19732 + softc_device_init(&icpDev, "icp", 0, icp_methods);
19734 + icp_ocfDrvDriverId = crypto_get_driverid(softc_get_device(&icpDev),
19735 + CRYPTOCAP_F_HARDWARE);
19737 + if (icp_ocfDrvDriverId < 0) {
19738 + EPRINTK("%s : ICP driver failed to register with OCF!\n",
19743 + /*Create all the slab caches used by the OCF Tolapai Driver */
19744 + drvSessionData_zone =
19745 + ICP_CACHE_CREATE("ICP Session Data", struct icp_drvSessionData);
19746 + ICP_CACHE_NULL_CHECK(drvSessionData_zone);
19749 + * Allocation of the OpData includes the allocation space for meta data.
19750 + * The memory after the opData structure is reserved for this meta data.
19753 + kmem_cache_create("ICP Op Data", sizeof(struct icp_drvOpData) +
19754 + defBuffListInfo.metaSize ,0, SLAB_HWCACHE_ALIGN, NULL, NULL);
19757 + ICP_CACHE_NULL_CHECK(drvOpData_zone);
19759 + drvDH_zone = ICP_CACHE_CREATE("ICP DH data", CpaCyDhPhase1KeyGenOpData);
19760 + ICP_CACHE_NULL_CHECK(drvDH_zone);
19762 + drvLnModExp_zone =
19763 + ICP_CACHE_CREATE("ICP ModExp data", CpaCyLnModExpOpData);
19764 + ICP_CACHE_NULL_CHECK(drvLnModExp_zone);
19766 + drvRSADecrypt_zone =
19767 + ICP_CACHE_CREATE("ICP RSA decrypt data", CpaCyRsaDecryptOpData);
19768 + ICP_CACHE_NULL_CHECK(drvRSADecrypt_zone);
19770 + drvRSAPrivateKey_zone =
19771 + ICP_CACHE_CREATE("ICP RSA private key data", CpaCyRsaPrivateKey);
19772 + ICP_CACHE_NULL_CHECK(drvRSAPrivateKey_zone);
19774 + drvDSARSSign_zone =
19775 + ICP_CACHE_CREATE("ICP DSA Sign", CpaCyDsaRSSignOpData);
19776 + ICP_CACHE_NULL_CHECK(drvDSARSSign_zone);
19778 + /*too awkward to use a macro here */
19779 + drvDSARSSignKValue_zone =
19780 + kmem_cache_create("ICP DSA Sign Rand Val",
19781 + DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES, 0,
19782 + SLAB_HWCACHE_ALIGN, NULL, NULL);
19783 + ICP_CACHE_NULL_CHECK(drvDSARSSignKValue_zone);
19785 + drvDSAVerify_zone =
19786 + ICP_CACHE_CREATE("ICP DSA Verify", CpaCyDsaVerifyOpData);
19787 + ICP_CACHE_NULL_CHECK(drvDSAVerify_zone);
19789 + drvFlatBuffer_zone =
19790 + ICP_CACHE_CREATE("ICP Flat Buffers", CpaFlatBuffer);
19791 + ICP_CACHE_NULL_CHECK(drvFlatBuffer_zone);
19793 + /* Register the ICP symmetric crypto support. */
19794 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_NULL_CBC);
19795 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_DES_CBC);
19796 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_3DES_CBC);
19797 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_AES_CBC);
19798 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_ARC4);
19799 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_MD5);
19800 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_MD5_HMAC);
19801 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA1);
19802 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA1_HMAC);
19803 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_256);
19804 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_256_HMAC);
19805 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_384);
19806 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_384_HMAC);
19807 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_512);
19808 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_512_HMAC);
19810 + /* Register the ICP asymmetric algorithm support */
19811 + ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DH_COMPUTE_KEY);
19812 + ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_MOD_EXP);
19813 + ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_MOD_EXP_CRT);
19814 + ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DSA_SIGN);
19815 + ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DSA_VERIFY);
19817 + /* Register the ICP random number generator support */
19818 + if (OCF_REGISTRATION_STATUS_SUCCESS ==
19819 + crypto_rregister(icp_ocfDrvDriverId, icp_ocfDrvReadRandom, NULL)) {
19823 + if (OCF_ZERO_FUNCTIONALITY_REGISTERED == ocfStatus) {
19824 + DPRINTK("%s: Failed to register any device capabilities\n",
19826 + icp_ocfDrvFreeCaches();
19827 + icp_ocfDrvDriverId = INVALID_DRIVER_ID;
19828 + return -ECANCELED;
19831 + DPRINTK("%s: Registered %d of %d device capabilities\n",
19832 + __FUNCTION__, ocfStatus, NUM_SUPPORTED_CAPABILITIES);
19834 +/*Session data linked list used during module exit*/
19835 + INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead);
19836 + INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead_FreeMemList);
19838 + icp_ocfDrvFreeLacSessionWorkQ =
19839 + create_singlethread_workqueue("ocfLacDeregWorkQueue");
19844 +/* Name : icp_ocfDrvExit
19846 + * Description : This function will deregister all the symmetric sessions
19847 + * registered with the LAC component. It will also deregister all symmetric
19848 + * and asymmetric functionality that can be accelerated by the hardware via OCF
19849 + * and random number generation if it is enabled.
19851 +static void icp_ocfDrvExit(void)
19853 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
19854 + struct icp_drvSessionData *sessionData = NULL;
19855 + struct icp_drvSessionData *tempSessionData = NULL;
19856 + int i, remaining_delay_time_in_jiffies = 0;
19857 + /* There is a possibility of a process or new session command being */
19858 + /* sent before this variable is incremented. The aim of this variable */
19859 + /* is to stop a loop of calls creating a deadlock situation which */
19860 + /* would prevent the driver from exiting. */
19862 + atomic_inc(&icp_ocfDrvIsExiting);
19864 + /*Existing sessions will be routed to another driver after these calls */
19865 + crypto_unregister_all(icp_ocfDrvDriverId);
19866 + crypto_runregister_all(icp_ocfDrvDriverId);
19868 + /*If any sessions are waiting to be deregistered, do that. This also
19869 + flushes the work queue */
19870 + destroy_workqueue(icp_ocfDrvFreeLacSessionWorkQ);
19872 + /*ENTER CRITICAL SECTION */
19873 + spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
19874 + list_for_each_entry_safe(tempSessionData, sessionData,
19875 + &icp_ocfDrvGlobalSymListHead, listNode) {
19876 + for (i = 0; i < num_dereg_retries; i++) {
19877 + /*No harm if bad input - LAC will handle error cases */
19878 + if (ICP_SESSION_RUNNING == tempSessionData->inUse) {
19880 + cpaCySymRemoveSession
19881 + (CPA_INSTANCE_HANDLE_SINGLE,
19882 + tempSessionData->sessHandle);
19883 + if (CPA_STATUS_SUCCESS == lacStatus) {
 19884 +				/* Successfully deregistered */
19886 + } else if (CPA_STATUS_RETRY != lacStatus) {
19888 + (&lac_session_failed_dereg_count);
 19892 +			/*schedule_timeout returns the time left for completion if
19893 + * this task is set to TASK_INTERRUPTIBLE */
19894 + remaining_delay_time_in_jiffies =
19895 + dereg_retry_delay_in_jiffies;
19896 + while (0 > remaining_delay_time_in_jiffies) {
19897 + remaining_delay_time_in_jiffies =
19899 + (remaining_delay_time_in_jiffies);
19903 + ("%s(): Retry %d to deregistrate the session\n",
19904 + __FUNCTION__, i);
19908 + /*remove from current list */
19909 + list_del(&(tempSessionData->listNode));
19910 + /*add to free mem linked list */
19911 + list_add(&(tempSessionData->listNode),
19912 + &icp_ocfDrvGlobalSymListHead_FreeMemList);
19916 + /*EXIT CRITICAL SECTION */
19917 + spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
19919 + /*set back to initial values */
19920 + sessionData = NULL;
19921 + /*still have a reference in our list! */
19922 + tempSessionData = NULL;
19924 + list_for_each_entry_safe(tempSessionData, sessionData,
19925 + &icp_ocfDrvGlobalSymListHead_FreeMemList,
19928 + list_del(&(tempSessionData->listNode));
19929 + /* Free allocated CpaCySymSessionCtx */
19930 + if (NULL != tempSessionData->sessHandle) {
19931 + kfree(tempSessionData->sessHandle);
19933 + memset(tempSessionData, 0, sizeof(struct icp_drvSessionData));
19934 + kmem_cache_free(drvSessionData_zone, tempSessionData);
19937 + if (0 != atomic_read(&lac_session_failed_dereg_count)) {
19938 + DPRINTK("%s(): %d LAC sessions were not deregistered "
19939 + "correctly. This is not a clean exit! \n",
19941 + atomic_read(&lac_session_failed_dereg_count));
19944 + icp_ocfDrvFreeCaches();
19945 + icp_ocfDrvDriverId = INVALID_DRIVER_ID;
19947 + /* Shutdown the Cryptographic component */
19948 + lacStatus = cpaCyStopInstance(CPA_INSTANCE_HANDLE_SINGLE);
19949 + if (CPA_STATUS_SUCCESS != lacStatus) {
19950 + DPRINTK("%s(): Failed to stop instance of the "
19951 + "Cryptographic component.(status == %d)\n",
19952 + __FUNCTION__, lacStatus);
19957 +/* Name : icp_ocfDrvFreeCaches
19959 + * Description : This function deregisters all slab caches
19961 +static void icp_ocfDrvFreeCaches(void)
19963 + if (atomic_read(&icp_ocfDrvIsExiting) != CPA_TRUE) {
19964 + atomic_set(&icp_ocfDrvIsExiting, 1);
19968 + ICP_CACHE_DESTROY(drvSessionData_zone);
19969 + ICP_CACHE_DESTROY(drvOpData_zone);
19972 + ICP_CACHE_DESTROY(drvDH_zone);
19973 + ICP_CACHE_DESTROY(drvLnModExp_zone);
19974 + ICP_CACHE_DESTROY(drvRSADecrypt_zone);
19975 + ICP_CACHE_DESTROY(drvRSAPrivateKey_zone);
19976 + ICP_CACHE_DESTROY(drvDSARSSignKValue_zone);
19977 + ICP_CACHE_DESTROY(drvDSARSSign_zone);
19978 + ICP_CACHE_DESTROY(drvDSAVerify_zone);
19980 + /*FlatBuffer and BufferList Zones */
19981 + ICP_CACHE_DESTROY(drvFlatBuffer_zone);
19985 +/* Name : icp_ocfDrvDeregRetry
19987 + * Description : This function will try to farm the session deregistration
19988 + * off to a work queue. If it fails, nothing more can be done and it
19989 + * returns an error
19992 +int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister)
19994 + struct icp_ocfDrvFreeLacSession *workstore = NULL;
19996 + DPRINTK("%s(): Retry - Deregistering session (%p)\n",
19997 + __FUNCTION__, sessionToDeregister);
19999 + /*make sure the session is not available to be allocated during this
20001 + atomic_inc(&lac_session_failed_dereg_count);
20003 + /*Farm off to work queue */
20005 + kmalloc(sizeof(struct icp_ocfDrvFreeLacSession), GFP_ATOMIC);
20006 + if (NULL == workstore) {
20007 + DPRINTK("%s(): unable to free session - no memory available "
20008 + "for work queue\n", __FUNCTION__);
20012 + workstore->sessionToDeregister = sessionToDeregister;
20014 + INIT_WORK(&(workstore->work), icp_ocfDrvDeferedFreeLacSessionProcess,
20016 + queue_work(icp_ocfDrvFreeLacSessionWorkQ, &(workstore->work));
20018 + return ICP_OCF_DRV_STATUS_SUCCESS;
20022 +/* Name : icp_ocfDrvDeferedFreeLacSessionProcess
20024 + * Description : This function will retry (module input parameter)
 20025 + * 'num_dereg_retries' times to deregister any symmetric session that receives a
20026 + * CPA_STATUS_RETRY message from the LAC component. This function is run in
20027 + * Thread context because it is called from a worker thread
20029 +static void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg)
20031 + struct icp_ocfDrvFreeLacSession *workstore = NULL;
20032 + CpaCySymSessionCtx sessionToDeregister = NULL;
20034 + int remaining_delay_time_in_jiffies = 0;
20035 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
20037 + workstore = (struct icp_ocfDrvFreeLacSession *)arg;
20038 + if (NULL == workstore) {
20039 + DPRINTK("%s() function called with null parameter \n",
20044 + sessionToDeregister = workstore->sessionToDeregister;
20045 + kfree(workstore);
20047 + /*if exiting, give deregistration one more blast only */
20048 + if (atomic_read(&icp_ocfDrvIsExiting) == CPA_TRUE) {
20049 + lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
20050 + sessionToDeregister);
20052 + if (lacStatus != CPA_STATUS_SUCCESS) {
20053 + DPRINTK("%s() Failed to Dereg LAC session %p "
20054 + "during module exit\n", __FUNCTION__,
20055 + sessionToDeregister);
20059 + atomic_dec(&lac_session_failed_dereg_count);
20063 + for (i = 0; i <= num_dereg_retries; i++) {
20064 + lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
20065 + sessionToDeregister);
20067 + if (lacStatus == CPA_STATUS_SUCCESS) {
20068 + atomic_dec(&lac_session_failed_dereg_count);
20071 + if (lacStatus != CPA_STATUS_RETRY) {
20072 + DPRINTK("%s() Failed to deregister session - lacStatus "
20073 + " = %d", __FUNCTION__, lacStatus);
 20077 +		/*schedule_timeout returns the time left for completion if this
20078 + task is set to TASK_INTERRUPTIBLE */
20079 + remaining_delay_time_in_jiffies = dereg_retry_delay_in_jiffies;
20080 + while (0 > remaining_delay_time_in_jiffies) {
20081 + remaining_delay_time_in_jiffies =
20082 + schedule_timeout(remaining_delay_time_in_jiffies);
20087 + DPRINTK("%s(): Unable to deregister session\n", __FUNCTION__);
20088 + DPRINTK("%s(): Number of unavailable LAC sessions = %d\n", __FUNCTION__,
20089 + atomic_read(&lac_session_failed_dereg_count));
20092 +/* Name : icp_ocfDrvPtrAndLenToFlatBuffer
20094 + * Description : This function converts a "pointer and length" buffer
20095 + * structure to Fredericksburg Flat Buffer (CpaFlatBuffer) format.
20097 + * This function assumes that the data passed in are valid.
20100 +icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
20101 + CpaFlatBuffer * pFlatBuffer)
20103 + pFlatBuffer->pData = pData;
20104 + pFlatBuffer->dataLenInBytes = len;
20107 +/* Name : icp_ocfDrvSingleSkBuffToFlatBuffer
20109 + * Description : This function converts a single socket buffer (sk_buff)
20110 + * structure to a Fredericksburg Flat Buffer (CpaFlatBuffer) format.
20112 + * This function assumes that the data passed in are valid.
20114 +static inline void
20115 +icp_ocfDrvSingleSkBuffToFlatBuffer(struct sk_buff *pSkb,
20116 + CpaFlatBuffer * pFlatBuffer)
20118 + pFlatBuffer->pData = pSkb->data;
20119 + pFlatBuffer->dataLenInBytes = skb_headlen(pSkb);
20122 +/* Name : icp_ocfDrvSkBuffToBufferList
20124 + * Description : This function converts a socket buffer (sk_buff) structure to
20125 + * Fredericksburg Scatter/Gather (CpaBufferList) buffer format.
20127 + * This function assumes that the bufferlist has been allocated with the correct
20128 + * number of buffer arrays.
20132 +icp_ocfDrvSkBuffToBufferList(struct sk_buff *pSkb, CpaBufferList * bufferList)
20134 + CpaFlatBuffer *curFlatBuffer = NULL;
20135 + char *skbuffPageAddr = NULL;
20136 + struct sk_buff *pCurFrag = NULL;
20137 + struct skb_shared_info *pShInfo = NULL;
20138 + uint32_t page_offset = 0, i = 0;
20140 + DPRINTK("%s(): Entry Point\n", __FUNCTION__);
20143 + * In all cases, the first skb needs to be translated to FlatBuffer.
20144 + * Perform a buffer translation for the first skbuff
20146 + curFlatBuffer = bufferList->pBuffers;
20147 + icp_ocfDrvSingleSkBuffToFlatBuffer(pSkb, curFlatBuffer);
20149 + /* Set the userData to point to the original sk_buff */
20150 + bufferList->pUserData = (void *)pSkb;
20152 + /* We now know we'll have at least one element in the SGL */
20153 + bufferList->numBuffers = 1;
20155 + if (0 == skb_is_nonlinear(pSkb)) {
20156 + /* Is a linear buffer - therefore it's a single skbuff */
20157 + DPRINTK("%s(): Exit Point\n", __FUNCTION__);
20158 + return ICP_OCF_DRV_STATUS_SUCCESS;
20162 + pShInfo = skb_shinfo(pSkb);
20163 + if (pShInfo->frag_list != NULL && pShInfo->nr_frags != 0) {
20165 + "Translation for a combination of frag_list "
20166 + "and frags[] array not supported!\n", __FUNCTION__);
20167 + return ICP_OCF_DRV_STATUS_FAIL;
20168 + } else if (pShInfo->frag_list != NULL) {
20170 + * Non linear skbuff supported through frag_list
20171 + * Perform translation for each fragment (sk_buff)
20172 + * in the frag_list of the first sk_buff.
20174 + for (pCurFrag = pShInfo->frag_list;
20175 + pCurFrag != NULL; pCurFrag = pCurFrag->next) {
20176 + icp_ocfDrvSingleSkBuffToFlatBuffer(pCurFrag,
20179 + bufferList->numBuffers++;
20181 + } else if (pShInfo->nr_frags != 0) {
20183 + * Perform translation for each fragment in frags array
20184 + * and add to the BufferList
20186 + for (i = 0; i < pShInfo->nr_frags; i++) {
20187 + /* Get the page address and offset of this frag */
20188 + skbuffPageAddr = (char *)pShInfo->frags[i].page;
20189 + page_offset = pShInfo->frags[i].page_offset;
20191 + /* Convert a pointer and length to a flat buffer */
20192 + icp_ocfDrvPtrAndLenToFlatBuffer(skbuffPageAddr +
20194 + pShInfo->frags[i].size,
20197 + bufferList->numBuffers++;
20200 + EPRINTK("%s():" "Could not recognize skbuff fragments!\n",
20202 + return ICP_OCF_DRV_STATUS_FAIL;
20205 + DPRINTK("%s(): Exit Point\n", __FUNCTION__);
20206 + return ICP_OCF_DRV_STATUS_SUCCESS;
20209 +/* Name : icp_ocfDrvBufferListToSkBuff
20211 + * Description : This function converts a Fredericksburg Scatter/Gather
20212 + * (CpaBufferList) buffer format to socket buffer structure.
20215 +icp_ocfDrvBufferListToSkBuff(CpaBufferList * bufferList, struct sk_buff **skb)
20217 + DPRINTK("%s(): Entry Point\n", __FUNCTION__);
 20219 +	/* Retrieve the original skbuff */
20220 + *skb = (struct sk_buff *)bufferList->pUserData;
20221 + if (NULL == *skb) {
20223 + "Error on converting from a BufferList. "
20224 + "The BufferList does not contain an sk_buff.\n",
20226 + return ICP_OCF_DRV_STATUS_FAIL;
20228 + DPRINTK("%s(): Exit Point\n", __FUNCTION__);
20229 + return ICP_OCF_DRV_STATUS_SUCCESS;
20232 +/* Name : icp_ocfDrvPtrAndLenToBufferList
20234 + * Description : This function converts a "pointer and length" buffer
20235 + * structure to Fredericksburg Scatter/Gather Buffer (CpaBufferList) format.
20237 + * This function assumes that the data passed in are valid.
20240 +icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
20241 + CpaBufferList * pBufferList)
20243 + pBufferList->numBuffers = 1;
20244 + pBufferList->pBuffers->pData = pDataIn;
20245 + pBufferList->pBuffers->dataLenInBytes = length;
20248 +/* Name : icp_ocfDrvBufferListToPtrAndLen
20250 + * Description : This function converts Fredericksburg Scatter/Gather Buffer
20251 + * (CpaBufferList) format to a "pointer and length" buffer structure.
20253 + * This function assumes that the data passed in are valid.
20256 +icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
20257 + void **ppDataOut, uint32_t * pLength)
20259 + *ppDataOut = pBufferList->pBuffers->pData;
20260 + *pLength = pBufferList->pBuffers->dataLenInBytes;
20263 +/* Name : icp_ocfDrvBufferListMemInfo
20265 + * Description : This function will set the number of flat buffers in
20266 + * bufferlist, the size of memory to allocate for the pPrivateMetaData
20267 + * member of the CpaBufferList.
20270 +icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
20271 + struct icp_drvBuffListInfo *buffListInfo)
20273 + buffListInfo->numBuffers = numBuffers;
20275 + if (CPA_STATUS_SUCCESS !=
20276 + cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
20277 + buffListInfo->numBuffers,
20278 + &(buffListInfo->metaSize))) {
20279 + EPRINTK("%s() Failed to get buffer list meta size.\n",
20281 + return ICP_OCF_DRV_STATUS_FAIL;
20284 + return ICP_OCF_DRV_STATUS_SUCCESS;
20287 +/* Name : icp_ocfDrvGetSkBuffFrags
20289 + * Description : This function will determine the number of
20290 + * fragments in a socket buffer(sk_buff).
20292 +inline uint16_t icp_ocfDrvGetSkBuffFrags(struct sk_buff * pSkb)
20294 + uint16_t numFrags = 0;
20295 + struct sk_buff *pCurFrag = NULL;
20296 + struct skb_shared_info *pShInfo = NULL;
20298 + if (NULL == pSkb)
20302 + if (0 == skb_is_nonlinear(pSkb)) {
20303 + /* Linear buffer - it's a single skbuff */
20307 + pShInfo = skb_shinfo(pSkb);
20308 + if (NULL != pShInfo->frag_list && 0 != pShInfo->nr_frags) {
20309 + EPRINTK("%s(): Combination of frag_list "
20310 + "and frags[] array not supported!\n", __FUNCTION__);
20312 + } else if (0 != pShInfo->nr_frags) {
20313 + numFrags += pShInfo->nr_frags;
20315 + } else if (NULL != pShInfo->frag_list) {
20316 + for (pCurFrag = pShInfo->frag_list;
20317 + pCurFrag != NULL; pCurFrag = pCurFrag->next) {
20326 +/* Name : icp_ocfDrvFreeFlatBuffer
20328 + * Description : This function will deallocate flat buffer.
20330 +inline void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer)
20332 + if (pFlatBuffer != NULL) {
20333 + memset(pFlatBuffer, 0, sizeof(CpaFlatBuffer));
20334 + kmem_cache_free(drvFlatBuffer_zone, pFlatBuffer);
20338 +/* Name : icp_ocfDrvAllocMetaData
20340 + * Description : This function will allocate memory for the
20341 + * pPrivateMetaData member of CpaBufferList.
20344 +icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList,
20345 + const struct icp_drvOpData *pOpData)
20347 + Cpa32U metaSize = 0;
20349 + if (pBufferList->numBuffers <= ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS){
20350 + void *pOpDataStartAddr = (void *)pOpData;
20352 + if (0 == defBuffListInfo.metaSize) {
20353 + pBufferList->pPrivateMetaData = NULL;
20354 + return ICP_OCF_DRV_STATUS_SUCCESS;
20357 + * The meta data allocation has been included as part of the
20358 + * op data. It has been pre-allocated in memory just after the
20359 + * icp_drvOpData structure.
20361 + pBufferList->pPrivateMetaData = pOpDataStartAddr +
20362 + sizeof(struct icp_drvOpData);
20364 + if (CPA_STATUS_SUCCESS !=
20365 + cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
20366 + pBufferList->numBuffers,
20368 + EPRINTK("%s() Failed to get buffer list meta size.\n",
20370 + return ICP_OCF_DRV_STATUS_FAIL;
20373 + if (0 == metaSize) {
20374 + pBufferList->pPrivateMetaData = NULL;
20375 + return ICP_OCF_DRV_STATUS_SUCCESS;
20378 + pBufferList->pPrivateMetaData = kmalloc(metaSize, GFP_ATOMIC);
20380 + if (NULL == pBufferList->pPrivateMetaData) {
20381 + EPRINTK("%s() Failed to allocate pPrivateMetaData.\n",
20383 + return ICP_OCF_DRV_STATUS_FAIL;
20386 + return ICP_OCF_DRV_STATUS_SUCCESS;
20389 +/* Name : icp_ocfDrvFreeMetaData
20391 + * Description : This function will deallocate pPrivateMetaData memory.
20393 +inline void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList)
20395 + if (NULL == pBufferList->pPrivateMetaData) {
20400 + * Only free the meta data if the BufferList has more than
20401 + * ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS number of buffers.
20402 + * Otherwise, the meta data shall be freed when the icp_drvOpData is
20405 + if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS < pBufferList->numBuffers){
20406 + kfree(pBufferList->pPrivateMetaData);
20410 +module_init(icp_ocfDrvInit);
20411 +module_exit(icp_ocfDrvExit);
20412 +MODULE_LICENSE("Dual BSD/GPL");
20413 +MODULE_AUTHOR("Intel");
20414 +MODULE_DESCRIPTION("OCF Driver for Intel Quick Assist crypto acceleration");
20416 +++ b/crypto/ocf/ep80579/icp_ocf.h
20418 +/***************************************************************************
20420 + * This file is provided under a dual BSD/GPLv2 license. When using or
20421 + * redistributing this file, you may do so under either license.
20423 + * GPL LICENSE SUMMARY
20425 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
20427 + * This program is free software; you can redistribute it and/or modify
20428 + * it under the terms of version 2 of the GNU General Public License as
20429 + * published by the Free Software Foundation.
20431 + * This program is distributed in the hope that it will be useful, but
20432 + * WITHOUT ANY WARRANTY; without even the implied warranty of
20433 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20434 + * General Public License for more details.
20436 + * You should have received a copy of the GNU General Public License
20437 + * along with this program; if not, write to the Free Software
20438 + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20439 + * The full GNU General Public License is included in this distribution
20440 + * in the file called LICENSE.GPL.
20442 + * Contact Information:
20443 + * Intel Corporation
20447 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
20448 + * All rights reserved.
20450 + * Redistribution and use in source and binary forms, with or without
20451 + * modification, are permitted provided that the following conditions
20454 + * * Redistributions of source code must retain the above copyright
20455 + * notice, this list of conditions and the following disclaimer.
20456 + * * Redistributions in binary form must reproduce the above copyright
20457 + * notice, this list of conditions and the following disclaimer in
20458 + * the documentation and/or other materials provided with the
20460 + * * Neither the name of Intel Corporation nor the names of its
20461 + * contributors may be used to endorse or promote products derived
20462 + * from this software without specific prior written permission.
20464 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20465 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20466 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20467 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20468 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20469 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
20470 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20471 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20472 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
20473 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
20474 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
20477 + * version: Security.L.1.0.130
20479 + ***************************************************************************/
20482 + * OCF drv driver header file for the Intel ICP processor.
20488 +#include <linux/crypto.h>
20489 +#include <linux/delay.h>
20490 +#include <linux/skbuff.h>
20492 +#include "cryptodev.h"
20496 +#include "cpa_cy_im.h"
20497 +#include "cpa_cy_sym.h"
20498 +#include "cpa_cy_rand.h"
20499 +#include "cpa_cy_dh.h"
20500 +#include "cpa_cy_rsa.h"
20501 +#include "cpa_cy_ln.h"
20502 +#include "cpa_cy_common.h"
20503 +#include "cpa_cy_dsa.h"
20505 +#define NUM_BITS_IN_BYTE (8)
20506 +#define NUM_BITS_IN_BYTE_MINUS_ONE (NUM_BITS_IN_BYTE -1)
20507 +#define INVALID_DRIVER_ID (-1)
20508 +#define RETURN_RAND_NUM_GEN_FAILED (-1)
20510 +/*This define means only one operation can be chained to another
20511 +(resulting in one chain of two operations)*/
20512 +#define MAX_NUM_OF_CHAINED_OPS (1)
20513 +/*This is the max block cipher initialisation vector*/
20514 +#define MAX_IV_LEN_IN_BYTES (20)
20515 +/*This is used to check whether the OCF to this driver session limit has
20517 +#define NO_OCF_TO_DRV_MAX_SESSIONS (0)
20519 +/*OCF values mapped here*/
20520 +#define ICP_SHA1_DIGEST_SIZE_IN_BYTES (SHA1_HASH_LEN)
20521 +#define ICP_SHA256_DIGEST_SIZE_IN_BYTES (SHA2_256_HASH_LEN)
20522 +#define ICP_SHA384_DIGEST_SIZE_IN_BYTES (SHA2_384_HASH_LEN)
20523 +#define ICP_SHA512_DIGEST_SIZE_IN_BYTES (SHA2_512_HASH_LEN)
20524 +#define ICP_MD5_DIGEST_SIZE_IN_BYTES (MD5_HASH_LEN)
20525 +#define ARC4_COUNTER_LEN (ARC4_BLOCK_LEN)
20527 +#define OCF_REGISTRATION_STATUS_SUCCESS (0)
20528 +#define OCF_ZERO_FUNCTIONALITY_REGISTERED (0)
20529 +#define ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR (0)
20530 +#define ICP_OCF_DRV_STATUS_SUCCESS (0)
20531 +#define ICP_OCF_DRV_STATUS_FAIL (1)
20533 +/*Turn on/off debug options*/
20534 +#define ICP_OCF_PRINT_DEBUG_MESSAGES (0)
20535 +#define ICP_OCF_PRINT_KERN_ALERT (1)
20536 +#define ICP_OCF_PRINT_KERN_ERRS (1)
20538 +/*DSA Prime Q size in bytes (as defined in the standard) */
20539 +#define DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES (20)
20541 +/*MACRO DEFINITIONS*/
20543 +#define BITS_TO_BYTES(bytes, bits) \
20544 + bytes = (bits + NUM_BITS_IN_BYTE_MINUS_ONE) / NUM_BITS_IN_BYTE
20546 +#define ICP_CACHE_CREATE(cache_ID, cache_name) \
20547 + kmem_cache_create(cache_ID, sizeof(cache_name),0, \
20548 + SLAB_HWCACHE_ALIGN, NULL, NULL);
20550 +#define ICP_CACHE_NULL_CHECK(slab_zone) \
20552 + if(NULL == slab_zone){ \
20553 + icp_ocfDrvFreeCaches(); \
20554 + EPRINTK("%s() line %d: Not enough memory!\n", \
20555 + __FUNCTION__, __LINE__); \
20560 +#define ICP_CACHE_DESTROY(slab_zone) \
20562 + if(NULL != slab_zone){ \
20563 + kmem_cache_destroy(slab_zone); \
20564 + slab_zone = NULL; \
20568 +#define ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(alg) \
20570 + if(OCF_REGISTRATION_STATUS_SUCCESS == \
20571 + crypto_register(icp_ocfDrvDriverId, \
20579 +#define ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(alg) \
20581 + if(OCF_REGISTRATION_STATUS_SUCCESS == \
20582 + crypto_kregister(icp_ocfDrvDriverId, \
20589 +#if ICP_OCF_PRINT_DEBUG_MESSAGES == 1
20590 +#define DPRINTK(args...) \
20595 +#else //ICP_OCF_PRINT_DEBUG_MESSAGES == 1
20597 +#define DPRINTK(args...)
20599 +#endif //ICP_OCF_PRINT_DEBUG_MESSAGES == 1
20601 +#if ICP_OCF_PRINT_KERN_ALERT == 1
20602 +#define APRINTK(args...) \
20604 + printk(KERN_ALERT args); \
20607 +#else //ICP_OCF_PRINT_KERN_ALERT == 1
20609 +#define APRINTK(args...)
20611 +#endif //ICP_OCF_PRINT_KERN_ALERT == 1
20613 +#if ICP_OCF_PRINT_KERN_ERRS == 1
20614 +#define EPRINTK(args...) \
20616 + printk(KERN_ERR args); \
20619 +#else //ICP_OCF_PRINT_KERN_ERRS == 1
20621 +#define EPRINTK(args...)
20623 +#endif //ICP_OCF_PRINT_KERN_ERRS == 1
20625 +#define IPRINTK(args...) \
20627 + printk(KERN_INFO args); \
20630 +/*END OF MACRO DEFINITIONS*/
20633 + ICP_OCF_DRV_ALG_CIPHER = 0,
20634 + ICP_OCF_DRV_ALG_HASH
20635 +} icp_ocf_drv_alg_type_t;
20637 +/* These are all defined in icp_common.c */
20638 +extern atomic_t lac_session_failed_dereg_count;
20639 +extern atomic_t icp_ocfDrvIsExiting;
20640 +extern atomic_t num_ocf_to_drv_registered_sessions;
20642 +/*These are user inputs used in icp_sym.c and icp_common.c
20643 + They are instantiated in icp_common.c*/
20644 +extern int max_sessions;
20646 +extern int32_t icp_ocfDrvDriverId;
20647 +extern struct list_head icp_ocfDrvGlobalSymListHead;
20648 +extern struct list_head icp_ocfDrvGlobalSymListHead_FreeMemList;
20649 +extern struct workqueue_struct *icp_ocfDrvFreeLacSessionWorkQ;
20650 +extern spinlock_t icp_ocfDrvSymSessInfoListSpinlock;
20651 +extern rwlock_t icp_kmem_cache_destroy_alloc_lock;
20653 +/*Slab zones for symmetric functionality, instantiated in icp_common.c*/
20654 +extern struct kmem_cache *drvSessionData_zone;
20655 +extern struct kmem_cache *drvOpData_zone;
20657 +/*Slab zones for asymmetric functionality, instantiated in icp_common.c*/
20658 +extern struct kmem_cache *drvDH_zone;
20659 +extern struct kmem_cache *drvLnModExp_zone;
20660 +extern struct kmem_cache *drvRSADecrypt_zone;
20661 +extern struct kmem_cache *drvRSAPrivateKey_zone;
20662 +extern struct kmem_cache *drvDSARSSign_zone;
20663 +extern struct kmem_cache *drvDSARSSignKValue_zone;
20664 +extern struct kmem_cache *drvDSAVerify_zone;
20666 +/*Slab zones for flatbuffers and bufferlist*/
20667 +extern struct kmem_cache *drvFlatBuffer_zone;
20669 +#define ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS (16)
20671 +struct icp_drvBuffListInfo {
20672 + Cpa16U numBuffers;
20674 + Cpa32U metaOffset;
20675 + Cpa32U buffListSize;
20677 +extern struct icp_drvBuffListInfo defBuffListInfo;
20680 +* This struct is used to keep a reference to the relevant node in the list
20681 +* of sessionData structs, to the buffer type required by OCF and to the OCF
20682 +* provided crp struct that needs to be returned. All this info is needed in
20683 +* the callback function.
20685 +* IV can sometimes be stored in non-contiguous memory (e.g. skbuff
20686 +* linked/frag list), therefore a contiguous memory space for the IV data must be
20687 +* created and passed to LAC
20690 +struct icp_drvOpData {
20691 + CpaCySymOpData lacOpData;
20692 + uint32_t digestSizeInBytes;
20693 + struct cryptop *crp;
20694 + uint8_t bufferType;
20695 + uint8_t ivData[MAX_IV_LEN_IN_BYTES];
20696 + uint16_t numBufferListArray;
20697 + CpaBufferList srcBuffer;
20698 + CpaFlatBuffer bufferListArray[ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS];
20699 + CpaBoolean verifyResult;
20701 +/*Values used to derisk chances of performs being called against
20702 +deregistered sessions (for which the slab page has been reclaimed)
20703 +This is not a fix - since page frames are reclaimed from a slab, one cannot
20704 +rely on that memory not being re-used by another app.*/
20706 + ICP_SESSION_INITIALISED = 0x5C5C5C,
20707 + ICP_SESSION_RUNNING = 0x005C00,
20708 + ICP_SESSION_DEREGISTERED = 0xC5C5C5
20712 +This is the OCF<->OCF_DRV session object:
20714 +1.The first member is a listNode. These session objects are added to a linked
20715 + list in order to make it easier to remove them all at session exit time.
20716 +2.The second member is used to give the session object state and derisk the
20717 + possibility of OCF batch calls executing against a deregistered session (as
20718 + described above).
20719 +3.The third member is a LAC<->OCF_DRV session handle (initialised with the first
20720 + perform request for that session).
20721 +4.The fourth is the LAC session context. All the parameters for this structure
20722 + are only known when the first perform request for this session occurs. That is
20723 + why the OCF Tolapai Driver only registers a new LAC session at perform time
20725 +struct icp_drvSessionData {
20726 + struct list_head listNode;
20727 + usage_derisk inUse;
20728 + CpaCySymSessionCtx sessHandle;
20729 + CpaCySymSessionSetupData lacSessCtx;
20732 +/* This struct is required for deferred session
20733 + deregistration as a work queue function can
20734 + only have one argument*/
20735 +struct icp_ocfDrvFreeLacSession {
20736 + CpaCySymSessionCtx sessionToDeregister;
20737 + struct work_struct work;
20740 +int icp_ocfDrvNewSession(device_t dev, uint32_t * sild, struct cryptoini *cri);
20742 +int icp_ocfDrvFreeLACSession(device_t dev, uint64_t sid);
20744 +int icp_ocfDrvSymProcess(device_t dev, struct cryptop *crp, int hint);
20746 +int icp_ocfDrvPkeProcess(device_t dev, struct cryptkop *krp, int hint);
20748 +int icp_ocfDrvReadRandom(void *arg, uint32_t * buf, int maxwords);
20750 +int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister);
20752 +int icp_ocfDrvSkBuffToBufferList(struct sk_buff *skb,
20753 + CpaBufferList * bufferList);
20755 +int icp_ocfDrvBufferListToSkBuff(CpaBufferList * bufferList,
20756 + struct sk_buff **skb);
20758 +void icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
20759 + CpaFlatBuffer * pFlatBuffer);
20761 +void icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
20762 + CpaBufferList * pBufferList);
20764 +void icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
20765 + void **ppDataOut, uint32_t * pLength);
20767 +int icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
20768 + struct icp_drvBuffListInfo *buffListInfo);
20770 +uint16_t icp_ocfDrvGetSkBuffFrags(struct sk_buff *pSkb);
20772 +void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer);
20774 +int icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList,
20775 + const struct icp_drvOpData *pOpData);
20777 +void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList);
20782 +++ b/crypto/ocf/ep80579/icp_sym.c
20784 +/***************************************************************************
20786 + * This file is provided under a dual BSD/GPLv2 license. When using or
20787 + * redistributing this file, you may do so under either license.
20789 + * GPL LICENSE SUMMARY
20791 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
20793 + * This program is free software; you can redistribute it and/or modify
20794 + * it under the terms of version 2 of the GNU General Public License as
20795 + * published by the Free Software Foundation.
20797 + * This program is distributed in the hope that it will be useful, but
20798 + * WITHOUT ANY WARRANTY; without even the implied warranty of
20799 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20800 + * General Public License for more details.
20802 + * You should have received a copy of the GNU General Public License
20803 + * along with this program; if not, write to the Free Software
20804 + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20805 + * The full GNU General Public License is included in this distribution
20806 + * in the file called LICENSE.GPL.
20808 + * Contact Information:
20809 + * Intel Corporation
20813 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
20814 + * All rights reserved.
20816 + * Redistribution and use in source and binary forms, with or without
20817 + * modification, are permitted provided that the following conditions
20820 + * * Redistributions of source code must retain the above copyright
20821 + * notice, this list of conditions and the following disclaimer.
20822 + * * Redistributions in binary form must reproduce the above copyright
20823 + * notice, this list of conditions and the following disclaimer in
20824 + * the documentation and/or other materials provided with the
20826 + * * Neither the name of Intel Corporation nor the names of its
20827 + * contributors may be used to endorse or promote products derived
20828 + * from this software without specific prior written permission.
20830 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20831 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20832 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20833 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20834 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20835 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
20836 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20837 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20838 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
20839 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
20840 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
20843 + * version: Security.L.1.0.130
20845 + ***************************************************************************/
20847 + * An OCF module that uses the API for Intel® QuickAssist Technology to do the
20850 + * This driver requires the ICP Access Library that is available from Intel in
20851 + * order to operate.
20854 +#include "icp_ocf.h"
20856 +/*This is the call back function for all symmetric cryptographic processes.
20857 + Its main functionality is to free the driver crypto operation structure and to
20858 + call back to OCF*/
20860 +icp_ocfDrvSymCallBack(void *callbackTag,
20861 + CpaStatus status,
20862 + const CpaCySymOp operationType,
20864 + CpaBufferList * pDstBuffer, CpaBoolean verifyResult);
20866 +/*This function is used to extract crypto processing information from the OCF
20867 + inputs, so as that it may be passed onto LAC*/
20869 +icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
20870 + struct cryptodesc *crp_desc);
20872 +/*This function checks whether the crp_desc argument pertains to a digest or a
20873 + cipher operation*/
20874 +static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc);
20876 +/*This function copies all the passed in session context information and stores
20877 + it in a LAC context structure*/
20879 +icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
20880 + CpaCySymSessionSetupData * lacSessCtx);
20882 +/*This top level function is used to find a pointer to where a digest is
20883 + stored/needs to be inserted. */
20884 +static uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData *drvOpData,
20885 + struct cryptodesc *crp_desc);
20887 +/*This function is called when a digest pointer has to be found within a
20889 +static inline uint8_t *icp_ocfDrvSkbuffDigestPointerFind(struct icp_drvOpData
20891 + int offsetInBytes,
20893 + digestSizeInBytes);
20895 +/*The following two functions are called if the SKBUFF digest pointer is not
20896 + positioned in the linear portion of the buffer (i.e. it is in a linked SKBUFF
20897 + or page fragment).*/
20898 +/*This function takes care of the page fragment case.*/
20899 +static inline uint8_t *icp_ocfDrvDigestSkbNRFragsCheck(struct sk_buff *skb,
20900 + struct skb_shared_info
20902 + int offsetInBytes,
20904 + digestSizeInBytes);
20906 +/*This function takes care of the linked list case.*/
20907 +static inline uint8_t *icp_ocfDrvDigestSkbFragListCheck(struct sk_buff *skb,
20908 + struct skb_shared_info
20910 + int offsetInBytes,
20912 + digestSizeInBytes);
20914 +/*This function is used to free an OCF->OCF_DRV session object*/
20915 +static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData);
20917 +/*max IOV buffs supported in a UIO structure*/
20918 +#define NUM_IOV_SUPPORTED (1)
20920 +/* Name : icp_ocfDrvSymCallBack
20922 + * Description : When this function returns it signifies that the LAC
20923 + * component has completed the relevant symmetric operation.
20925 + * Notes : The callbackTag is a pointer to an icp_drvOpData. This memory
20926 + * object was passed to LAC for the cryptographic processing and contains all
20927 + * the relevant information for cleaning up buffer handles etc. so that the
20928 + * OCF Tolapai Driver portion of this crypto operation can be fully completed.
20931 +icp_ocfDrvSymCallBack(void *callbackTag,
20932 + CpaStatus status,
20933 + const CpaCySymOp operationType,
20935 + CpaBufferList * pDstBuffer, CpaBoolean verifyResult)
20937 + struct cryptop *crp = NULL;
20938 + struct icp_drvOpData *temp_drvOpData =
20939 + (struct icp_drvOpData *)callbackTag;
20940 + uint64_t *tempBasePtr = NULL;
20941 + uint32_t tempLen = 0;
20943 + if (NULL == temp_drvOpData) {
20944 + DPRINTK("%s(): The callback from the LAC component"
20945 + " has failed due to Null userOpaque data"
20946 + "(status == %d).\n", __FUNCTION__, status);
20947 + DPRINTK("%s(): Unable to call OCF back! \n", __FUNCTION__);
20951 + crp = temp_drvOpData->crp;
20952 + crp->crp_etype = ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR;
20954 + if (NULL == pOpData) {
20955 + DPRINTK("%s(): The callback from the LAC component"
20956 + " has failed due to Null Symmetric Op data"
20957 + "(status == %d).\n", __FUNCTION__, status);
20958 + crp->crp_etype = ECANCELED;
20959 + crypto_done(crp);
20963 + if (NULL == pDstBuffer) {
20964 + DPRINTK("%s(): The callback from the LAC component"
20965 + " has failed due to Null Dst Bufferlist data"
20966 + "(status == %d).\n", __FUNCTION__, status);
20967 + crp->crp_etype = ECANCELED;
20968 + crypto_done(crp);
20972 + if (CPA_STATUS_SUCCESS == status) {
20974 + if (temp_drvOpData->bufferType == CRYPTO_F_SKBUF) {
20975 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
20976 + icp_ocfDrvBufferListToSkBuff(pDstBuffer,
20977 + (struct sk_buff **)
20978 + &(crp->crp_buf))) {
20979 + EPRINTK("%s(): BufferList to SkBuff "
20980 + "conversion error.\n", __FUNCTION__);
20981 + crp->crp_etype = EPERM;
20984 + icp_ocfDrvBufferListToPtrAndLen(pDstBuffer,
20985 + (void **)&tempBasePtr,
20987 + crp->crp_olen = (int)tempLen;
20991 + DPRINTK("%s(): The callback from the LAC component has failed"
20992 + "(status == %d).\n", __FUNCTION__, status);
20994 + crp->crp_etype = ECANCELED;
20997 + if (temp_drvOpData->numBufferListArray >
20998 + ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
20999 + kfree(pDstBuffer->pBuffers);
21001 + icp_ocfDrvFreeMetaData(pDstBuffer);
21002 + kmem_cache_free(drvOpData_zone, temp_drvOpData);
21004 + /* Invoke the OCF callback function */
21005 + crypto_done(crp);
21010 +/* Name : icp_ocfDrvNewSession
21012 + * Description : This function will create a new Driver<->OCF session
21014 + * Notes : LAC session registration happens during the first perform call.
21015 + * That is the first time we know all information about a given session.
21017 +int icp_ocfDrvNewSession(device_t dev, uint32_t * sid, struct cryptoini *cri)
21019 + struct icp_drvSessionData *sessionData = NULL;
21020 + uint32_t delete_session = 0;
21022 + /* The SID passed in should be our driver ID. We can return the */
21023 + /* local ID (LID) which is a unique identifier which we can use */
21024 + /* to differentiate between the encrypt/decrypt LAC session handles */
21025 + if (NULL == sid) {
21026 + EPRINTK("%s(): Invalid input parameters - NULL sid.\n",
21031 + if (NULL == cri) {
21032 + EPRINTK("%s(): Invalid input parameters - NULL cryptoini.\n",
21037 + if (icp_ocfDrvDriverId != *sid) {
21038 + EPRINTK("%s(): Invalid input parameters - bad driver ID\n",
21040 + EPRINTK("\t sid = 0x08%p \n \t cri = 0x08%p \n", sid, cri);
21044 + sessionData = kmem_cache_zalloc(drvSessionData_zone, GFP_ATOMIC);
21045 + if (NULL == sessionData) {
21046 + DPRINTK("%s():No memory for Session Data\n", __FUNCTION__);
21050 + /*ENTER CRITICAL SECTION */
21051 + spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
21052 + /*put this check in the spinlock so no new sessions can be added to the
21053 + linked list when we are exiting */
21054 + if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
21055 + delete_session++;
21057 + } else if (NO_OCF_TO_DRV_MAX_SESSIONS != max_sessions) {
21058 + if (atomic_read(&num_ocf_to_drv_registered_sessions) >=
21060 + atomic_read(&lac_session_failed_dereg_count))) {
21061 + delete_session++;
21063 + atomic_inc(&num_ocf_to_drv_registered_sessions);
21064 + /* Add to session data linked list */
21065 + list_add(&(sessionData->listNode),
21066 + &icp_ocfDrvGlobalSymListHead);
21069 + } else if (NO_OCF_TO_DRV_MAX_SESSIONS == max_sessions) {
21070 + list_add(&(sessionData->listNode),
21071 + &icp_ocfDrvGlobalSymListHead);
21074 + sessionData->inUse = ICP_SESSION_INITIALISED;
21076 + /*EXIT CRITICAL SECTION */
21077 + spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
21079 + if (delete_session) {
21080 + DPRINTK("%s():No Session handles available\n", __FUNCTION__);
21081 + kmem_cache_free(drvSessionData_zone, sessionData);
21085 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
21086 + icp_ocfDrvAlgorithmSetup(cri, &(sessionData->lacSessCtx))) {
21087 + DPRINTK("%s():algorithm not supported\n", __FUNCTION__);
21088 + icp_ocfDrvFreeOCFSession(sessionData);
21092 + if (cri->cri_next) {
21093 + if (cri->cri_next->cri_next != NULL) {
21094 + DPRINTK("%s():only two chained algorithms supported\n",
21096 + icp_ocfDrvFreeOCFSession(sessionData);
21100 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
21101 + icp_ocfDrvAlgorithmSetup(cri->cri_next,
21102 + &(sessionData->lacSessCtx))) {
21103 + DPRINTK("%s():second algorithm not supported\n",
21105 + icp_ocfDrvFreeOCFSession(sessionData);
21109 + sessionData->lacSessCtx.symOperation =
21110 + CPA_CY_SYM_OP_ALGORITHM_CHAINING;
21113 + *sid = (uint32_t) sessionData;
21115 + return ICP_OCF_DRV_STATUS_SUCCESS;
21118 +/* Name : icp_ocfDrvAlgorithmSetup
21120 + * Description : This function builds the session context data from the
21121 + * information supplied through OCF. Algorithm chain order and whether the
21122 + * session is Encrypt/Decrypt can only be found out at perform time however, so
21123 + * the session is registered with LAC at that time.
21126 +icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
21127 + CpaCySymSessionSetupData * lacSessCtx)
21130 + lacSessCtx->sessionPriority = CPA_CY_PRIORITY_NORMAL;
21132 + switch (cri->cri_alg) {
21134 + case CRYPTO_NULL_CBC:
21135 + DPRINTK("%s(): NULL CBC\n", __FUNCTION__);
21136 + lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
21137 + lacSessCtx->cipherSetupData.cipherAlgorithm =
21138 + CPA_CY_SYM_CIPHER_NULL;
21139 + lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
21140 + cri->cri_klen / NUM_BITS_IN_BYTE;
21141 + lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
21144 + case CRYPTO_DES_CBC:
21145 + DPRINTK("%s(): DES CBC\n", __FUNCTION__);
21146 + lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
21147 + lacSessCtx->cipherSetupData.cipherAlgorithm =
21148 + CPA_CY_SYM_CIPHER_DES_CBC;
21149 + lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
21150 + cri->cri_klen / NUM_BITS_IN_BYTE;
21151 + lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
21154 + case CRYPTO_3DES_CBC:
21155 + DPRINTK("%s(): 3DES CBC\n", __FUNCTION__);
21156 + lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
21157 + lacSessCtx->cipherSetupData.cipherAlgorithm =
21158 + CPA_CY_SYM_CIPHER_3DES_CBC;
21159 + lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
21160 + cri->cri_klen / NUM_BITS_IN_BYTE;
21161 + lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
21164 + case CRYPTO_AES_CBC:
21165 + DPRINTK("%s(): AES CBC\n", __FUNCTION__);
21166 + lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
21167 + lacSessCtx->cipherSetupData.cipherAlgorithm =
21168 + CPA_CY_SYM_CIPHER_AES_CBC;
21169 + lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
21170 + cri->cri_klen / NUM_BITS_IN_BYTE;
21171 + lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
21174 + case CRYPTO_ARC4:
21175 + DPRINTK("%s(): ARC4\n", __FUNCTION__);
21176 + lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
21177 + lacSessCtx->cipherSetupData.cipherAlgorithm =
21178 + CPA_CY_SYM_CIPHER_ARC4;
21179 + lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
21180 + cri->cri_klen / NUM_BITS_IN_BYTE;
21181 + lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
21184 + case CRYPTO_SHA1:
21185 + DPRINTK("%s(): SHA1\n", __FUNCTION__);
21186 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21187 + lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
21188 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
21189 + lacSessCtx->hashSetupData.digestResultLenInBytes =
21191 + cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
21195 + case CRYPTO_SHA1_HMAC:
21196 + DPRINTK("%s(): SHA1_HMAC\n", __FUNCTION__);
21197 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21198 + lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
21199 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
21200 + lacSessCtx->hashSetupData.digestResultLenInBytes =
21202 + cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
21203 + lacSessCtx->hashSetupData.authModeSetupData.authKey =
21205 + lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
21206 + cri->cri_klen / NUM_BITS_IN_BYTE;
21207 + lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
21211 + case CRYPTO_SHA2_256:
21212 + DPRINTK("%s(): SHA256\n", __FUNCTION__);
21213 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21214 + lacSessCtx->hashSetupData.hashAlgorithm =
21215 + CPA_CY_SYM_HASH_SHA256;
21216 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
21217 + lacSessCtx->hashSetupData.digestResultLenInBytes =
21219 + cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
21223 + case CRYPTO_SHA2_256_HMAC:
21224 + DPRINTK("%s(): SHA256_HMAC\n", __FUNCTION__);
21225 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21226 + lacSessCtx->hashSetupData.hashAlgorithm =
21227 + CPA_CY_SYM_HASH_SHA256;
21228 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
21229 + lacSessCtx->hashSetupData.digestResultLenInBytes =
21231 + cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
21232 + lacSessCtx->hashSetupData.authModeSetupData.authKey =
21234 + lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
21235 + cri->cri_klen / NUM_BITS_IN_BYTE;
21236 + lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
21240 + case CRYPTO_SHA2_384:
21241 + DPRINTK("%s(): SHA384\n", __FUNCTION__);
21242 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21243 + lacSessCtx->hashSetupData.hashAlgorithm =
21244 + CPA_CY_SYM_HASH_SHA384;
21245 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
21246 + lacSessCtx->hashSetupData.digestResultLenInBytes =
21248 + cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
21252 + case CRYPTO_SHA2_384_HMAC:
21253 + DPRINTK("%s(): SHA384_HMAC\n", __FUNCTION__);
21254 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21255 + lacSessCtx->hashSetupData.hashAlgorithm =
21256 + CPA_CY_SYM_HASH_SHA384;
21257 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
21258 + lacSessCtx->hashSetupData.digestResultLenInBytes =
21260 + cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
21261 + lacSessCtx->hashSetupData.authModeSetupData.authKey =
21263 + lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
21264 + cri->cri_klen / NUM_BITS_IN_BYTE;
21265 + lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
21269 + case CRYPTO_SHA2_512:
21270 + DPRINTK("%s(): SHA512\n", __FUNCTION__);
21271 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21272 + lacSessCtx->hashSetupData.hashAlgorithm =
21273 + CPA_CY_SYM_HASH_SHA512;
21274 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
21275 + lacSessCtx->hashSetupData.digestResultLenInBytes =
21277 + cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
21281 + case CRYPTO_SHA2_512_HMAC:
21282 + DPRINTK("%s(): SHA512_HMAC\n", __FUNCTION__);
21283 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21284 + lacSessCtx->hashSetupData.hashAlgorithm =
21285 + CPA_CY_SYM_HASH_SHA512;
21286 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
21287 + lacSessCtx->hashSetupData.digestResultLenInBytes =
21289 + cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
21290 + lacSessCtx->hashSetupData.authModeSetupData.authKey =
21292 + lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
21293 + cri->cri_klen / NUM_BITS_IN_BYTE;
21294 + lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
21299 + DPRINTK("%s(): MD5\n", __FUNCTION__);
21300 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21301 + lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
21302 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
21303 + lacSessCtx->hashSetupData.digestResultLenInBytes =
21305 + cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
21309 + case CRYPTO_MD5_HMAC:
21310 + DPRINTK("%s(): MD5_HMAC\n", __FUNCTION__);
21311 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21312 + lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
21313 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
21314 + lacSessCtx->hashSetupData.digestResultLenInBytes =
21316 + cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
21317 + lacSessCtx->hashSetupData.authModeSetupData.authKey =
21319 + lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
21320 + cri->cri_klen / NUM_BITS_IN_BYTE;
21321 + lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
21326 + DPRINTK("%s(): ALG Setup FAIL\n", __FUNCTION__);
21327 + return ICP_OCF_DRV_STATUS_FAIL;
21330 + return ICP_OCF_DRV_STATUS_SUCCESS;
21333 +/* Name : icp_ocfDrvFreeOCFSession
21335 + * Description : This function deletes all existing Session data representing
21336 + * the Cryptographic session established between OCF and this driver. This
21337 + * also includes freeing the memory allocated for the session context. The
21338 + * session object is also removed from the session linked list.
21340 +static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData)
21343 + sessionData->inUse = ICP_SESSION_DEREGISTERED;
21345 + /*ENTER CRITICAL SECTION */
21346 + spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
21348 + if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
21349 + /*If the Driver is exiting, allow that process to
21350 + handle any deletions */
21351 + /*EXIT CRITICAL SECTION */
21352 + spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
21356 + atomic_dec(&num_ocf_to_drv_registered_sessions);
21358 + list_del(&(sessionData->listNode));
21360 + /*EXIT CRITICAL SECTION */
21361 + spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
21363 + if (NULL != sessionData->sessHandle) {
21364 + kfree(sessionData->sessHandle);
21366 + kmem_cache_free(drvSessionData_zone, sessionData);
21369 +/* Name : icp_ocfDrvFreeLACSession
21371 + * Description : This attempts to deregister a LAC session. If it fails, the
21372 + * deregistration retry function is called.
21374 +int icp_ocfDrvFreeLACSession(device_t dev, uint64_t sid)
21376 + CpaCySymSessionCtx sessionToDeregister = NULL;
21377 + struct icp_drvSessionData *sessionData = NULL;
21378 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
21381 + sessionData = (struct icp_drvSessionData *)CRYPTO_SESID2LID(sid);
21382 + if (NULL == sessionData) {
21383 + EPRINTK("%s(): OCF Free session called with Null Session ID.\n",
21388 + sessionToDeregister = sessionData->sessHandle;
21390 + if (ICP_SESSION_INITIALISED == sessionData->inUse) {
21391 + DPRINTK("%s() Session not registered with LAC\n", __FUNCTION__);
21392 + } else if (NULL == sessionData->sessHandle) {
21394 + ("%s(): OCF Free session called with Null Session Handle.\n",
21398 + lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
21399 + sessionToDeregister);
21400 + if (CPA_STATUS_RETRY == lacStatus) {
21401 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
21402 + icp_ocfDrvDeregRetry(&sessionToDeregister)) {
21403 + /* the retry function increments the
21404 + dereg failed count */
21405 + DPRINTK("%s(): LAC failed to deregister the "
21406 + "session. (localSessionId= %p)\n",
21407 + __FUNCTION__, sessionToDeregister);
21411 + } else if (CPA_STATUS_SUCCESS != lacStatus) {
21412 + DPRINTK("%s(): LAC failed to deregister the session. "
21413 + "localSessionId= %p, lacStatus = %d\n",
21414 + __FUNCTION__, sessionToDeregister, lacStatus);
21415 + atomic_inc(&lac_session_failed_dereg_count);
21420 + icp_ocfDrvFreeOCFSession(sessionData);
21425 +/* Name : icp_ocfDrvAlgCheck
21427 + * Description : This function checks whether the cryptodesc argument pertains
21428 + * to a sym or hash function
21430 +static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc)
21433 + if (crp_desc->crd_alg == CRYPTO_3DES_CBC ||
21434 + crp_desc->crd_alg == CRYPTO_AES_CBC ||
21435 + crp_desc->crd_alg == CRYPTO_DES_CBC ||
21436 + crp_desc->crd_alg == CRYPTO_NULL_CBC ||
21437 + crp_desc->crd_alg == CRYPTO_ARC4) {
21438 + return ICP_OCF_DRV_ALG_CIPHER;
21441 + return ICP_OCF_DRV_ALG_HASH;
21444 +/* Name : icp_ocfDrvSymProcess
21446 + * Description : This function will map symmetric functionality calls from OCF
21447 + * to the LAC API. It will also allocate memory to store the session context.
21449 + * Notes: If it is the first perform call for a given session, then a LAC
21450 + * session is registered. After the session is registered, no checks as
21451 + * to whether session parameters have changed (e.g. alg chain order) are
21454 +int icp_ocfDrvSymProcess(device_t dev, struct cryptop *crp, int hint)
21456 + struct icp_drvSessionData *sessionData = NULL;
21457 + struct icp_drvOpData *drvOpData = NULL;
21458 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
21459 + Cpa32U sessionCtxSizeInBytes = 0;
21460 + uint16_t numBufferListArray = 0;
21462 + if (NULL == crp) {
21463 + DPRINTK("%s(): Invalid input parameters, cryptop is NULL\n",
21468 + if (NULL == crp->crp_desc) {
21469 + DPRINTK("%s(): Invalid input parameters, no crp_desc attached "
21470 + "to crp\n", __FUNCTION__);
21471 + crp->crp_etype = EINVAL;
21475 + if (NULL == crp->crp_buf) {
21476 + DPRINTK("%s(): Invalid input parameters, no buffer attached "
21477 + "to crp\n", __FUNCTION__);
21478 + crp->crp_etype = EINVAL;
21482 + if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
21483 + crp->crp_etype = EFAULT;
21487 + sessionData = (struct icp_drvSessionData *)
21488 + (CRYPTO_SESID2LID(crp->crp_sid));
21489 + if (NULL == sessionData) {
21490 + DPRINTK("%s(): Invalid input parameters, Null Session ID \n",
21492 + crp->crp_etype = EINVAL;
21496 +/*If we get a request against a deregisted session, cancel operation*/
21497 + if (ICP_SESSION_DEREGISTERED == sessionData->inUse) {
21498 + DPRINTK("%s(): Session ID %d was deregistered \n",
21499 + __FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
21500 + crp->crp_etype = EFAULT;
21504 +/*If none of the session states are set, then the session structure was either
21505 + not initialised properly or we are reading from a freed memory area (possible
21506 + due to OCF batch mode not removing queued requests against deregistered
21508 + if (ICP_SESSION_INITIALISED != sessionData->inUse &&
21509 + ICP_SESSION_RUNNING != sessionData->inUse) {
21510 + DPRINTK("%s(): Session - ID %d - not properly initialised or "
21511 + "memory freed back to the kernel \n",
21512 + __FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
21513 + crp->crp_etype = EINVAL;
21517 + /*For the below checks, remember error checking is already done in LAC.
21518 + We're not validating inputs subsequent to registration */
21519 + if (sessionData->inUse == ICP_SESSION_INITIALISED) {
21520 + DPRINTK("%s(): Initialising session\n", __FUNCTION__);
21522 + if (NULL != crp->crp_desc->crd_next) {
21523 + if (ICP_OCF_DRV_ALG_CIPHER ==
21524 + icp_ocfDrvAlgCheck(crp->crp_desc)) {
21526 + sessionData->lacSessCtx.algChainOrder =
21527 + CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
21529 + if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
21530 + sessionData->lacSessCtx.cipherSetupData.
21531 + cipherDirection =
21532 + CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
21534 + sessionData->lacSessCtx.cipherSetupData.
21535 + cipherDirection =
21536 + CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
21539 + sessionData->lacSessCtx.algChainOrder =
21540 + CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
21542 + if (crp->crp_desc->crd_next->crd_flags &
21544 + sessionData->lacSessCtx.cipherSetupData.
21545 + cipherDirection =
21546 + CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
21548 + sessionData->lacSessCtx.cipherSetupData.
21549 + cipherDirection =
21550 + CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
21555 + } else if (ICP_OCF_DRV_ALG_CIPHER ==
21556 + icp_ocfDrvAlgCheck(crp->crp_desc)) {
21557 + if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
21558 + sessionData->lacSessCtx.cipherSetupData.
21559 + cipherDirection =
21560 + CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
21562 + sessionData->lacSessCtx.cipherSetupData.
21563 + cipherDirection =
21564 + CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
21569 + /*No action required for standalone Auth here */
21571 + /* Allocate memory for SymSessionCtx before the Session Registration */
21573 + cpaCySymSessionCtxGetSize(CPA_INSTANCE_HANDLE_SINGLE,
21574 + &(sessionData->lacSessCtx),
21575 + &sessionCtxSizeInBytes);
21576 + if (CPA_STATUS_SUCCESS != lacStatus) {
21577 + EPRINTK("%s(): cpaCySymSessionCtxGetSize failed - %d\n",
21578 + __FUNCTION__, lacStatus);
21581 + sessionData->sessHandle =
21582 + kmalloc(sessionCtxSizeInBytes, GFP_ATOMIC);
21583 + if (NULL == sessionData->sessHandle) {
21585 + ("%s(): Failed to get memory for SymSessionCtx\n",
21590 + lacStatus = cpaCySymInitSession(CPA_INSTANCE_HANDLE_SINGLE,
21591 + icp_ocfDrvSymCallBack,
21592 + &(sessionData->lacSessCtx),
21593 + sessionData->sessHandle);
21595 + if (CPA_STATUS_SUCCESS != lacStatus) {
21596 + EPRINTK("%s(): cpaCySymInitSession failed -%d \n",
21597 + __FUNCTION__, lacStatus);
21601 + sessionData->inUse = ICP_SESSION_RUNNING;
21604 + drvOpData = kmem_cache_zalloc(drvOpData_zone, GFP_ATOMIC);
21605 + if (NULL == drvOpData) {
21606 + EPRINTK("%s():Failed to get memory for drvOpData\n",
21608 + crp->crp_etype = ENOMEM;
21612 + drvOpData->lacOpData.pSessionCtx = sessionData->sessHandle;
21613 + drvOpData->digestSizeInBytes = sessionData->lacSessCtx.hashSetupData.
21614 + digestResultLenInBytes;
21615 + drvOpData->crp = crp;
21617 + /* Set the default buffer list array memory allocation */
21618 + drvOpData->srcBuffer.pBuffers = drvOpData->bufferListArray;
21619 + drvOpData->numBufferListArray = ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS;
21622 + * Allocate buffer list array memory allocation if the
21623 + * data fragment is more than the default allocation
21625 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
21626 + numBufferListArray = icp_ocfDrvGetSkBuffFrags((struct sk_buff *)
21628 + if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS < numBufferListArray) {
21629 + DPRINTK("%s() numBufferListArray more than default\n",
21631 + drvOpData->srcBuffer.pBuffers = NULL;
21632 + drvOpData->srcBuffer.pBuffers =
21633 + kmalloc(numBufferListArray *
21634 + sizeof(CpaFlatBuffer), GFP_ATOMIC);
21635 + if (NULL == drvOpData->srcBuffer.pBuffers) {
21636 + EPRINTK("%s() Failed to get memory for "
21637 + "pBuffers\n", __FUNCTION__);
21638 + kmem_cache_free(drvOpData_zone, drvOpData);
21639 + crp->crp_etype = ENOMEM;
21642 + drvOpData->numBufferListArray = numBufferListArray;
21647 + * Check the type of buffer structure we got and convert it into
21648 + * CpaBufferList format.
21650 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
21651 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
21652 + icp_ocfDrvSkBuffToBufferList((struct sk_buff *)crp->crp_buf,
21653 + &(drvOpData->srcBuffer))) {
21654 + EPRINTK("%s():Failed to translate from SK_BUF "
21655 + "to bufferlist\n", __FUNCTION__);
21656 + crp->crp_etype = EINVAL;
21660 + drvOpData->bufferType = CRYPTO_F_SKBUF;
21661 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
21662 + /* OCF only supports IOV of one entry. */
21663 + if (NUM_IOV_SUPPORTED ==
21664 + ((struct uio *)(crp->crp_buf))->uio_iovcnt) {
21666 + icp_ocfDrvPtrAndLenToBufferList(((struct uio *)(crp->
21668 + uio_iov[0].iov_base,
21669 + ((struct uio *)(crp->
21671 + uio_iov[0].iov_len,
21675 + drvOpData->bufferType = CRYPTO_F_IOV;
21678 + DPRINTK("%s():Unable to handle IOVs with lengths of "
21679 + "greater than one!\n", __FUNCTION__);
21680 + crp->crp_etype = EINVAL;
21685 + icp_ocfDrvPtrAndLenToBufferList(crp->crp_buf,
21687 + &(drvOpData->srcBuffer));
21689 + drvOpData->bufferType = CRYPTO_BUF_CONTIG;
21692 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
21693 + icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->crp_desc)) {
21694 + crp->crp_etype = EINVAL;
21698 + if (drvOpData->crp->crp_desc->crd_next != NULL) {
21699 + if (icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->
21700 + crp_desc->crd_next)) {
21701 + crp->crp_etype = EINVAL;
21707 + /* Allocate srcBuffer's private meta data */
21708 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
21709 + icp_ocfDrvAllocMetaData(&(drvOpData->srcBuffer), drvOpData)) {
21710 + EPRINTK("%s() icp_ocfDrvAllocMetaData failed\n", __FUNCTION__);
21711 + memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
21712 + crp->crp_etype = EINVAL;
21716 + /* Perform "in-place" crypto operation */
21717 + lacStatus = cpaCySymPerformOp(CPA_INSTANCE_HANDLE_SINGLE,
21718 + (void *)drvOpData,
21719 + &(drvOpData->lacOpData),
21720 + &(drvOpData->srcBuffer),
21721 + &(drvOpData->srcBuffer),
21722 + &(drvOpData->verifyResult));
21723 + if (CPA_STATUS_RETRY == lacStatus) {
21724 + DPRINTK("%s(): cpaCySymPerformOp retry, lacStatus = %d\n",
21725 + __FUNCTION__, lacStatus);
21726 + memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
21727 + crp->crp_etype = EINVAL;
21730 + if (CPA_STATUS_SUCCESS != lacStatus) {
21731 + EPRINTK("%s(): cpaCySymPerformOp failed, lacStatus = %d\n",
21732 + __FUNCTION__, lacStatus);
21733 + memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
21734 + crp->crp_etype = EINVAL;
21738 + return 0; //OCF success status value
21741 + if (drvOpData->numBufferListArray > ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
21742 + kfree(drvOpData->srcBuffer.pBuffers);
21744 + icp_ocfDrvFreeMetaData(&(drvOpData->srcBuffer));
21745 + kmem_cache_free(drvOpData_zone, drvOpData);
21747 + return crp->crp_etype;
21750 +/* Name : icp_ocfDrvProcessDataSetup
21752 + * Description : This function will setup all the cryptographic operation data
21753 + * that is required by LAC to execute the operation.
21755 +static int icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
21756 + struct cryptodesc *crp_desc)
21758 + CpaCyRandGenOpData randGenOpData;
21759 + CpaFlatBuffer randData;
21761 + drvOpData->lacOpData.packetType = CPA_CY_SYM_PACKET_TYPE_FULL;
21763 + /* Convert from the cryptop to the ICP LAC crypto parameters */
21764 + switch (crp_desc->crd_alg) {
21765 + case CRYPTO_NULL_CBC:
21766 + drvOpData->lacOpData.
21767 + cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
21768 + drvOpData->lacOpData.
21769 + messageLenToCipherInBytes = crp_desc->crd_len;
21770 + drvOpData->verifyResult = CPA_FALSE;
21771 + drvOpData->lacOpData.ivLenInBytes = NULL_BLOCK_LEN;
21773 + case CRYPTO_DES_CBC:
21774 + drvOpData->lacOpData.
21775 + cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
21776 + drvOpData->lacOpData.
21777 + messageLenToCipherInBytes = crp_desc->crd_len;
21778 + drvOpData->verifyResult = CPA_FALSE;
21779 + drvOpData->lacOpData.ivLenInBytes = DES_BLOCK_LEN;
21781 + case CRYPTO_3DES_CBC:
21782 + drvOpData->lacOpData.
21783 + cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
21784 + drvOpData->lacOpData.
21785 + messageLenToCipherInBytes = crp_desc->crd_len;
21786 + drvOpData->verifyResult = CPA_FALSE;
21787 + drvOpData->lacOpData.ivLenInBytes = DES3_BLOCK_LEN;
21789 + case CRYPTO_ARC4:
21790 + drvOpData->lacOpData.
21791 + cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
21792 + drvOpData->lacOpData.
21793 + messageLenToCipherInBytes = crp_desc->crd_len;
21794 + drvOpData->verifyResult = CPA_FALSE;
21795 + drvOpData->lacOpData.ivLenInBytes = ARC4_COUNTER_LEN;
21797 + case CRYPTO_AES_CBC:
21798 + drvOpData->lacOpData.
21799 + cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
21800 + drvOpData->lacOpData.
21801 + messageLenToCipherInBytes = crp_desc->crd_len;
21802 + drvOpData->verifyResult = CPA_FALSE;
21803 + drvOpData->lacOpData.ivLenInBytes = RIJNDAEL128_BLOCK_LEN;
21805 + case CRYPTO_SHA1:
21806 + case CRYPTO_SHA1_HMAC:
21807 + case CRYPTO_SHA2_256:
21808 + case CRYPTO_SHA2_256_HMAC:
21809 + case CRYPTO_SHA2_384:
21810 + case CRYPTO_SHA2_384_HMAC:
21811 + case CRYPTO_SHA2_512:
21812 + case CRYPTO_SHA2_512_HMAC:
21814 + case CRYPTO_MD5_HMAC:
21815 + drvOpData->lacOpData.
21816 + hashStartSrcOffsetInBytes = crp_desc->crd_skip;
21817 + drvOpData->lacOpData.
21818 + messageLenToHashInBytes = crp_desc->crd_len;
21819 + drvOpData->lacOpData.
21821 + icp_ocfDrvDigestPointerFind(drvOpData, crp_desc);
21823 + if (NULL == drvOpData->lacOpData.pDigestResult) {
21824 + DPRINTK("%s(): ERROR - could not calculate "
21825 + "Digest Result memory address\n", __FUNCTION__);
21826 + return ICP_OCF_DRV_STATUS_FAIL;
21829 + drvOpData->lacOpData.digestVerify = CPA_FALSE;
21832 + DPRINTK("%s(): Crypto process error - algorithm not "
21833 + "found \n", __FUNCTION__);
21834 + return ICP_OCF_DRV_STATUS_FAIL;
21837 + /* Figure out what the IV is supposed to be */
21838 + if ((crp_desc->crd_alg == CRYPTO_DES_CBC) ||
21839 + (crp_desc->crd_alg == CRYPTO_3DES_CBC) ||
21840 + (crp_desc->crd_alg == CRYPTO_AES_CBC)) {
21841 + /*ARC4 doesn't use an IV */
21842 + if (crp_desc->crd_flags & CRD_F_IV_EXPLICIT) {
21843 + /* Explicit IV provided to OCF */
21844 + drvOpData->lacOpData.pIv = crp_desc->crd_iv;
21846 + /* IV is not explicitly provided to OCF */
21848 + /* Point the LAC OP Data IV pointer to our allocated
21849 + storage location for this session. */
21850 + drvOpData->lacOpData.pIv = drvOpData->ivData;
21852 + if ((crp_desc->crd_flags & CRD_F_ENCRYPT) &&
21853 + ((crp_desc->crd_flags & CRD_F_IV_PRESENT) == 0)) {
21855 + /* Encrypting - need to create IV */
21856 + randGenOpData.generateBits = CPA_TRUE;
21857 + randGenOpData.lenInBytes = MAX_IV_LEN_IN_BYTES;
21859 + icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *)
21862 + MAX_IV_LEN_IN_BYTES,
21865 + if (CPA_STATUS_SUCCESS !=
21866 + cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
21868 + &randGenOpData, &randData)) {
21869 + DPRINTK("%s(): ERROR - Failed to"
21871 + " Initialisation Vector\n",
21873 + return ICP_OCF_DRV_STATUS_FAIL;
21876 + crypto_copyback(drvOpData->crp->
21878 + drvOpData->crp->crp_buf,
21879 + crp_desc->crd_inject,
21880 + drvOpData->lacOpData.
21882 + (caddr_t) (drvOpData->lacOpData.
21885 + /* Reading IV from buffer */
21886 + crypto_copydata(drvOpData->crp->
21888 + drvOpData->crp->crp_buf,
21889 + crp_desc->crd_inject,
21890 + drvOpData->lacOpData.
21892 + (caddr_t) (drvOpData->lacOpData.
21900 + return ICP_OCF_DRV_STATUS_SUCCESS;
21903 +/* Name : icp_ocfDrvDigestPointerFind
21905 + * Description : This function is used to find the memory address of where the
21906 + * digest information shall be stored in. Input buffer types are an skbuff, iov
21907 + * or flat buffer. The address is found using the buffer data start address and
21910 + * Note: In the case of a linux skbuff, the digest address may exist within
21911 + * a memory space linked to from the start buffer. These linked memory spaces
21912 + * must be traversed by the data length offset in order to find the digest start
21913 + * address. Whether there is enough space for the digest must also be checked.
21916 +static uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData *drvOpData,
21917 + struct cryptodesc *crp_desc)
21920 + int offsetInBytes = crp_desc->crd_inject;
21921 + uint32_t digestSizeInBytes = drvOpData->digestSizeInBytes;
21922 + uint8_t *flat_buffer_base = NULL;
21923 + int flat_buffer_length = 0;
21924 + struct sk_buff *skb;
21926 + if (drvOpData->crp->crp_flags & CRYPTO_F_SKBUF) {
21927 + /*check if enough overall space to store hash */
21928 + skb = (struct sk_buff *)(drvOpData->crp->crp_buf);
21930 + if (skb->len < (offsetInBytes + digestSizeInBytes)) {
21931 + DPRINTK("%s() Not enough space for Digest"
21932 + " payload after the offset (%d), "
21933 + "digest size (%d) \n", __FUNCTION__,
21934 + offsetInBytes, digestSizeInBytes);
21938 + return icp_ocfDrvSkbuffDigestPointerFind(drvOpData,
21940 + digestSizeInBytes);
21943 + /* IOV or flat buffer */
21944 + if (drvOpData->crp->crp_flags & CRYPTO_F_IOV) {
21945 + /*single IOV check has already been done */
21946 + flat_buffer_base = ((struct uio *)
21947 + (drvOpData->crp->crp_buf))->
21948 + uio_iov[0].iov_base;
21949 + flat_buffer_length = ((struct uio *)
21950 + (drvOpData->crp->crp_buf))->
21951 + uio_iov[0].iov_len;
21953 + flat_buffer_base = (uint8_t *) drvOpData->crp->crp_buf;
21954 + flat_buffer_length = drvOpData->crp->crp_ilen;
21957 + if (flat_buffer_length < (offsetInBytes + digestSizeInBytes)) {
21958 + DPRINTK("%s() Not enough space for Digest "
21959 + "(IOV/Flat Buffer) \n", __FUNCTION__);
21962 + return (uint8_t *) (flat_buffer_base + offsetInBytes);
21965 + DPRINTK("%s() Should not reach this point\n", __FUNCTION__);
21969 +/* Name : icp_ocfDrvSkbuffDigestPointerFind
21971 + * Description : This function is used by icp_ocfDrvDigestPointerFind to process
21972 + * the non-linear portion of the skbuff if the fragmentation type is a linked
21973 + * list (frag_list is not NULL in the skb_shared_info structure)
21975 +static inline uint8_t *icp_ocfDrvSkbuffDigestPointerFind(struct icp_drvOpData
21977 + int offsetInBytes,
21979 + digestSizeInBytes)
21982 + struct sk_buff *skb = NULL;
21983 + struct skb_shared_info *skb_shared = NULL;
21985 + uint32_t skbuffisnonlinear = 0;
21987 + uint32_t skbheadlen = 0;
21989 + skb = (struct sk_buff *)(drvOpData->crp->crp_buf);
21990 + skbuffisnonlinear = skb_is_nonlinear(skb);
21992 + skbheadlen = skb_headlen(skb);
21994 + /*Linear skb checks */
21995 + if (skbheadlen > offsetInBytes) {
21997 + if (skbheadlen >= (offsetInBytes + digestSizeInBytes)) {
21998 + return (uint8_t *) (skb->data + offsetInBytes);
22000 + DPRINTK("%s() Auth payload stretches "
22001 + "accross contiguous memory\n", __FUNCTION__);
22005 + if (skbuffisnonlinear) {
22006 + offsetInBytes -= skbheadlen;
22008 + DPRINTK("%s() Offset outside of buffer boundaries\n",
22014 + /*Non Linear checks */
22015 + skb_shared = (struct skb_shared_info *)(skb->end);
22016 + if (unlikely(NULL == skb_shared)) {
22017 + DPRINTK("%s() skbuff shared info stucture is NULL! \n",
22020 + } else if ((0 != skb_shared->nr_frags) &&
22021 + (skb_shared->frag_list != NULL)) {
22022 + DPRINTK("%s() skbuff nr_frags AND "
22023 + "frag_list not supported \n", __FUNCTION__);
22027 + /*TCP segmentation more likely than IP fragmentation */
22028 + if (likely(0 != skb_shared->nr_frags)) {
22029 + return icp_ocfDrvDigestSkbNRFragsCheck(skb, skb_shared,
22031 + digestSizeInBytes);
22032 + } else if (skb_shared->frag_list != NULL) {
22033 + return icp_ocfDrvDigestSkbFragListCheck(skb, skb_shared,
22035 + digestSizeInBytes);
22037 + DPRINTK("%s() skbuff is non-linear but does not show any "
22038 + "linked data\n", __FUNCTION__);
22044 +/* Name : icp_ocfDrvDigestSkbNRFragsCheck
22046 + * Description : This function is used by icp_ocfDrvSkbuffDigestPointerFind to
22047 + * process the non-linear portion of the skbuff, if the fragmentation type is
22050 +static inline uint8_t *icp_ocfDrvDigestSkbNRFragsCheck(struct sk_buff *skb,
22051 + struct skb_shared_info
22053 + int offsetInBytes,
22055 + digestSizeInBytes)
22058 + /*nr_frags starts from 1 */
22059 + if (MAX_SKB_FRAGS < skb_shared->nr_frags) {
22060 + DPRINTK("%s error processing skbuff "
22061 + "page frame -- MAX FRAGS exceeded \n", __FUNCTION__);
22065 + for (i = 0; i < skb_shared->nr_frags; i++) {
22067 + if (offsetInBytes >= skb_shared->frags[i].size) {
22068 + /*offset still greater than data position */
22069 + offsetInBytes -= skb_shared->frags[i].size;
22071 + /* found the page containing start of hash */
22073 + if (NULL == skb_shared->frags[i].page) {
22074 + DPRINTK("%s() Linked page is NULL!\n",
22079 + if (offsetInBytes + digestSizeInBytes >
22080 + skb_shared->frags[i].size) {
22081 + DPRINTK("%s() Auth payload stretches accross "
22082 + "contiguous memory\n", __FUNCTION__);
22085 + return (uint8_t *) (skb_shared->frags[i].page +
22086 + skb_shared->frags[i].
22091 + /*only possible if internal page sizes are set wrong */
22092 + if (offsetInBytes < 0) {
22093 + DPRINTK("%s error processing skbuff page frame "
22094 + "-- offset calculation \n", __FUNCTION__);
22098 + /*only possible if internal page sizes are set wrong */
22099 + DPRINTK("%s error processing skbuff page frame "
22100 + "-- ran out of page fragments, remaining offset = %d \n",
22101 + __FUNCTION__, offsetInBytes);
22106 +/* Name : icp_ocfDrvDigestSkbFragListCheck
22108 + * Description : This function is used by icp_ocfDrvSkbuffDigestPointerFind to
22109 + * process the non-linear portion of the skbuff, if the fragmentation type is
22113 +static inline uint8_t *icp_ocfDrvDigestSkbFragListCheck(struct sk_buff *skb,
22114 + struct skb_shared_info
22116 + int offsetInBytes,
22118 + digestSizeInBytes)
22121 + struct sk_buff *skb_list = skb_shared->frag_list;
22122 + /*check added for readability */
22123 + if (NULL == skb_list) {
22124 + DPRINTK("%s error processing skbuff "
22125 + "-- no more list! \n", __FUNCTION__);
22129 + for (; skb_list; skb_list = skb_list->next) {
22130 + if (NULL == skb_list) {
22131 + DPRINTK("%s error processing skbuff "
22132 + "-- no more list! \n", __FUNCTION__);
22136 + if (offsetInBytes >= skb_list->len) {
22137 + offsetInBytes -= skb_list->len;
22140 + if (offsetInBytes + digestSizeInBytes > skb_list->len) {
22141 + DPRINTK("%s() Auth payload stretches accross "
22142 + "contiguous memory\n", __FUNCTION__);
22145 + return (uint8_t *)
22146 + (skb_list->data + offsetInBytes);
22151 + /*This check is only needed if internal skb_list length values
22152 + are set wrong. */
22153 + if (0 > offsetInBytes) {
22154 + DPRINTK("%s() error processing skbuff object -- offset "
22155 + "calculation \n", __FUNCTION__);
22161 + /*catch all for unusual for-loop exit.
22162 + This code should never be reached */
22163 + DPRINTK("%s() Catch-All hit! Process error.\n", __FUNCTION__);
22167 +++ b/crypto/ocf/pasemi/pasemi.c
22170 + * Copyright (C) 2007 PA Semi, Inc
22172 + * Driver for the PA Semi PWRficient DMA Crypto Engine
22174 + * This program is free software; you can redistribute it and/or modify
22175 + * it under the terms of the GNU General Public License version 2 as
22176 + * published by the Free Software Foundation.
22178 + * This program is distributed in the hope that it will be useful,
22179 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
22180 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22181 + * GNU General Public License for more details.
22183 + * You should have received a copy of the GNU General Public License
22184 + * along with this program; if not, write to the Free Software
22185 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22188 +#ifndef AUTOCONF_INCLUDED
22189 +#include <linux/config.h>
22191 +#include <linux/module.h>
22192 +#include <linux/init.h>
22193 +#include <linux/interrupt.h>
22194 +#include <linux/timer.h>
22195 +#include <linux/random.h>
22196 +#include <linux/skbuff.h>
22197 +#include <asm/scatterlist.h>
22198 +#include <linux/moduleparam.h>
22199 +#include <linux/pci.h>
22200 +#include <cryptodev.h>
22202 +#include "pasemi_fnu.h"
22204 +#define DRV_NAME "pasemi"
22206 +#define TIMER_INTERVAL 1000
22208 +static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
22209 +static struct pasdma_status volatile * dma_status;
22212 +module_param(debug, int, 0644);
22213 +MODULE_PARM_DESC(debug, "Enable debug");
22215 +static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
22217 + desc->postop = 0;
22218 + desc->quad[0] = hdr;
22219 + desc->quad_cnt = 1;
22223 +static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
22225 + desc->quad[desc->quad_cnt++] = val;
22226 + desc->size = (desc->quad_cnt + 1) / 2;
22229 +static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
22231 + desc->quad[0] |= hdr;
22234 +static int pasemi_desc_size(struct pasemi_desc *desc)
22236 + return desc->size;
22239 +static void pasemi_ring_add_desc(
22240 + struct pasemi_fnu_txring *ring,
22241 + struct pasemi_desc *desc,
22242 + struct cryptop *crp) {
22244 + int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
22246 + TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
22247 + TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
22248 + TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;
22250 + for (i = 0; i < desc->quad_cnt; i += 2) {
22251 + ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
22252 + ring->desc[ring_index] = desc->quad[i];
22253 + ring->desc[ring_index + 1] = desc->quad[i + 1];
22254 + ring->next_to_fill++;
22257 + if (desc->quad_cnt & 1)
22258 + ring->desc[ring_index + 1] = 0;
22261 +static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
22263 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
22268 + * Generate a new software session.
22271 +pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
22273 + struct cryptoini *c, *encini = NULL, *macini = NULL;
22274 + struct pasemi_softc *sc = device_get_softc(dev);
22275 + struct pasemi_session *ses = NULL, **sespp;
22276 + int sesn, blksz = 0;
22278 + unsigned long flags;
22279 + struct pasemi_desc init_desc;
22280 + struct pasemi_fnu_txring *txring;
22282 + DPRINTF("%s()\n", __FUNCTION__);
22283 + if (sidp == NULL || cri == NULL || sc == NULL) {
22284 + DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
22287 + for (c = cri; c != NULL; c = c->cri_next) {
22288 + if (ALG_IS_SIG(c->cri_alg)) {
22292 + } else if (ALG_IS_CIPHER(c->cri_alg)) {
22297 + DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
22301 + if (encini == NULL && macini == NULL)
22304 + /* validate key length */
22305 + switch (encini->cri_alg) {
22306 + case CRYPTO_DES_CBC:
22307 + if (encini->cri_klen != 64)
22309 + ccmd = DMA_CALGO_DES;
22311 + case CRYPTO_3DES_CBC:
22312 + if (encini->cri_klen != 192)
22314 + ccmd = DMA_CALGO_3DES;
22316 + case CRYPTO_AES_CBC:
22317 + if (encini->cri_klen != 128 &&
22318 + encini->cri_klen != 192 &&
22319 + encini->cri_klen != 256)
22321 + ccmd = DMA_CALGO_AES;
22323 + case CRYPTO_ARC4:
22324 + if (encini->cri_klen != 128)
22326 + ccmd = DMA_CALGO_ARC;
22329 + DPRINTF("UNKNOWN encini->cri_alg %d\n",
22330 + encini->cri_alg);
22336 + switch (macini->cri_alg) {
22338 + case CRYPTO_MD5_HMAC:
22341 + case CRYPTO_SHA1:
22342 + case CRYPTO_SHA1_HMAC:
22346 + DPRINTF("UNKNOWN macini->cri_alg %d\n",
22347 + macini->cri_alg);
22350 + if (((macini->cri_klen + 7) / 8) > blksz) {
22351 + DPRINTF("key length %d bigger than blksize %d not supported\n",
22352 + ((macini->cri_klen + 7) / 8), blksz);
22357 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
22358 + if (sc->sc_sessions[sesn] == NULL) {
22359 + sc->sc_sessions[sesn] = (struct pasemi_session *)
22360 + kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
22361 + ses = sc->sc_sessions[sesn];
22363 + } else if (sc->sc_sessions[sesn]->used == 0) {
22364 + ses = sc->sc_sessions[sesn];
22369 + if (ses == NULL) {
22370 + sespp = (struct pasemi_session **)
22371 + kzalloc(sc->sc_nsessions * 2 *
22372 + sizeof(struct pasemi_session *), GFP_ATOMIC);
22373 + if (sespp == NULL)
22375 + memcpy(sespp, sc->sc_sessions,
22376 + sc->sc_nsessions * sizeof(struct pasemi_session *));
22377 + kfree(sc->sc_sessions);
22378 + sc->sc_sessions = sespp;
22379 + sesn = sc->sc_nsessions;
22380 + ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
22381 + kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
22384 + sc->sc_nsessions *= 2;
22389 + ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
22390 + sizeof(struct pasemi_session), DMA_TO_DEVICE);
22392 + /* enter the channel scheduler */
22393 + spin_lock_irqsave(&sc->sc_chnlock, flags);
22395 + /* ARC4 has to be processed by the even channel */
22396 + if (encini && (encini->cri_alg == CRYPTO_ARC4))
22397 + ses->chan = sc->sc_lastchn & ~1;
22399 + ses->chan = sc->sc_lastchn;
22400 + sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;
22402 + spin_unlock_irqrestore(&sc->sc_chnlock, flags);
22404 + txring = &sc->tx[ses->chan];
22407 + ses->ccmd = ccmd;
22410 + /* XXX may read fewer than requested */
22411 + get_random_bytes(ses->civ, sizeof(ses->civ));
22413 + ses->keysz = (encini->cri_klen - 63) / 64;
22414 + memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);
22416 + pasemi_desc_start(&init_desc,
22417 + XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
22418 + pasemi_desc_build(&init_desc,
22419 + XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
22422 + if (macini->cri_alg == CRYPTO_MD5_HMAC ||
22423 + macini->cri_alg == CRYPTO_SHA1_HMAC)
22424 + memcpy(ses->hkey, macini->cri_key, blksz);
22426 + /* Load initialization constants (RFC 1321, 3174) */
22427 + ses->hiv[0] = 0x67452301efcdab89ULL;
22428 + ses->hiv[1] = 0x98badcfe10325476ULL;
22429 + ses->hiv[2] = 0xc3d2e1f000000000ULL;
22431 + ses->hseq = 0ULL;
22434 + spin_lock_irqsave(&txring->fill_lock, flags);
22436 + if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
22437 + txring->next_to_clean) > TX_RING_SIZE) {
22438 + spin_unlock_irqrestore(&txring->fill_lock, flags);
22443 + pasemi_ring_add_desc(txring, &init_desc, NULL);
22444 + pasemi_ring_incr(sc, ses->chan,
22445 + pasemi_desc_size(&init_desc));
22448 + txring->sesn = sesn;
22449 + spin_unlock_irqrestore(&txring->fill_lock, flags);
22451 + *sidp = PASEMI_SID(sesn);
22456 + * Deallocate a session.
22459 +pasemi_freesession(device_t dev, u_int64_t tid)
22461 + struct pasemi_softc *sc = device_get_softc(dev);
22463 + u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
22465 + DPRINTF("%s()\n", __FUNCTION__);
22469 + session = PASEMI_SESSION(sid);
22470 + if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
22473 + pci_unmap_single(sc->dma_pdev,
22474 + sc->sc_sessions[session]->dma_addr,
22475 + sizeof(struct pasemi_session), DMA_TO_DEVICE);
22476 + memset(sc->sc_sessions[session], 0,
22477 + sizeof(struct pasemi_session));
22483 +pasemi_process(device_t dev, struct cryptop *crp, int hint)
22486 + int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
22487 + struct pasemi_softc *sc = device_get_softc(dev);
22488 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
22490 + struct pasemi_desc init_desc, work_desc;
22491 + struct pasemi_session *ses;
22492 + struct sk_buff *skb;
22493 + struct uio *uiop;
22494 + unsigned long flags;
22495 + struct pasemi_fnu_txring *txring;
22497 + DPRINTF("%s()\n", __FUNCTION__);
22499 + if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
22502 + crp->crp_etype = 0;
22503 + if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
22506 + ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];
22508 + crd1 = crp->crp_desc;
22509 + if (crd1 == NULL) {
22513 + crd2 = crd1->crd_next;
22515 + if (ALG_IS_SIG(crd1->crd_alg)) {
22517 + if (crd2 == NULL)
22519 + else if (ALG_IS_CIPHER(crd2->crd_alg) &&
22520 + (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
22524 + } else if (ALG_IS_CIPHER(crd1->crd_alg)) {
22526 + if (crd2 == NULL)
22528 + else if (ALG_IS_SIG(crd2->crd_alg) &&
22529 + (crd1->crd_flags & CRD_F_ENCRYPT))
22536 + chsel = ses->chan;
22538 + txring = &sc->tx[chsel];
22540 + if (enccrd && !maccrd) {
22541 + if (enccrd->crd_alg == CRYPTO_ARC4)
22543 + reinit_size = 0x40;
22544 + srclen = crp->crp_ilen;
22546 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
22547 + | XCT_FUN_FUN(chsel));
22548 + if (enccrd->crd_flags & CRD_F_ENCRYPT)
22549 + pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
22551 + pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
22552 + } else if (enccrd && maccrd) {
22553 + if (enccrd->crd_alg == CRYPTO_ARC4)
22555 + reinit_size = 0x68;
22557 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
22558 + /* Encrypt -> Authenticate */
22559 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
22560 + | XCT_FUN_A | XCT_FUN_FUN(chsel));
22561 + srclen = maccrd->crd_skip + maccrd->crd_len;
22563 + /* Authenticate -> Decrypt */
22564 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
22565 + | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
22566 + pasemi_desc_build(&work_desc, 0);
22567 + pasemi_desc_build(&work_desc, 0);
22568 + pasemi_desc_build(&work_desc, 0);
22569 + work_desc.postop = PASEMI_CHECK_SIG;
22570 + srclen = crp->crp_ilen;
22573 + pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
22574 + pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
22575 + } else if (!enccrd && maccrd) {
22576 + srclen = maccrd->crd_len;
22578 + pasemi_desc_start(&init_desc,
22579 + XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
22580 + pasemi_desc_build(&init_desc,
22581 + XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));
22583 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
22584 + | XCT_FUN_A | XCT_FUN_FUN(chsel));
22588 + switch (enccrd->crd_alg) {
22589 + case CRYPTO_3DES_CBC:
22590 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
22591 + XCT_FUN_BCM_CBC);
22592 + ivsize = sizeof(u64);
22594 + case CRYPTO_DES_CBC:
22595 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
22596 + XCT_FUN_BCM_CBC);
22597 + ivsize = sizeof(u64);
22599 + case CRYPTO_AES_CBC:
22600 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
22601 + XCT_FUN_BCM_CBC);
22602 + ivsize = 2 * sizeof(u64);
22604 + case CRYPTO_ARC4:
22605 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
22609 + printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
22610 + enccrd->crd_alg);
22615 + ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
22616 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
22617 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
22618 + memcpy(ivp, enccrd->crd_iv, ivsize);
22619 + /* If IV is not present in the buffer already, it has to be copied there */
22620 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
22621 + crypto_copyback(crp->crp_flags, crp->crp_buf,
22622 + enccrd->crd_inject, ivsize, ivp);
22624 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
22625 + /* IV is provided explicitly in descriptor */
22626 + memcpy(ivp, enccrd->crd_iv, ivsize);
22628 + /* IV is provided in the packet */
22629 + crypto_copydata(crp->crp_flags, crp->crp_buf,
22630 + enccrd->crd_inject, ivsize,
22636 + switch (maccrd->crd_alg) {
22638 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
22639 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
22641 + case CRYPTO_SHA1:
22642 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
22643 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
22645 + case CRYPTO_MD5_HMAC:
22646 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
22647 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
22649 + case CRYPTO_SHA1_HMAC:
22650 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
22651 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
22654 + printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
22655 + maccrd->crd_alg);
22661 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
22662 + /* using SKB buffers */
22663 + skb = (struct sk_buff *)crp->crp_buf;
22664 + if (skb_shinfo(skb)->nr_frags) {
22665 + printk(DRV_NAME ": skb frags unimplemented\n");
22669 + pasemi_desc_build(
22671 + XCT_FUN_DST_PTR(skb->len, pci_map_single(
22672 + sc->dma_pdev, skb->data,
22673 + skb->len, DMA_TO_DEVICE)));
22674 + pasemi_desc_build(
22677 + srclen, pci_map_single(
22678 + sc->dma_pdev, skb->data,
22679 + srclen, DMA_TO_DEVICE)));
22680 + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
22681 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
22682 + /* using IOV buffers */
22683 + uiop = (struct uio *)crp->crp_buf;
22684 + if (uiop->uio_iovcnt > 1) {
22685 + printk(DRV_NAME ": iov frags unimplemented\n");
22690 + /* crp_olen is never set; always use crp_ilen */
22691 + pasemi_desc_build(
22693 + XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
22695 + uiop->uio_iov->iov_base,
22696 + crp->crp_ilen, DMA_TO_DEVICE)));
22697 + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
22699 + pasemi_desc_build(
22701 + XCT_FUN_SRC_PTR(srclen, pci_map_single(
22703 + uiop->uio_iov->iov_base,
22704 + srclen, DMA_TO_DEVICE)));
22706 + /* using contig buffers */
22707 + pasemi_desc_build(
22709 + XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
22712 + crp->crp_ilen, DMA_TO_DEVICE)));
22713 + pasemi_desc_build(
22715 + XCT_FUN_SRC_PTR(srclen, pci_map_single(
22717 + crp->crp_buf, srclen,
22718 + DMA_TO_DEVICE)));
22719 + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
22722 + spin_lock_irqsave(&txring->fill_lock, flags);
22724 + if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
22725 + txring->sesn = PASEMI_SESSION(crp->crp_sid);
22730 + pasemi_desc_start(&init_desc,
22731 + XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
22732 + pasemi_desc_build(&init_desc,
22733 + XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
22736 + if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
22737 + pasemi_desc_size(&work_desc)) -
22738 + txring->next_to_clean) > TX_RING_SIZE) {
22739 + spin_unlock_irqrestore(&txring->fill_lock, flags);
22744 + pasemi_ring_add_desc(txring, &init_desc, NULL);
22745 + pasemi_ring_add_desc(txring, &work_desc, crp);
22747 + pasemi_ring_incr(sc, chsel,
22748 + pasemi_desc_size(&init_desc) +
22749 + pasemi_desc_size(&work_desc));
22751 + spin_unlock_irqrestore(&txring->fill_lock, flags);
22753 + mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);
22758 + printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
22759 + crd1->crd_alg, crd2->crd_alg);
22763 + if (err != ERESTART) {
22764 + crp->crp_etype = err;
22765 + crypto_done(crp);
22770 +static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
22772 + int i, j, ring_idx;
22773 + struct pasemi_fnu_txring *ring = &sc->tx[chan];
22775 + int flags, loops = 10;
22777 + struct cryptop *crp;
22779 + spin_lock_irqsave(&ring->clean_lock, flags);
22781 + while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
22782 + & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
22785 + for (i = 0; i < delta_cnt; i++) {
22786 + desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
22787 + crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
22789 + ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
22790 + if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
22791 + /* Need to make sure signature matched,
22792 + * if not - return error */
22793 + if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
22794 + crp->crp_etype = -EINVAL;
22796 + crypto_done(TX_DESC_INFO(ring,
22797 + ring->next_to_clean).cf_crp);
22798 + TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
22799 + pci_unmap_single(
22801 + XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
22802 + PCI_DMA_TODEVICE);
22804 + ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;
22806 + ring->next_to_clean++;
22807 + for (j = 1; j < desc_size; j++) {
22809 + (ring->next_to_clean &
22810 + (TX_RING_SIZE-1));
22811 + pci_unmap_single(
22813 + XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
22814 + PCI_DMA_TODEVICE);
22815 + if (ring->desc[ring_idx + 1])
22816 + pci_unmap_single(
22818 + XCT_PTR_ADDR_LEN(
22821 + PCI_DMA_TODEVICE);
22822 + ring->desc[ring_idx] =
22823 + ring->desc[ring_idx + 1] = 0;
22824 + ring->next_to_clean++;
22827 + for (j = 0; j < desc_size; j++) {
22828 + ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
22829 + ring->desc[ring_idx] =
22830 + ring->desc[ring_idx + 1] = 0;
22831 + ring->next_to_clean++;
22836 + ring->total_pktcnt += delta_cnt;
22838 + spin_unlock_irqrestore(&ring->clean_lock, flags);
22843 +static void sweepup_tx(struct pasemi_softc *sc)
22847 + for (i = 0; i < sc->sc_num_channels; i++)
22848 + pasemi_clean_tx(sc, i);
22851 +static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
22853 + struct pasemi_softc *sc = arg;
22854 + unsigned int reg;
22855 + int chan = irq - sc->base_irq;
22856 + int chan_index = sc->base_chan + chan;
22857 + u64 stat = dma_status->tx_sta[chan_index];
22859 + DPRINTF("%s()\n", __FUNCTION__);
22861 + if (!(stat & PAS_STATUS_CAUSE_M))
22864 + pasemi_clean_tx(sc, chan);
22866 + stat = dma_status->tx_sta[chan_index];
22868 + reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
22869 + PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);
22871 + if (stat & PAS_STATUS_SOFT)
22872 + reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
22874 + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);
22877 + return IRQ_HANDLED;
22880 +static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
22883 + int chan_index = chan + sc->base_chan;
22885 + struct pasemi_fnu_txring *ring;
22887 + ring = &sc->tx[chan];
22889 + spin_lock_init(&ring->fill_lock);
22890 + spin_lock_init(&ring->clean_lock);
22892 + ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
22893 + TX_RING_SIZE, GFP_KERNEL);
22894 + if (!ring->desc_info)
22897 + /* Allocate descriptors */
22898 + ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
22901 + &ring->dma, GFP_KERNEL);
22905 + memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));
22907 + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);
22909 + ring->total_pktcnt = 0;
22911 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
22912 + PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
22914 + val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
22915 + val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
22917 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);
22919 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
22920 + PAS_DMA_TXCHAN_CFG_TY_FUNC |
22921 + PAS_DMA_TXCHAN_CFG_TATTR(chan) |
22922 + PAS_DMA_TXCHAN_CFG_WT(2));
22924 + /* enable tx channel */
22925 + out_le32(sc->dma_regs +
22926 + PAS_DMA_TXCHAN_TCMDSTA(chan_index),
22927 + PAS_DMA_TXCHAN_TCMDSTA_EN);
22929 + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
22930 + PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));
22932 + ring->next_to_fill = 0;
22933 + ring->next_to_clean = 0;
22935 + snprintf(ring->irq_name, sizeof(ring->irq_name),
22936 + "%s%d", "crypto", chan);
22938 + ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
22939 + ret = request_irq(ring->irq, (irq_handler_t)
22940 + pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
22942 + printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
22948 + setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);
22953 +static device_method_t pasemi_methods = {
22954 + /* crypto device methods */
22955 + DEVMETHOD(cryptodev_newsession, pasemi_newsession),
22956 + DEVMETHOD(cryptodev_freesession, pasemi_freesession),
22957 + DEVMETHOD(cryptodev_process, pasemi_process),
22960 +/* Set up the crypto device structure, private data,
22961 + * and anything else we need before we start */
22963 +static int __devinit
22964 +pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
22966 + struct pasemi_softc *sc;
22969 + DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);
22971 + sc = kzalloc(sizeof(*sc), GFP_KERNEL);
22975 + softc_device_init(sc, DRV_NAME, 1, pasemi_methods);
22977 + pci_set_drvdata(pdev, sc);
22979 + spin_lock_init(&sc->sc_chnlock);
22981 + sc->sc_sessions = (struct pasemi_session **)
22982 + kzalloc(PASEMI_INITIAL_SESSIONS *
22983 + sizeof(struct pasemi_session *), GFP_ATOMIC);
22984 + if (sc->sc_sessions == NULL) {
22989 + sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
22990 + sc->sc_lastchn = 0;
22991 + sc->base_irq = pdev->irq + 6;
22992 + sc->base_chan = 6;
22994 + sc->dma_pdev = pdev;
22996 + sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
22997 + if (!sc->iob_pdev) {
22998 + dev_err(&pdev->dev, "Can't find I/O Bridge\n");
23003 + /* This is hardcoded and ugly, but we have some firmware versions
23004 + * who don't provide the register space in the device tree. Luckily
23005 + * they are at well-known locations so we can just do the math here.
23008 + ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
23010 + ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
23011 + if (!sc->dma_regs || !sc->iob_regs) {
23012 + dev_err(&pdev->dev, "Can't map registers\n");
23017 + dma_status = __ioremap(0xfd800000, 0x1000, 0);
23018 + if (!dma_status) {
23020 + dev_err(&pdev->dev, "Can't map dmastatus space\n");
23024 + sc->tx = (struct pasemi_fnu_txring *)
23025 + kzalloc(sizeof(struct pasemi_fnu_txring)
23026 + * 8, GFP_KERNEL);
23032 + /* Initialize the h/w */
23033 + out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
23034 + (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
23035 + PAS_DMA_COM_CFG_FWF));
23036 + out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
23038 + for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
23039 + sc->sc_num_channels++;
23040 + ret = pasemi_dma_setup_tx_resources(sc, i);
23045 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
23046 + CRYPTOCAP_F_HARDWARE);
23047 + if (sc->sc_cid < 0) {
23048 + printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
23053 + /* register algorithms with the framework */
23054 + printk(DRV_NAME ":");
23056 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
23057 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
23058 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
23059 + crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
23060 + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
23061 + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
23062 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
23063 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
23068 + pasemi_dma_remove(pdev);
23072 +#define MAX_RETRIES 5000
23074 +static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
23076 + struct pasemi_fnu_txring *ring = &sc->tx[chan];
23077 + int chan_index = chan + sc->base_chan;
23081 + /* Stop the channel */
23082 + out_le32(sc->dma_regs +
23083 + PAS_DMA_TXCHAN_TCMDSTA(chan_index),
23084 + PAS_DMA_TXCHAN_TCMDSTA_ST);
23086 + for (retries = 0; retries < MAX_RETRIES; retries++) {
23087 + stat = in_le32(sc->dma_regs +
23088 + PAS_DMA_TXCHAN_TCMDSTA(chan_index));
23089 + if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
23094 + if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
23095 + dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
23098 + /* Disable the channel */
23099 + out_le32(sc->dma_regs +
23100 + PAS_DMA_TXCHAN_TCMDSTA(chan_index),
23103 + if (ring->desc_info)
23104 + kfree((void *) ring->desc_info);
23106 + dma_free_coherent(&sc->dma_pdev->dev,
23109 + (void *) ring->desc, ring->dma);
23110 + if (ring->irq != -1)
23111 + free_irq(ring->irq, sc);
23113 + del_timer(&ring->crypto_timer);
23116 +static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
23118 + struct pasemi_softc *sc = pci_get_drvdata(pdev);
23121 + DPRINTF("%s()\n", __FUNCTION__);
23123 + if (sc->sc_cid >= 0) {
23124 + crypto_unregister_all(sc->sc_cid);
23128 + for (i = 0; i < sc->sc_num_channels; i++)
23129 + pasemi_free_tx_resources(sc, i);
23133 + if (sc->sc_sessions) {
23134 + for (i = 0; i < sc->sc_nsessions; i++)
23135 + kfree(sc->sc_sessions[i]);
23136 + kfree(sc->sc_sessions);
23138 + if (sc->iob_pdev)
23139 + pci_dev_put(sc->iob_pdev);
23140 + if (sc->dma_regs)
23141 + iounmap(sc->dma_regs);
23142 + if (sc->iob_regs)
23143 + iounmap(sc->iob_regs);
23147 +static struct pci_device_id pasemi_dma_pci_tbl[] = {
23148 + { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
23151 +MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);
23153 +static struct pci_driver pasemi_dma_driver = {
23154 + .name = "pasemi_dma",
23155 + .id_table = pasemi_dma_pci_tbl,
23156 + .probe = pasemi_dma_probe,
23157 + .remove = __devexit_p(pasemi_dma_remove),
23160 +static void __exit pasemi_dma_cleanup_module(void)
23162 + pci_unregister_driver(&pasemi_dma_driver);
23163 + __iounmap(dma_status);
23164 + dma_status = NULL;
23167 +int pasemi_dma_init_module(void)
23169 + return pci_register_driver(&pasemi_dma_driver);
23172 +module_init(pasemi_dma_init_module);
23173 +module_exit(pasemi_dma_cleanup_module);
23175 +MODULE_LICENSE("Dual BSD/GPL");
23176 +MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
23177 +MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");
23179 +++ b/crypto/ocf/pasemi/pasemi_fnu.h
23182 + * Copyright (C) 2007 PA Semi, Inc
23184 + * Driver for the PA Semi PWRficient DMA Crypto Engine, soft state and
23185 + * hardware register layouts.
23187 + * This program is free software; you can redistribute it and/or modify
23188 + * it under the terms of the GNU General Public License version 2 as
23189 + * published by the Free Software Foundation.
23191 + * This program is distributed in the hope that it will be useful,
23192 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
23193 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23194 + * GNU General Public License for more details.
23196 + * You should have received a copy of the GNU General Public License
23197 + * along with this program; if not, write to the Free Software
23198 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23201 +#ifndef PASEMI_FNU_H
23202 +#define PASEMI_FNU_H
23204 +#include <linux/spinlock.h>
23206 +#define PASEMI_SESSION(sid) ((sid) & 0xffffffff)
23207 +#define PASEMI_SID(sesn) ((sesn) & 0xffffffff)
23208 +#define DPRINTF(a...) if (debug) { printk(DRV_NAME ": " a); }
23210 +/* Must be a power of two */
23211 +#define RX_RING_SIZE 512
23212 +#define TX_RING_SIZE 512
23213 +#define TX_DESC(ring, num) ((ring)->desc[2 * (num & (TX_RING_SIZE-1))])
23214 +#define TX_DESC_INFO(ring, num) ((ring)->desc_info[(num) & (TX_RING_SIZE-1)])
23215 +#define MAX_DESC_SIZE 8
23216 +#define PASEMI_INITIAL_SESSIONS 10
23217 +#define PASEMI_FNU_CHANNELS 8
23219 +/* DMA descriptor */
23220 +struct pasemi_desc {
23221 + u64 quad[2*MAX_DESC_SIZE];
23228 + * Holds per descriptor data
23230 +struct pasemi_desc_info {
23233 +#define PASEMI_CHECK_SIG 0x1
23235 + struct cryptop *cf_crp;
23239 + * Holds per channel data
23241 +struct pasemi_fnu_txring {
23242 + volatile u64 *desc;
23244 + pasemi_desc_info *desc_info;
23246 + struct timer_list crypto_timer;
23247 + spinlock_t fill_lock;
23248 + spinlock_t clean_lock;
23249 + unsigned int next_to_fill;
23250 + unsigned int next_to_clean;
23251 + u16 total_pktcnt;
23254 + char irq_name[10];
23258 + * Holds data specific to a single pasemi device.
23260 +struct pasemi_softc {
23261 + softc_device_decl sc_cdev;
23262 + struct pci_dev *dma_pdev; /* device backpointer */
23263 + struct pci_dev *iob_pdev; /* device backpointer */
23264 + void __iomem *dma_regs;
23265 + void __iomem *iob_regs;
23268 + int32_t sc_cid; /* crypto tag */
23269 + int sc_nsessions;
23270 + struct pasemi_session **sc_sessions;
23271 + int sc_num_channels;/* number of crypto channels */
23273 + /* pointer to the array of txring datastructures, one txring per channel */
23274 + struct pasemi_fnu_txring *tx;
23277 + * mutual exclusion for the channel scheduler
23279 + spinlock_t sc_chnlock;
23280 + /* last channel used, for now use round-robin to allocate channels */
23284 +struct pasemi_session {
23295 + dma_addr_t dma_addr;
23299 +/* status register layout in IOB region, at 0xfd800000 */
23300 +struct pasdma_status {
23305 +#define ALG_IS_CIPHER(alg) ((alg == CRYPTO_DES_CBC) || \
23306 + (alg == CRYPTO_3DES_CBC) || \
23307 + (alg == CRYPTO_AES_CBC) || \
23308 + (alg == CRYPTO_ARC4) || \
23309 + (alg == CRYPTO_NULL_CBC))
23311 +#define ALG_IS_SIG(alg) ((alg == CRYPTO_MD5) || \
23312 + (alg == CRYPTO_MD5_HMAC) || \
23313 + (alg == CRYPTO_SHA1) || \
23314 + (alg == CRYPTO_SHA1_HMAC) || \
23315 + (alg == CRYPTO_NULL_HMAC))
23318 + PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */
23319 + PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */
23320 + PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */
23321 + PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */
23322 + PAS_DMA_COM_CFG = 0x114, /* DMA Configuration Register */
23325 +/* All these registers live in the PCI configuration space for the DMA PCI
23326 + * device. Use the normal PCI config access functions for them.
23329 +#define PAS_DMA_COM_CFG_FWF 0x18000000
23331 +#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */
23332 +#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */
23333 +#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */
23334 +#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */
23336 +#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */
23337 +#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */
23338 +#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */
23339 +#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */
23340 +#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */
23341 +#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */
23342 +#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base (low) */
23343 +#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */
23344 +#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
23345 +#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */
23346 +#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */
23347 +#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */
23348 +#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
23349 +#define PAS_DMA_TXCHAN_CFG_TY_FUNC 0x00000002 /* Type = interface */
23350 +#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */
23351 +#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c
23352 +#define PAS_DMA_TXCHAN_CFG_TATTR_S 2
23353 +#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
23354 + PAS_DMA_TXCHAN_CFG_TATTR_M)
23355 +#define PAS_DMA_TXCHAN_CFG_WT_M 0x000001c0
23356 +#define PAS_DMA_TXCHAN_CFG_WT_S 6
23357 +#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
23358 + PAS_DMA_TXCHAN_CFG_WT_M)
23359 +#define PAS_DMA_TXCHAN_CFG_LPSQ_FAST 0x00000400
23360 +#define PAS_DMA_TXCHAN_CFG_LPDQ_FAST 0x00000800
23361 +#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */
23362 +#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */
23363 +#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */
23364 +#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
23365 +#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
23366 +#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
23367 +#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0
23368 +#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
23369 + PAS_DMA_TXCHAN_BASEL_BRBL_M)
23370 +#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
23371 +#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
23372 +#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0
23373 +#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
23374 + PAS_DMA_TXCHAN_BASEU_BRBH_M)
23375 +/* # of cache lines worth of buffer ring */
23376 +#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000
23377 +#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
23378 +#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
23379 + PAS_DMA_TXCHAN_BASEU_SIZ_M)
23381 +#define PAS_STATUS_PCNT_M 0x000000000000ffffull
23382 +#define PAS_STATUS_PCNT_S 0
23383 +#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull
23384 +#define PAS_STATUS_DCNT_S 16
23385 +#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull
23386 +#define PAS_STATUS_BPCNT_S 32
23387 +#define PAS_STATUS_CAUSE_M 0xf000000000000000ull
23388 +#define PAS_STATUS_TIMER 0x1000000000000000ull
23389 +#define PAS_STATUS_ERROR 0x2000000000000000ull
23390 +#define PAS_STATUS_SOFT 0x4000000000000000ull
23391 +#define PAS_STATUS_INT 0x8000000000000000ull
23393 +#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4)
23394 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff
23395 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0
23396 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
23397 + PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
23398 +#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4)
23399 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff
23400 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0
23401 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
23402 + PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
23403 +#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4)
23404 +#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000
23405 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff
23406 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0
23407 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
23408 + PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
23409 +#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4)
23410 +#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000
23411 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff
23412 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0
23413 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
23414 + PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
23415 +#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4)
23416 +#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000
23417 +#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 16
23418 +#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
23419 + PAS_IOB_DMA_RXCH_RESET_PCNT_M)
23420 +#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020
23421 +#define PAS_IOB_DMA_RXCH_RESET_DCNTRST 0x00000010
23422 +#define PAS_IOB_DMA_RXCH_RESET_TINTC 0x00000008
23423 +#define PAS_IOB_DMA_RXCH_RESET_DINTC 0x00000004
23424 +#define PAS_IOB_DMA_RXCH_RESET_SINTC 0x00000002
23425 +#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001
23426 +#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4)
23427 +#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000
23428 +#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 16
23429 +#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
23430 + PAS_IOB_DMA_TXCH_RESET_PCNT_M)
23431 +#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020
23432 +#define PAS_IOB_DMA_TXCH_RESET_DCNTRST 0x00000010
23433 +#define PAS_IOB_DMA_TXCH_RESET_TINTC 0x00000008
23434 +#define PAS_IOB_DMA_TXCH_RESET_DINTC 0x00000004
23435 +#define PAS_IOB_DMA_TXCH_RESET_SINTC 0x00000002
23436 +#define PAS_IOB_DMA_TXCH_RESET_PINTC 0x00000001
23438 +#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700
23439 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff
23440 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0
23441 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
23442 + PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
23444 +/* Transmit descriptor fields */
23445 +#define XCT_MACTX_T 0x8000000000000000ull
23446 +#define XCT_MACTX_ST 0x4000000000000000ull
23447 +#define XCT_MACTX_NORES 0x0000000000000000ull
23448 +#define XCT_MACTX_8BRES 0x1000000000000000ull
23449 +#define XCT_MACTX_24BRES 0x2000000000000000ull
23450 +#define XCT_MACTX_40BRES 0x3000000000000000ull
23451 +#define XCT_MACTX_I 0x0800000000000000ull
23452 +#define XCT_MACTX_O 0x0400000000000000ull
23453 +#define XCT_MACTX_E 0x0200000000000000ull
23454 +#define XCT_MACTX_VLAN_M 0x0180000000000000ull
23455 +#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull
23456 +#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull
23457 +#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull
23458 +#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull
23459 +#define XCT_MACTX_CRC_M 0x0060000000000000ull
23460 +#define XCT_MACTX_CRC_NOP 0x0000000000000000ull
23461 +#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull
23462 +#define XCT_MACTX_CRC_PAD 0x0040000000000000ull
23463 +#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull
23464 +#define XCT_MACTX_SS 0x0010000000000000ull
23465 +#define XCT_MACTX_LLEN_M 0x00007fff00000000ull
23466 +#define XCT_MACTX_LLEN_S 32ull
23467 +#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \
23468 + XCT_MACTX_LLEN_M)
23469 +#define XCT_MACTX_IPH_M 0x00000000f8000000ull
23470 +#define XCT_MACTX_IPH_S 27ull
23471 +#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \
23472 + XCT_MACTX_IPH_M)
23473 +#define XCT_MACTX_IPO_M 0x0000000007c00000ull
23474 +#define XCT_MACTX_IPO_S 22ull
23475 +#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \
23476 + XCT_MACTX_IPO_M)
23477 +#define XCT_MACTX_CSUM_M 0x0000000000000060ull
23478 +#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull
23479 +#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull
23480 +#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull
23481 +#define XCT_MACTX_V6 0x0000000000000010ull
23482 +#define XCT_MACTX_C 0x0000000000000004ull
23483 +#define XCT_MACTX_AL2 0x0000000000000002ull
23485 +#define XCT_PTR_T 0x8000000000000000ull
23486 +#define XCT_PTR_LEN_M 0x7ffff00000000000ull
23487 +#define XCT_PTR_LEN_S 44
23488 +#define XCT_PTR_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & \
23489 + XCT_PTR_LEN_M)
23490 +#define XCT_PTR_ADDR_M 0x00000fffffffffffull
23491 +#define XCT_PTR_ADDR_S 0
23492 +#define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \
23493 + XCT_PTR_ADDR_M)
23495 +/* Function descriptor fields */
23496 +#define XCT_FUN_T 0x8000000000000000ull
23497 +#define XCT_FUN_ST 0x4000000000000000ull
23498 +#define XCT_FUN_NORES 0x0000000000000000ull
23499 +#define XCT_FUN_8BRES 0x1000000000000000ull
23500 +#define XCT_FUN_24BRES 0x2000000000000000ull
23501 +#define XCT_FUN_40BRES 0x3000000000000000ull
23502 +#define XCT_FUN_I 0x0800000000000000ull
23503 +#define XCT_FUN_O 0x0400000000000000ull
23504 +#define XCT_FUN_E 0x0200000000000000ull
23505 +#define XCT_FUN_FUN_S 54
23506 +#define XCT_FUN_FUN_M 0x01c0000000000000ull
23507 +#define XCT_FUN_FUN(num) ((((long)(num)) << XCT_FUN_FUN_S) & \
23508 + XCT_FUN_FUN_M)
23509 +#define XCT_FUN_CRM_NOP 0x0000000000000000ull
23510 +#define XCT_FUN_CRM_SIG 0x0008000000000000ull
23511 +#define XCT_FUN_CRM_ENC 0x0010000000000000ull
23512 +#define XCT_FUN_CRM_DEC 0x0018000000000000ull
23513 +#define XCT_FUN_CRM_SIG_ENC 0x0020000000000000ull
23514 +#define XCT_FUN_CRM_ENC_SIG 0x0028000000000000ull
23515 +#define XCT_FUN_CRM_SIG_DEC 0x0030000000000000ull
23516 +#define XCT_FUN_CRM_DEC_SIG 0x0038000000000000ull
23517 +#define XCT_FUN_LLEN_M 0x0007ffff00000000ull
23518 +#define XCT_FUN_LLEN_S 32ULL
23519 +#define XCT_FUN_LLEN(x) ((((long)(x)) << XCT_FUN_LLEN_S) & \
23520 + XCT_FUN_LLEN_M)
23521 +#define XCT_FUN_SHL_M 0x00000000f8000000ull
23522 +#define XCT_FUN_SHL_S 27ull
23523 +#define XCT_FUN_SHL(x) ((((long)(x)) << XCT_FUN_SHL_S) & \
23524 + XCT_FUN_SHL_M)
23525 +#define XCT_FUN_CHL_M 0x0000000007c00000ull
23526 +#define XCT_FUN_CHL_S 22ull
23527 +#define XCT_FUN_CHL(x) ((((long)(x)) << XCT_FUN_CHL_S) & \
23528 + XCT_FUN_CHL_M)
23529 +#define XCT_FUN_HSZ_M 0x00000000003c0000ull
23530 +#define XCT_FUN_HSZ_S 18ull
23531 +#define XCT_FUN_HSZ(x) ((((long)(x)) << XCT_FUN_HSZ_S) & \
23532 + XCT_FUN_HSZ_M)
23533 +#define XCT_FUN_ALG_DES 0x0000000000000000ull
23534 +#define XCT_FUN_ALG_3DES 0x0000000000008000ull
23535 +#define XCT_FUN_ALG_AES 0x0000000000010000ull
23536 +#define XCT_FUN_ALG_ARC 0x0000000000018000ull
23537 +#define XCT_FUN_ALG_KASUMI 0x0000000000020000ull
23538 +#define XCT_FUN_BCM_ECB 0x0000000000000000ull
23539 +#define XCT_FUN_BCM_CBC 0x0000000000001000ull
23540 +#define XCT_FUN_BCM_CFB 0x0000000000002000ull
23541 +#define XCT_FUN_BCM_OFB 0x0000000000003000ull
23542 +#define XCT_FUN_BCM_CNT 0x0000000000003800ull
23543 +#define XCT_FUN_BCM_KAS_F8 0x0000000000002800ull
23544 +#define XCT_FUN_BCM_KAS_F9 0x0000000000001800ull
23545 +#define XCT_FUN_BCP_NO_PAD 0x0000000000000000ull
23546 +#define XCT_FUN_BCP_ZRO 0x0000000000000200ull
23547 +#define XCT_FUN_BCP_PL 0x0000000000000400ull
23548 +#define XCT_FUN_BCP_INCR 0x0000000000000600ull
23549 +#define XCT_FUN_SIG_MD5 (0ull << 4)
23550 +#define XCT_FUN_SIG_SHA1 (2ull << 4)
23551 +#define XCT_FUN_SIG_HMAC_MD5 (8ull << 4)
23552 +#define XCT_FUN_SIG_HMAC_SHA1 (10ull << 4)
23553 +#define XCT_FUN_A 0x0000000000000008ull
23554 +#define XCT_FUN_C 0x0000000000000004ull
23555 +#define XCT_FUN_AL2 0x0000000000000002ull
23556 +#define XCT_FUN_SE 0x0000000000000001ull
23558 +#define XCT_FUN_SRC_PTR(len, addr) (XCT_PTR_LEN(len) | XCT_PTR_ADDR(addr))
23559 +#define XCT_FUN_DST_PTR(len, addr) (XCT_FUN_SRC_PTR(len, addr) | \
23560 + 0x8000000000000000ull)
23562 +#define XCT_CTRL_HDR_FUN_NUM_M 0x01c0000000000000ull
23563 +#define XCT_CTRL_HDR_FUN_NUM_S 54
23564 +#define XCT_CTRL_HDR_LEN_M 0x0007ffff00000000ull
23565 +#define XCT_CTRL_HDR_LEN_S 32
23566 +#define XCT_CTRL_HDR_REG_M 0x00000000000000ffull
23567 +#define XCT_CTRL_HDR_REG_S 0
23569 +#define XCT_CTRL_HDR(funcN,len,reg) (0x9400000000000000ull | \
23570 + ((((long)(funcN)) << XCT_CTRL_HDR_FUN_NUM_S) \
23571 + & XCT_CTRL_HDR_FUN_NUM_M) | \
23572 + ((((long)(len)) << \
23573 + XCT_CTRL_HDR_LEN_S) & XCT_CTRL_HDR_LEN_M) | \
23574 + ((((long)(reg)) << \
23575 + XCT_CTRL_HDR_REG_S) & XCT_CTRL_HDR_REG_M))
23577 +/* Function config command options */
23578 +#define DMA_CALGO_DES 0x00
23579 +#define DMA_CALGO_3DES 0x01
23580 +#define DMA_CALGO_AES 0x02
23581 +#define DMA_CALGO_ARC 0x03
23583 +#define DMA_FN_CIV0 0x02
23584 +#define DMA_FN_CIV1 0x03
23585 +#define DMA_FN_HKEY0 0x0a
23587 +#define XCT_PTR_ADDR_LEN(ptr) ((ptr) & XCT_PTR_ADDR_M), \
23588 + (((ptr) & XCT_PTR_LEN_M) >> XCT_PTR_LEN_S)
23590 +#endif /* PASEMI_FNU_H */