#include <linux/version.h>
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
-#include <generated/autoconf.h>
-#else
-#include <linux/autoconf.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
-#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,4)
+#include <linux/kthread.h>
+#endif
#include <cryptodev.h>
/*
* have one per-queue but having one simplifies handling of block/unblock
* operations.
*/
-static int crp_sleep = 0;
-static LIST_HEAD(crp_q); /* request queues */
-static LIST_HEAD(crp_kq);
+static LIST_HEAD(crp_q); /* crypto request queue */
+static LIST_HEAD(crp_kq); /* asym request queue */
static spinlock_t crypto_q_lock;
static struct kmem_cache *cryptodesc_zone;
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
-#include <linux/sched.h>
-#define kill_proc(p,s,v) send_sig(s,find_task_by_vpid(p),0)
-#endif
-
#define debug crypto_debug
int crypto_debug = 0;
module_param(crypto_debug, int, 0644);
MODULE_PARM_DESC(crypto_max_loopcount,
"Maximum number of crypto ops to do before yielding to other processes");
-static pid_t cryptoproc = (pid_t) -1;
-static struct completion cryptoproc_exited;
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif
+
+static struct task_struct *cryptoproc[CONFIG_NR_CPUS];
+static struct task_struct *cryptoretproc[CONFIG_NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
-static pid_t cryptoretproc = (pid_t) -1;
-static struct completion cryptoretproc_exited;
static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
static int crypto_proc(void *arg);
return 1;
}
+
/*
* Select a driver for a new session that supports the specified
* algorithms and, optionally, is constrained according to the flags.
cap->cc_unkqblocked = 0;
crypto_all_kqblocked = 0;
}
- if (crp_sleep)
- wake_up_interruptible(&cryptoproc_wait);
+ wake_up_interruptible(&cryptoproc_wait);
err = 0;
} else
err = EINVAL;
CRYPTO_Q_LOCK();
if (crypto_q_cnt >= crypto_q_max) {
- CRYPTO_Q_UNLOCK();
cryptostats.cs_drops++;
+ CRYPTO_Q_UNLOCK();
return ENOMEM;
}
crypto_q_cnt++;
TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
result = 0;
}
- if (crp_sleep)
- wake_up_interruptible(&cryptoproc_wait);
+ wake_up_interruptible(&cryptoproc_wait);
CRYPTO_Q_UNLOCK();
return result;
}
if (error == ERESTART) {
CRYPTO_Q_LOCK();
TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
- if (crp_sleep)
- wake_up_interruptible(&cryptoproc_wait);
+ wake_up_interruptible(&cryptoproc_wait);
CRYPTO_Q_UNLOCK();
error = 0;
}
* Normal case; queue the callback for the thread.
*/
CRYPTO_RETQ_LOCK();
- if (CRYPTO_RETQ_EMPTY())
- wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
+ wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
CRYPTO_RETQ_UNLOCK();
}
* Normal case; queue the callback for the thread.
*/
CRYPTO_RETQ_LOCK();
- if (CRYPTO_RETQ_EMPTY())
- wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
+ wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
CRYPTO_RETQ_UNLOCK();
}
unsigned long q_flags;
int loopcount = 0;
- ocf_daemonize("crypto");
+ set_current_state(TASK_INTERRUPTIBLE);
CRYPTO_Q_LOCK();
for (;;) {
list_empty(&crp_kq), crypto_all_kqblocked);
loopcount = 0;
CRYPTO_Q_UNLOCK();
- crp_sleep = 1;
wait_event_interruptible(cryptoproc_wait,
!(list_empty(&crp_q) || crypto_all_qblocked) ||
!(list_empty(&crp_kq) || crypto_all_kqblocked) ||
- cryptoproc == (pid_t) -1);
- crp_sleep = 0;
+ kthread_should_stop());
if (signal_pending (current)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
spin_lock_irq(&current->sigmask_lock);
}
CRYPTO_Q_LOCK();
dprintk("%s - awake\n", __FUNCTION__);
- if (cryptoproc == (pid_t) -1)
+ if (kthread_should_stop())
break;
cryptostats.cs_intrs++;
} else if (loopcount > crypto_max_loopcount) {
* been using the CPU exclusively for a while.
*/
loopcount = 0;
+ CRYPTO_Q_UNLOCK();
schedule();
+ CRYPTO_Q_LOCK();
}
loopcount++;
}
CRYPTO_Q_UNLOCK();
- complete_and_exit(&cryptoproc_exited, 0);
+ return 0;
}
/*
struct cryptkop *krpt;
unsigned long r_flags;
- ocf_daemonize("crypto_ret");
+ set_current_state(TASK_INTERRUPTIBLE);
CRYPTO_RETQ_LOCK();
for (;;) {
dprintk("%s - sleeping\n", __FUNCTION__);
CRYPTO_RETQ_UNLOCK();
wait_event_interruptible(cryptoretproc_wait,
- cryptoretproc == (pid_t) -1 ||
!list_empty(&crp_ret_q) ||
- !list_empty(&crp_ret_kq));
+ !list_empty(&crp_ret_kq) ||
+ kthread_should_stop());
if (signal_pending (current)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
spin_lock_irq(&current->sigmask_lock);
}
CRYPTO_RETQ_LOCK();
dprintk("%s - awake\n", __FUNCTION__);
- if (cryptoretproc == (pid_t) -1) {
+ if (kthread_should_stop()) {
dprintk("%s - EXITING!\n", __FUNCTION__);
break;
}
}
}
CRYPTO_RETQ_UNLOCK();
- complete_and_exit(&cryptoretproc_exited, 0);
+ return 0;
}
crypto_init(void)
{
int error;
+ unsigned long cpu;
dprintk("%s(%p)\n", __FUNCTION__, (void *) crypto_init);
memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
- init_completion(&cryptoproc_exited);
- init_completion(&cryptoretproc_exited);
-
- cryptoproc = 0; /* to avoid race condition where proc runs first */
- cryptoproc = kernel_thread(crypto_proc, NULL, CLONE_FS|CLONE_FILES);
- if (cryptoproc < 0) {
- error = cryptoproc;
- printk("crypto: crypto_init cannot start crypto thread; error %d",
- error);
- goto bad;
- }
-
- cryptoretproc = 0; /* to avoid race condition where proc runs first */
- cryptoretproc = kernel_thread(crypto_ret_proc, NULL, CLONE_FS|CLONE_FILES);
- if (cryptoretproc < 0) {
- error = cryptoretproc;
- printk("crypto: crypto_init cannot start cryptoret thread; error %d",
+ ocf_for_each_cpu(cpu) {
+ cryptoproc[cpu] = kthread_create(crypto_proc, (void *) cpu,
+ "ocf_%d", (int) cpu);
+ if (IS_ERR(cryptoproc[cpu])) {
+ error = PTR_ERR(cryptoproc[cpu]);
+ printk("crypto: crypto_init cannot start crypto thread; error %d",
error);
- goto bad;
+ goto bad;
+ }
+ kthread_bind(cryptoproc[cpu], cpu);
+ wake_up_process(cryptoproc[cpu]);
+
+ cryptoretproc[cpu] = kthread_create(crypto_ret_proc, (void *) cpu,
+ "ocf_ret_%d", (int) cpu);
+ if (IS_ERR(cryptoretproc[cpu])) {
+ error = PTR_ERR(cryptoretproc[cpu]);
+ printk("crypto: crypto_init cannot start cryptoret thread; error %d",
+ error);
+ goto bad;
+ }
+ kthread_bind(cryptoretproc[cpu], cpu);
+ wake_up_process(cryptoretproc[cpu]);
}
return 0;
static void
crypto_exit(void)
{
- pid_t p;
- unsigned long d_flags;
+ int cpu;
dprintk("%s()\n", __FUNCTION__);
/*
* Terminate any crypto threads.
*/
-
- CRYPTO_DRIVER_LOCK();
- p = cryptoproc;
- cryptoproc = (pid_t) -1;
- kill_proc(p, SIGTERM, 1);
- wake_up_interruptible(&cryptoproc_wait);
- CRYPTO_DRIVER_UNLOCK();
-
- wait_for_completion(&cryptoproc_exited);
-
- CRYPTO_DRIVER_LOCK();
- p = cryptoretproc;
- cryptoretproc = (pid_t) -1;
- kill_proc(p, SIGTERM, 1);
- wake_up_interruptible(&cryptoretproc_wait);
- CRYPTO_DRIVER_UNLOCK();
-
- wait_for_completion(&cryptoretproc_exited);
-
- /* XXX flush queues??? */
+ ocf_for_each_cpu(cpu) {
+ kthread_stop(cryptoproc[cpu]);
+ kthread_stop(cryptoretproc[cpu]);
+ }
/*
* Reclaim dynamically allocated resources.