/*
 * Copyright (C) 2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient DMA Crypto Engine
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <asm/scatterlist.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <cryptodev.h>
#include <uio.h>
#include "pasemi_fnu.h"

#define DRV_NAME "pasemi"

#define TIMER_INTERVAL 1000

static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
static struct pasdma_status volatile *dma_status;

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable debug");
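/*
 * Descriptor construction helpers.  pasemi_desc_start() writes the header
 * quad and resets the post-op flags, pasemi_desc_build() appends payload
 * quads, and pasemi_desc_hdr() ORs extra control bits into the header.
 * The size field counts 16-byte (two-quad) ring entries.
 */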
static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
{
	desc->postop = 0;
	desc->quad[0] = hdr;
	desc->quad_cnt = 1;
	desc->size = 1;
}

static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
{
	desc->quad[desc->quad_cnt++] = val;
	desc->size = (desc->quad_cnt + 1) / 2;
}

static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
{
	desc->quad[0] |= hdr;
}

static int pasemi_desc_size(struct pasemi_desc *desc)
{
	return desc->size;
}

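/*
 * Copy a fully built descriptor into the channel's TX ring, recording its
 * size, post-op flags and the owning crypto request so the completion path
 * can finish it later, then advance next_to_fill past the entries consumed.
 */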
static void pasemi_ring_add_desc(
	struct pasemi_fnu_txring *ring,
	struct pasemi_desc *desc,
	struct cryptop *crp) {
	int i;
	int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));

	TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
	TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
	TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;

	for (i = 0; i < desc->quad_cnt; i += 2) {
		ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
		ring->desc[ring_index] = desc->quad[i];
		ring->desc[ring_index + 1] = desc->quad[i + 1];
		ring->next_to_fill++;
	}

	if (desc->quad_cnt & 1)
		ring->desc[ring_index + 1] = 0;
}

static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
{
	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
		 incr);
}

/*
 * Generate a new software session.
 */
static int
pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct pasemi_softc *sc = device_get_softc(dev);
	struct pasemi_session *ses = NULL, **sespp;
	int sesn, blksz = 0;
	u64 ccmd = 0;
	unsigned long flags;
	struct pasemi_desc init_desc;
	struct pasemi_fnu_txring *txring;

	DPRINTF("%s()\n", __FUNCTION__);
	if (sidp == NULL || cri == NULL || sc == NULL) {
		DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
		return -EINVAL;
	}
	for (c = cri; c != NULL; c = c->cri_next) {
		if (ALG_IS_SIG(c->cri_alg)) {
			if (macini)
				return -EINVAL;
			macini = c;
		} else if (ALG_IS_CIPHER(c->cri_alg)) {
			if (encini)
				return -EINVAL;
			encini = c;
		} else {
			DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
			return -EINVAL;
		}
	}
	if (encini == NULL && macini == NULL)
		return -EINVAL;
	if (encini) {
		/* validate key length */
		switch (encini->cri_alg) {
		case CRYPTO_DES_CBC:
			if (encini->cri_klen != 64)
				return -EINVAL;
			ccmd = DMA_CALGO_DES;
			break;
		case CRYPTO_3DES_CBC:
			if (encini->cri_klen != 192)
				return -EINVAL;
			ccmd = DMA_CALGO_3DES;
			break;
		case CRYPTO_AES_CBC:
			if (encini->cri_klen != 128 &&
			    encini->cri_klen != 192 &&
			    encini->cri_klen != 256)
				return -EINVAL;
			ccmd = DMA_CALGO_AES;
			break;
		case CRYPTO_ARC4:
			if (encini->cri_klen != 128)
				return -EINVAL;
			ccmd = DMA_CALGO_ARC;
			break;
		default:
			DPRINTF("UNKNOWN encini->cri_alg %d\n",
				encini->cri_alg);
			return -EINVAL;
		}
	}

	if (macini) {
		switch (macini->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_MD5_HMAC:
			blksz = 16;
			break;
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
			blksz = 20;
			break;
		default:
			DPRINTF("UNKNOWN macini->cri_alg %d\n",
				macini->cri_alg);
			return -EINVAL;
		}
		if (((macini->cri_klen + 7) / 8) > blksz) {
			DPRINTF("key length %d bigger than blksize %d not supported\n",
				((macini->cri_klen + 7) / 8), blksz);
			return -EINVAL;
		}
	}

	for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
		if (sc->sc_sessions[sesn] == NULL) {
			sc->sc_sessions[sesn] = (struct pasemi_session *)
				kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
			ses = sc->sc_sessions[sesn];
			break;
		} else if (sc->sc_sessions[sesn]->used == 0) {
			ses = sc->sc_sessions[sesn];
			break;
		}
	}

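	/* No free slot: double the session table, carry over the existing
	 * pointers and take the first entry in the newly added half. */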
	if (ses == NULL) {
		sespp = (struct pasemi_session **)
			kzalloc(sc->sc_nsessions * 2 *
				sizeof(struct pasemi_session *), GFP_ATOMIC);
		if (sespp == NULL)
			return -ENOMEM;
		memcpy(sespp, sc->sc_sessions,
		       sc->sc_nsessions * sizeof(struct pasemi_session *));
		kfree(sc->sc_sessions);
		sc->sc_sessions = sespp;
		sesn = sc->sc_nsessions;
		ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
			kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
		if (ses == NULL)
			return -ENOMEM;
		sc->sc_nsessions *= 2;
	}

	ses->used = 1;

	ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
				       sizeof(struct pasemi_session), DMA_TO_DEVICE);

	/* enter the channel scheduler */
	spin_lock_irqsave(&sc->sc_chnlock, flags);

	/* ARC4 has to be processed by the even channel */
	if (encini && (encini->cri_alg == CRYPTO_ARC4))
		ses->chan = sc->sc_lastchn & ~1;
	else
		ses->chan = sc->sc_lastchn;
	sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;

	spin_unlock_irqrestore(&sc->sc_chnlock, flags);

	txring = &sc->tx[ses->chan];

	if (encini) {
		ses->ccmd = ccmd;

		/* get an IV */
		/* XXX may read fewer than requested */
		get_random_bytes(ses->civ, sizeof(ses->civ));

		ses->keysz = (encini->cri_klen - 63) / 64;
		memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);

		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
	}
	if (macini) {
		if (macini->cri_alg == CRYPTO_MD5_HMAC ||
		    macini->cri_alg == CRYPTO_SHA1_HMAC)
			memcpy(ses->hkey, macini->cri_key, blksz);
		else {
			/* Load initialization constants (RFC 1321, 3174) */
			ses->hiv[0] = 0x67452301efcdab89ULL;
			ses->hiv[1] = 0x98badcfe10325476ULL;
			ses->hiv[2] = 0xc3d2e1f000000000ULL;
		}
		ses->hseq = 0ULL;
	}

	spin_lock_irqsave(&txring->fill_lock, flags);

	/* Only the cipher path built an init descriptor above; for MAC-only
	 * sessions init_desc is never started, so don't read its size. */
	if (encini &&
	    ((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
	     txring->next_to_clean) > TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->fill_lock, flags);
		return ERESTART;
	}

	if (encini) {
		pasemi_ring_add_desc(txring, &init_desc, NULL);
		pasemi_ring_incr(sc, ses->chan,
				 pasemi_desc_size(&init_desc));
	}

	txring->sesn = sesn;
	spin_unlock_irqrestore(&txring->fill_lock, flags);

	*sidp = PASEMI_SID(sesn);
	return 0;
}

/*
 * Deallocate a session.
 */
static int
pasemi_freesession(device_t dev, u_int64_t tid)
{
	struct pasemi_softc *sc = device_get_softc(dev);
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	DPRINTF("%s()\n", __FUNCTION__);

	if (sc == NULL)
		return -EINVAL;
	session = PASEMI_SESSION(sid);
	if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
		return -EINVAL;

	pci_unmap_single(sc->dma_pdev,
			 sc->sc_sessions[session]->dma_addr,
			 sizeof(struct pasemi_session), DMA_TO_DEVICE);
	memset(sc->sc_sessions[session], 0,
	       sizeof(struct pasemi_session));

	return 0;
}

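/*
 * Dispatch one crypto request: pick apart the cipher and MAC descriptors,
 * build an init descriptor (key/IV/HMAC-key reload when needed) and a work
 * descriptor with mapped source/destination pointers, then queue both on
 * the session's TX channel ring.
 */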
static int
pasemi_process(device_t dev, struct cryptop *crp, int hint)
{
	int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
	struct pasemi_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t ivp;
	struct pasemi_desc init_desc, work_desc;
	struct pasemi_session *ses;
	struct sk_buff *skb;
	struct uio *uiop;
	unsigned long flags;
	struct pasemi_fnu_txring *txring;

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
		return -EINVAL;

	crp->crp_etype = 0;
	if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
		return -EINVAL;

	ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = -EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (ALG_IS_SIG(crd1->crd_alg)) {
		maccrd = crd1;
		if (crd2 == NULL)
			enccrd = NULL;
		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
			 (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
			enccrd = crd2;
		else
			goto erralg;
	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
		enccrd = crd1;
		if (crd2 == NULL)
			maccrd = NULL;
		else if (ALG_IS_SIG(crd2->crd_alg) &&
			 (crd1->crd_flags & CRD_F_ENCRYPT))
			maccrd = crd2;
		else
			goto erralg;
	} else
		goto erralg;

	chsel = ses->chan;

	txring = &sc->tx[chsel];

	if (enccrd && !maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x40;
		srclen = crp->crp_ilen;

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
				  | XCT_FUN_FUN(chsel));
		if (enccrd->crd_flags & CRD_F_ENCRYPT)
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
		else
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
	} else if (enccrd && maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x68;

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			/* Encrypt -> Authenticate */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
					  | XCT_FUN_A | XCT_FUN_FUN(chsel));
			srclen = maccrd->crd_skip + maccrd->crd_len;
		} else {
			/* Authenticate -> Decrypt */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
					  | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			work_desc.postop = PASEMI_CHECK_SIG;
			srclen = crp->crp_ilen;
		}

		pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
		pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
	} else if (!enccrd && maccrd) {
		srclen = maccrd->crd_len;

		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
				  | XCT_FUN_A | XCT_FUN_FUN(chsel));
	}

	if (enccrd) {
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_AES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
					XCT_FUN_BCM_CBC);
			ivsize = 2 * sizeof(u64);
			break;
		case CRYPTO_ARC4:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
			ivsize = 0;
			break;
		default:
			printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
			       enccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}

		ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ivp, enccrd->crd_iv, ivsize);
			/* If IV is not present in the buffer already, it has to be copied there */
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize, ivp);
		} else {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				/* IV is provided explicitly in the descriptor */
				memcpy(ivp, enccrd->crd_iv, ivsize);
			else
				/* IV is provided in the packet */
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize,
						ivp);
		}
	}

	if (maccrd) {
		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_MD5_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		default:
			printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
			       maccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}
	}

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk(DRV_NAME ": skb frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(skb->len, pci_map_single(
						sc->dma_pdev, skb->data,
						skb->len, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(
				srclen, pci_map_single(
					sc->dma_pdev, skb->data,
					srclen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk(DRV_NAME ": iov frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}

		/* crp_olen is never set; always use crp_ilen */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
						sc->dma_pdev,
						uiop->uio_iov->iov_base,
						crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));

		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
						sc->dma_pdev,
						uiop->uio_iov->iov_base,
						srclen, DMA_TO_DEVICE)));
	} else {
		/* using contig buffers */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
						sc->dma_pdev,
						crp->crp_buf,
						crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
						sc->dma_pdev,
						crp->crp_buf, srclen,
						DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	}

	spin_lock_irqsave(&txring->fill_lock, flags);

	if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
		txring->sesn = PASEMI_SESSION(crp->crp_sid);
		reinit = 1;
	}

	if (enccrd) {
		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
	}

	if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
	      pasemi_desc_size(&work_desc)) -
	     txring->next_to_clean) > TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->fill_lock, flags);
		err = ERESTART;
		goto errout;
	}

	pasemi_ring_add_desc(txring, &init_desc, NULL);
	pasemi_ring_add_desc(txring, &work_desc, crp);

	pasemi_ring_incr(sc, chsel,
			 pasemi_desc_size(&init_desc) +
			 pasemi_desc_size(&work_desc));

	spin_unlock_irqrestore(&txring->fill_lock, flags);

	mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);

	return 0;

erralg:
	printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
	       crd1->crd_alg, crd2->crd_alg);
	err = -EINVAL;

errout:
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}

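/*
 * Reclaim completed descriptors on one channel.  The hardware packet
 * counter in the DMA status page tells us how many descriptors have
 * finished since the last pass; for each one we check the signature bit
 * if requested, complete the crypto request, unmap the buffers and clear
 * the ring entries.
 */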
static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
{
	int i, j, ring_idx;
	struct pasemi_fnu_txring *ring = &sc->tx[chan];
	u16 delta_cnt;
	unsigned long flags;
	int loops = 10;
	int desc_size;
	struct cryptop *crp;

	spin_lock_irqsave(&ring->clean_lock, flags);

	while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
			     & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
	       && loops--) {

		for (i = 0; i < delta_cnt; i++) {
			desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
			crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
			if (crp) {
				ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
				if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
					/* Need to make sure signature matched,
					 * if not - return error */
					if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
						crp->crp_etype = -EINVAL;
				}
				crypto_done(TX_DESC_INFO(ring,
							 ring->next_to_clean).cf_crp);
				TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
				pci_unmap_single(
					sc->dma_pdev,
					XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
					PCI_DMA_TODEVICE);

				ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;

				ring->next_to_clean++;
				for (j = 1; j < desc_size; j++) {
					ring_idx = 2 *
						(ring->next_to_clean &
						 (TX_RING_SIZE-1));
					pci_unmap_single(
						sc->dma_pdev,
						XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
						PCI_DMA_TODEVICE);
					if (ring->desc[ring_idx + 1])
						pci_unmap_single(
							sc->dma_pdev,
							XCT_PTR_ADDR_LEN(
								ring->desc[
									ring_idx + 1]),
							PCI_DMA_TODEVICE);
					ring->desc[ring_idx] =
						ring->desc[ring_idx + 1] = 0;
					ring->next_to_clean++;
				}
			} else {
				for (j = 0; j < desc_size; j++) {
					ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
					ring->desc[ring_idx] =
						ring->desc[ring_idx + 1] = 0;
					ring->next_to_clean++;
				}
			}
		}

		ring->total_pktcnt += delta_cnt;
	}
	spin_unlock_irqrestore(&ring->clean_lock, flags);

	return 0;
}

static void sweepup_tx(struct pasemi_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_num_channels; i++)
		pasemi_clean_tx(sc, i);
}

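/*
 * Per-channel interrupt handler: reap finished descriptors, then ack the
 * interrupt by writing the updated packet count back through the IOB
 * TXCH reset register.
 */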
static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
{
	struct pasemi_softc *sc = arg;
	unsigned int reg;
	int chan = irq - sc->base_irq;
	int chan_index = sc->base_chan + chan;
	u64 stat = dma_status->tx_sta[chan_index];

	DPRINTF("%s()\n", __FUNCTION__);

	if (!(stat & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	pasemi_clean_tx(sc, chan);

	stat = dma_status->tx_sta[chan_index];

	reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
		PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);

	if (stat & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);

	return IRQ_HANDLED;
}

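/*
 * Per-channel bring-up: allocate the descriptor info array and the
 * coherent descriptor ring, program the channel base, size and config
 * registers, enable the channel, hook its interrupt and set up the
 * cleanup timer.
 */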
static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
{
	u32 val;
	int chan_index = chan + sc->base_chan;
	int ret;
	struct pasemi_fnu_txring *ring;

	ring = &sc->tx[chan];

	spin_lock_init(&ring->fill_lock);
	spin_lock_init(&ring->clean_lock);

	ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->desc_info)
		return -ENOMEM;

	/* Allocate descriptors */
	ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
					TX_RING_SIZE *
					2 * sizeof(u64),
					&ring->dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);

	ring->total_pktcnt = 0;

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
		 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));

	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
		 PAS_DMA_TXCHAN_CFG_TY_FUNC |
		 PAS_DMA_TXCHAN_CFG_TATTR(chan) |
		 PAS_DMA_TXCHAN_CFG_WT(2));

	/* enable tx channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 PAS_DMA_TXCHAN_TCMDSTA_EN);

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
		 PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s%d", "crypto", chan);

	ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
	ret = request_irq(ring->irq, (irq_handler_t)
			  pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
	if (ret) {
		printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
		       ring->irq, ret);
		ring->irq = -1;
		return ret;
	}

	setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);

	return 0;
}

static device_method_t pasemi_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession, pasemi_newsession),
	DEVMETHOD(cryptodev_freesession, pasemi_freesession),
	DEVMETHOD(cryptodev_process, pasemi_process),
};

/* Set up the crypto device structure, private data,
 * and anything else we need before we start */

static int __devinit
pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct pasemi_softc *sc;
	int ret, i;

	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);

	pci_set_drvdata(pdev, sc);

	spin_lock_init(&sc->sc_chnlock);

	sc->sc_sessions = (struct pasemi_session **)
		kzalloc(PASEMI_INITIAL_SESSIONS *
			sizeof(struct pasemi_session *), GFP_ATOMIC);
	if (sc->sc_sessions == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
	sc->sc_lastchn = 0;
	sc->base_irq = pdev->irq + 6;
	sc->base_chan = 6;
	sc->sc_cid = -1;
	sc->dma_pdev = pdev;

	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!sc->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		ret = -ENODEV;
		goto out;
	}

	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	sc->dma_regs =
		ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
	sc->iob_regs =
		ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
	if (!sc->dma_regs || !sc->iob_regs) {
		dev_err(&pdev->dev, "Can't map registers\n");
		ret = -ENODEV;
		goto out;
	}

	dma_status = __ioremap(0xfd800000, 0x1000, 0);
	if (!dma_status) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "Can't map dmastatus space\n");
		goto out;
	}

	sc->tx = (struct pasemi_fnu_txring *)
		kzalloc(sizeof(struct pasemi_fnu_txring)
			* 8, GFP_KERNEL);
	if (!sc->tx) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the h/w */
	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
		  PAS_DMA_COM_CFG_FWF));
	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
		sc->sc_num_channels++;
		ret = pasemi_dma_setup_tx_resources(sc, i);
		if (ret)
			goto out;
	}

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
					 CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
		ret = -ENXIO;
		goto out;
	}

	/* register algorithms with the framework */
	printk(DRV_NAME ":");

	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);

	return 0;

out:
	pasemi_dma_remove(pdev);
	return ret;
}

#define MAX_RETRIES 5000

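/*
 * Per-channel teardown: stop the channel and poll until it goes idle,
 * disable it, then release the descriptor ring, info array, IRQ and
 * cleanup timer.
 */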
static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
{
	struct pasemi_fnu_txring *ring = &sc->tx[chan];
	int chan_index = chan + sc->base_chan;
	int retries;
	u32 stat;

	/* Stop the channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 PAS_DMA_TXCHAN_TCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		stat = in_le32(sc->dma_regs +
			       PAS_DMA_TXCHAN_TCMDSTA(chan_index));
		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
			chan_index);

	/* Disable the channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 0);

	if (ring->desc_info)
		kfree((void *) ring->desc_info);
	if (ring->desc)
		dma_free_coherent(&sc->dma_pdev->dev,
				  TX_RING_SIZE *
				  2 * sizeof(u64),
				  (void *) ring->desc, ring->dma);
	if (ring->irq != -1)
		free_irq(ring->irq, sc);

	del_timer(&ring->crypto_timer);
}

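/*
 * Device teardown: unregister from the crypto framework, free every
 * channel's resources, release sessions and unmap the register space.
 */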
static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
{
	struct pasemi_softc *sc = pci_get_drvdata(pdev);
	int i;

	DPRINTF("%s()\n", __FUNCTION__);

	if (sc->sc_cid >= 0) {
		crypto_unregister_all(sc->sc_cid);
	}

	if (sc->tx) {
		for (i = 0; i < sc->sc_num_channels; i++)
			pasemi_free_tx_resources(sc, i);

		kfree(sc->tx);
	}
	if (sc->sc_sessions) {
		for (i = 0; i < sc->sc_nsessions; i++)
			kfree(sc->sc_sessions[i]);
		kfree(sc->sc_sessions);
	}
	if (sc->iob_pdev)
		pci_dev_put(sc->iob_pdev);
	if (sc->dma_regs)
		iounmap(sc->dma_regs);
	if (sc->iob_regs)
		iounmap(sc->iob_regs);
	kfree(sc);
}

static struct pci_device_id pasemi_dma_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
	{ },	/* terminating entry; the PCI core stops at a zeroed ID */
};

MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);

static struct pci_driver pasemi_dma_driver = {
	.name = "pasemi_dma",
	.id_table = pasemi_dma_pci_tbl,
	.probe = pasemi_dma_probe,
	.remove = __devexit_p(pasemi_dma_remove),
};

static void __exit pasemi_dma_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_dma_driver);
	__iounmap(dma_status);
	dma_status = NULL;
}

int pasemi_dma_init_module(void)
{
	return pci_register_driver(&pasemi_dma_driver);
}

module_init(pasemi_dma_init_module);
module_exit(pasemi_dma_cleanup_module);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");