[openwrt.git] target/linux/ep93xx/patches-2.6.30/005-ep93xx-dma.patch
1 --- /dev/null
2 +++ b/arch/arm/mach-ep93xx/dma_ep93xx.c
3 @@ -0,0 +1,2940 @@
4 +/******************************************************************************
5 + * arch/arm/mach-ep93xx/dma_ep93xx.c
6 + *
7 + * Support functions for the ep93xx internal DMA channels.
8 + * (see also Documentation/arm/ep93xx/dma.txt)
9 + *
10 + * Copyright (C) 2003 Cirrus Logic
11 + *
12 + * A large portion of this file is based on the dma api implemented by
13 + * Nicolas Pitre, dma-sa1100.c, copyrighted 2000.
14 + *
15 + *
16 + * This program is free software; you can redistribute it and/or modify
17 + * it under the terms of the GNU General Public License as published by
18 + * the Free Software Foundation; either version 2 of the License, or
19 + * (at your option) any later version.
20 + *
21 + * This program is distributed in the hope that it will be useful,
22 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 + * GNU General Public License for more details.
25 + *
26 + * You should have received a copy of the GNU General Public License
27 + * along with this program; if not, write to the Free Software
28 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29 + *
30 + ****************************************************************************/
31 +#include <linux/autoconf.h>
32 +#include <linux/module.h>
33 +#include <linux/init.h>
34 +#include <linux/sched.h>
35 +#include <linux/spinlock.h>
36 +#include <linux/slab.h>
37 +#include <linux/errno.h>
38 +#include <linux/delay.h>
39 +#include <linux/interrupt.h>
40 +
41 +#include <asm/system.h>
42 +#include <asm/irq.h>
43 +#include <mach/hardware.h>
44 +#include <asm/io.h>
45 +#include <asm/dma.h>
46 +#include <asm/mach/dma.h>
47 +#include "dma_ep93xx.h"
48 +
49 +/*****************************************************************************
50 + *
51 + * Debugging macros
52 + *
53 + ****************************************************************************/
54 +#undef DEBUG
55 +//#define DEBUG 1
56 +#ifdef DEBUG
57 +#define DPRINTK( fmt, arg... ) printk( fmt, ##arg )
58 +#else
59 +#define DPRINTK( fmt, arg... )
60 +#endif
61 +
62 +/*****************************************************************************
63 + *
64 + * static global variables
65 + *
66 + ****************************************************************************/
67 +ep93xx_dma_t dma_chan[MAX_EP93XX_DMA_CHANNELS];
68 +
69 +/*
70 + * lock used to protect the list of dma channels while searching for a free
71 + * channel during dma_request.
72 + */
73 +//static spinlock_t dma_list_lock;
74 +static spinlock_t dma_list_lock = SPIN_LOCK_UNLOCKED;
75 +
76 +/*****************************************************************************
77 + *
78 + * Internal DMA processing functions.
79 + *
80 + ****************************************************************************/
81 +/*****************************************************************************
82 + *
83 + * dma_get_channel_from_handle()
84 + *
85 + * If the handle is valid, returns the DMA channel number (0-11).
86 + * If the handle is not valid, returns -1.
87 + *
88 + ****************************************************************************/
89 +static int
90 +dma_get_channel_from_handle(int handle)
91 +{
92 + int channel;
93 +
94 + /*
95 + * Get the DMA channel # from the handle.
96 + */
97 + channel = ((int)handle & DMA_HANDLE_SPECIFIER_MASK) >> 28;
98 +
99 + /*
100 + * See if this is a valid handle.
101 + */
102 + if (dma_chan[channel].last_valid_handle != (int)handle) {
103 + DPRINTK("DMA ERROR - invalid handle 0x%x \n", handle);
104 + return(-1);
105 + }
106 +
107 + /*
108 + * See if this instance is still open
109 + */
110 + if (!dma_chan[channel].ref_count )
111 + return(-1);
112 +
113 + return(channel);
114 +}
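/*
 * Editor's illustrative sketch (not part of the original patch): how the
 * channel index is packed into a handle.  The shift by 28 above implies
 * that DMA_HANDLE_SPECIFIER_MASK covers bits [31:28]; the remaining bits
 * carry a per-open value that is compared against last_valid_handle.
 * The handle value below is made up purely for illustration.
 */
static void example_handle_decoding(void)
{
	int handle = 0x30000007;			/* hypothetical handle */
	int channel = (handle & 0xf0000000) >> 28;	/* -> channel 3 */

	DPRINTK("handle 0x%x maps to channel %d\n", handle, channel);
}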
115 +
116 +static void dma_m2m_transfer_done(ep93xx_dma_t *dma)
117 +{
118 + unsigned int uiCONTROL;
119 + unsigned int M2M_reg_base = dma->reg_base;
120 + unsigned int read_back;
121 +
122 + DPRINTK("1 ");
123 +
124 + outl( 0, M2M_reg_base+M2M_OFFSET_INTERRUPT );
125 +
126 + if (dma->total_buffers) {
127 + /*
128 +		 * The current_buffer has already been transferred, so add the
129 + * byte count to the total_bytes field.
130 + */
131 + dma->total_bytes = dma->total_bytes +
132 + dma->buffer_queue[dma->current_buffer].size;
133 +
134 + /*
135 + * Mark the current_buffer as used.
136 + */
137 + dma->buffer_queue[dma->current_buffer].used = TRUE;
138 +
139 + /*
140 + * Increment the used buffer counter
141 + */
142 + dma->used_buffers++;
143 +
144 + DPRINTK("#%d", dma->current_buffer);
145 +
146 + /*
147 + * Increment the current_buffer
148 + */
149 + dma->current_buffer = (dma->current_buffer + 1) %
150 + MAX_EP93XX_DMA_BUFFERS;
151 +
152 + /*
153 + * check if there's a new buffer to transfer.
154 + */
155 + if (dma->new_buffers && dma->xfer_enable) {
156 + /*
157 + * We have a new buffer to transfer so program in the
158 + * buffer values. Since a STALL interrupt was
159 + * triggered, we program the buffer descriptor 0
160 + *
161 + * Set the SAR_BASE/DAR_BASE/BCR registers with values
162 + * from the next buffer in the queue.
163 + */
164 + outl( dma->buffer_queue[dma->current_buffer].source,
165 + M2M_reg_base + M2M_OFFSET_SAR_BASE0 );
166 +
167 + outl( dma->buffer_queue[dma->current_buffer].dest,
168 + M2M_reg_base + M2M_OFFSET_DAR_BASE0 );
169 +
170 + outl( dma->buffer_queue[dma->current_buffer].size,
171 + M2M_reg_base + M2M_OFFSET_BCR0 );
172 +
173 + DPRINTK("SAR_BASE0 - 0x%x\n", dma->buffer_queue[dma->current_buffer].source);
174 + DPRINTK("DAR_BASE0 - 0x%x\n", dma->buffer_queue[dma->current_buffer].dest);
175 + DPRINTK("BCR0 - 0x%x\n", dma->buffer_queue[dma->current_buffer].size);
176 +
177 + /*
178 + * Decrement the new buffer counter
179 + */
180 + dma->new_buffers--;
181 +
182 + /*
183 + * If there's a second new buffer, we program the
184 + * second buffer descriptor.
185 + */
186 + if (dma->new_buffers) {
187 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
188 + MAX_EP93XX_DMA_BUFFERS].source,
189 + M2M_reg_base+M2M_OFFSET_SAR_BASE1 );
190 +
191 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
192 + MAX_EP93XX_DMA_BUFFERS].dest,
193 + M2M_reg_base+M2M_OFFSET_DAR_BASE1 );
194 +
195 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
196 + MAX_EP93XX_DMA_BUFFERS].size,
197 + M2M_reg_base+M2M_OFFSET_BCR1 );
198 +
199 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
200 + uiCONTROL |= CONTROL_M2M_NFBINTEN;
201 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
202 +
203 + dma->new_buffers--;
204 + }
205 + } else {
206 + DPRINTK("2 \n");
207 + /*
208 + * There's a chance we setup both buffer descriptors,
209 + * but didn't service the NFB quickly enough, causing
210 + * the channel to transfer both buffers, then enter the
211 + * stall state. So, we need to be able to process the
212 + * second buffer.
213 + */
214 + if ((dma->used_buffers + dma->new_buffers) < dma->total_buffers)
215 + {
216 + DPRINTK("3 ");
217 +
218 + /*
219 + * The current_buffer has already been
220 +			 * transferred, so add the byte count to the
221 + * total_bytes field.
222 + */
223 + dma->total_bytes = dma->total_bytes +
224 + dma->buffer_queue[dma->current_buffer].size;
225 +
226 + /*
227 + * Mark the current_buffer as used.
228 + */
229 + dma->buffer_queue[dma->current_buffer].used = TRUE;
230 +
231 + /*
232 + * Increment the used buffer counter
233 + */
234 + dma->used_buffers++;
235 +
236 + DPRINTK("#%d", dma->current_buffer);
237 +
238 + /*
239 + * Increment the current buffer pointer.
240 + */
241 + dma->current_buffer = (dma->current_buffer + 1) %
242 + MAX_EP93XX_DMA_BUFFERS;
243 +
244 + }
245 +
246 + /*
247 + * No new buffers to transfer, so disable the channel.
248 + */
249 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
250 + uiCONTROL &= ~CONTROL_M2M_ENABLE;
251 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
252 +
253 + /*
254 + * Indicate that this channel is in the pause by
255 + * starvation state by setting the pause bit to true.
256 + */
257 + dma->pause = TRUE;
258 + }
259 + } else {
260 + /*
261 + * No buffers to transfer, or old buffers to mark as used,
262 + * so disable the channel
263 + */
264 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
265 + uiCONTROL &= ~CONTROL_M2M_ENABLE;
266 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
267 +
268 + /*
269 + * Must read the control register back after a write.
270 + */
271 + read_back = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
272 +
273 + /*
274 + * Indicate that this channel is in the pause by
275 + * starvation state by setting the pause bit to true.
276 + */
277 + dma->pause = TRUE;
278 + }
279 +}
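/*
 * Editor's illustrative sketch (not part of the original patch): the
 * bookkeeping that both the DONE and STALL paths above perform whenever
 * the hardware finishes a buffer.  Field names are those of ep93xx_dma_t;
 * the helper itself is hypothetical and only restates the pattern.
 */
static void example_retire_current_buffer(ep93xx_dma_t *dma)
{
	/* credit the finished buffer's byte count */
	dma->total_bytes += dma->buffer_queue[dma->current_buffer].size;

	/* mark it used and account for it */
	dma->buffer_queue[dma->current_buffer].used = TRUE;
	dma->used_buffers++;

	/* advance around the fixed-size circular queue */
	dma->current_buffer = (dma->current_buffer + 1) %
				MAX_EP93XX_DMA_BUFFERS;
}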
280 +
281 +static void dma_m2m_next_frame_buffer(ep93xx_dma_t *dma)
282 +{
283 + int loop;
284 + unsigned int uiCONTROL;
285 + unsigned int M2M_reg_base = dma->reg_base;
286 +
287 + DPRINTK("5 ");
288 +
289 + if (dma->total_buffers) {
290 + DPRINTK("6 ");
291 + /*
292 +		 * The current_buffer has already been transferred, so add the
293 + * byte count from the current buffer to the total byte count.
294 + */
295 + dma->total_bytes = dma->total_bytes +
296 + dma->buffer_queue[dma->current_buffer].size;
297 +
298 + /*
299 + * Mark the Current Buffer as used.
300 + */
301 + dma->buffer_queue[dma->current_buffer].used = TRUE;
302 +
303 + /*
304 + * Increment the used buffer counter
305 + */
306 + dma->used_buffers++;
307 +
308 + DPRINTK("#%d", dma->current_buffer);
309 +
310 + if ((dma->buffer_queue[
311 + (dma->current_buffer + 1) % MAX_EP93XX_DMA_BUFFERS].last) ||
312 + (dma->new_buffers == 0) || (dma->xfer_enable == FALSE)) {
313 + DPRINTK("7 ");
314 +
315 + /*
316 + * This is the last Buffer in this transaction, so
317 + * disable the NFB interrupt. We shouldn't get an NFB
318 + * int when the FSM moves to the ON state where it
319 + * would typically get the NFB int indicating a new
320 + * buffer can be programmed. Instead, once in the ON
321 + * state, the DMA will just proceed to complete the
322 +			 * transfer of the current buffer, move the FSM
323 + * directly to the STALL state where a STALL interrupt
324 + * will be generated.
325 + */
326 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
327 + uiCONTROL &= ~CONTROL_M2M_NFBINTEN ;
328 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
329 +
330 + /*
331 + * The current buffer has been transferred, so
332 + * increment the current buffer counter to reflect
333 + * this.
334 + */
335 + dma->current_buffer = (dma->current_buffer + 1) %
336 + MAX_EP93XX_DMA_BUFFERS;
337 +
338 + DPRINTK("End of NFB handling. \n");
339 + DPRINTK("CONTROL - 0x%x \n",
340 + inl(M2M_reg_base+M2M_OFFSET_CONTROL) );
341 + DPRINTK("STATUS - 0x%x \n",
342 + inl(M2M_reg_base+M2M_OFFSET_STATUS) );
343 + DPRINTK("SAR_BASE0 - 0x%x \n",
344 + inl(M2M_reg_base+M2M_OFFSET_SAR_BASE0) );
345 + DPRINTK("SAR_CUR0 - 0x%x \n",
346 + inl(M2M_reg_base+M2M_OFFSET_SAR_CURRENT0) );
347 + DPRINTK("DAR_BASE0 - 0x%x \n",
348 + inl(M2M_reg_base+M2M_OFFSET_DAR_BASE0) );
349 + DPRINTK("DAR_CUR0 - 0x%x \n",
350 + inl(M2M_reg_base+M2M_OFFSET_DAR_CURRENT0) );
351 +
352 + DPRINTK("Buffer buf_id source size last used \n");
353 + for (loop = 0; loop < 32; loop ++)
354 + DPRINTK("%d 0x%x 0x%x 0x%x %d %d \n",
355 + loop, dma->buffer_queue[loop].buf_id,
356 + dma->buffer_queue[loop].source,
357 + dma->buffer_queue[loop].size,
358 + dma->buffer_queue[loop].last,
359 + dma->buffer_queue[loop].used);
360 + DPRINTK("pause 0x%x 0x%x 0x%x %d %d \n",
361 + dma->pause_buf.buf_id, dma->pause_buf.source,
362 + dma->pause_buf.size, dma->pause_buf.last,
363 + dma->pause_buf.used);
364 +
365 + DPRINTK("Pause - %d \n", dma->pause);
366 + DPRINTK("xfer_enable - %d \n", dma->xfer_enable);
367 + DPRINTK("total bytes - 0x%x \n", dma->total_bytes);
368 + DPRINTK("total buffer - %d \n", dma->total_buffers);
369 + DPRINTK("new buffers - %d \n", dma->new_buffers);
370 + DPRINTK("current buffer - %d \n", dma->current_buffer);
371 + DPRINTK("last buffer - %d \n", dma->last_buffer);
372 + DPRINTK("used buffers - %d \n", dma->used_buffers);
373 + DPRINTK("callback addr - 0x%p \n", dma->callback);
374 +
375 + } else if (dma->new_buffers) {
376 + DPRINTK("8 ");
377 + /*
378 + * We have a new buffer, so increment the current
379 + * buffer to point to the next buffer, which is already
380 + * programmed into the DMA. Next time around, it'll be
381 + * pointing to the current buffer.
382 + */
383 + dma->current_buffer = (dma->current_buffer + 1) %
384 + MAX_EP93XX_DMA_BUFFERS;
385 +
386 + /*
387 + * We know we have a new buffer to program as the next
388 + * buffer, so check which set of SAR_BASE/DAR_BASE/BCR
389 + * registers to program.
390 + */
391 + if ( inl(M2M_reg_base+M2M_OFFSET_STATUS) & STATUS_M2M_NB ) {
392 + /*
393 + * Set the SAR_BASE1/DAR_BASE1/BCR1 registers
394 + * with values from the next buffer in the
395 + * queue.
396 + */
397 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
398 + MAX_EP93XX_DMA_BUFFERS].source,
399 + M2M_reg_base+M2M_OFFSET_SAR_BASE1 );
400 +
401 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
402 + MAX_EP93XX_DMA_BUFFERS].dest,
403 + M2M_reg_base+M2M_OFFSET_DAR_BASE1 );
404 +
405 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
406 + MAX_EP93XX_DMA_BUFFERS].size,
407 + M2M_reg_base+M2M_OFFSET_BCR1 );
408 + } else {
409 + /*
410 + * Set the SAR_BASE0/DAR_BASE0/BCR0 registers
411 + * with values from the next buffer in the
412 + * queue.
413 + */
414 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
415 + MAX_EP93XX_DMA_BUFFERS].source,
416 + M2M_reg_base+M2M_OFFSET_SAR_BASE0 );
417 +
418 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
419 + MAX_EP93XX_DMA_BUFFERS].dest,
420 + M2M_reg_base+M2M_OFFSET_DAR_BASE0 );
421 +
422 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
423 + MAX_EP93XX_DMA_BUFFERS].size,
424 + M2M_reg_base+M2M_OFFSET_BCR0 );
425 + }
426 +
427 + /*
428 + * Decrement the new buffers counter
429 + */
430 + dma->new_buffers--;
431 + }
432 + } else {
433 + /*
434 + * Total number of buffers is 0 - really we should never get
435 + * here, but just in case.
436 + */
437 + DPRINTK("9 \n");
438 +
439 + /*
440 + * No new buffers to transfer, so Disable the channel
441 + */
442 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
443 + uiCONTROL &= ~CONTROL_M2M_ENABLE;
444 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
445 +
446 + /*
447 + * Indicate that the channel is paused by starvation.
448 + */
449 + dma->pause = 1;
450 + }
451 +}
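/*
 * Editor's illustrative sketch (not part of the original patch): the NFB
 * handler above picks which of the two M2M descriptor register sets to
 * reload by testing the NextBuffer bit in the status register.  This
 * hypothetical helper shows just that ping-pong decision, using the
 * register offsets and bit names defined in dma_ep93xx.h.
 */
static void example_reload_next_m2m_descriptor(ep93xx_dma_t *dma, int next)
{
	unsigned int base = dma->reg_base;

	if (inl(base + M2M_OFFSET_STATUS) & STATUS_M2M_NB) {
		/* descriptor 1 is the free one */
		outl(dma->buffer_queue[next].source, base + M2M_OFFSET_SAR_BASE1);
		outl(dma->buffer_queue[next].dest,   base + M2M_OFFSET_DAR_BASE1);
		outl(dma->buffer_queue[next].size,   base + M2M_OFFSET_BCR1);
	} else {
		/* descriptor 0 is the free one */
		outl(dma->buffer_queue[next].source, base + M2M_OFFSET_SAR_BASE0);
		outl(dma->buffer_queue[next].dest,   base + M2M_OFFSET_DAR_BASE0);
		outl(dma->buffer_queue[next].size,   base + M2M_OFFSET_BCR0);
	}
}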
452 +
453 +/*****************************************************************************
454 + *
455 + * dma_m2m_irq_handler
456 + *
457 + ****************************************************************************/
458 +static irqreturn_t
459 +dma_m2m_irq_handler(int irq, void *dev_id)
460 +{
461 + ep93xx_dma_t *dma = (ep93xx_dma_t *)dev_id;
462 + unsigned int M2M_reg_base = dma->reg_base;
463 + ep93xx_dma_dev_t dma_int = UNDEF_INT;
464 + int status;
465 +
466 +// printk("+m2m irq=%d\n", irq);
467 +
468 + /*
469 + * Determine what kind of dma interrupt this is.
470 + */
471 + status = inl(M2M_reg_base + M2M_OFFSET_INTERRUPT);
472 + if ( status & INTERRUPT_M2M_DONEINT )
473 + dma_int = DONE; // we're done with a requested dma
474 + else if ( status & INTERRUPT_M2M_NFBINT )
475 + dma_int = NFB; // we're done with one dma buffer
476 +
477 + DPRINTK("IRQ: b=%#x st=%#x\n", (int)dma->current_buffer, dma_int);
478 +
479 + switch (dma_int) {
480 + /*
481 + * Next Frame Buffer Interrupt. If there's a new buffer program it
482 + * Check if this is the last buffer in the transfer,
483 + * and if it is, disable the NFB int to prevent being
484 + * interrupted for another buffer when we know there won't be
485 + * another.
486 + */
487 + case NFB:
488 + dma_m2m_next_frame_buffer(dma);
489 + break;
490 + /*
491 + * Done interrupt generated, indicating that the transfer is complete.
492 + */
493 + case DONE:
494 + dma_m2m_transfer_done(dma);
495 + break;
496 +
497 + default:
498 + break;
499 + }
500 +
501 + if ((dma_int != UNDEF_INT) && dma->callback)
502 + dma->callback(dma_int, dma->device, dma->user_data);
503 +
504 + return IRQ_HANDLED;
505 +}
506 +
507 +/*****************************************************************************
508 + *
509 + * dma_m2p_irq_handler
510 + *
511 + *
512 + *
513 + ****************************************************************************/
514 +static irqreturn_t
515 +dma_m2p_irq_handler(int irq, void *dev_id)
516 +{
517 + ep93xx_dma_t *dma = (ep93xx_dma_t *) dev_id;
518 + unsigned int M2P_reg_base = dma->reg_base;
519 + unsigned int read_back;
520 + ep93xx_dma_dev_t dma_int = UNDEF_INT;
521 + unsigned int loop, uiCONTROL, uiINTERRUPT;
522 +
523 + /*
524 + * Determine what kind of dma interrupt this is.
525 + */
526 + if ( inl(M2P_reg_base+M2P_OFFSET_INTERRUPT) & INTERRUPT_M2P_STALLINT )
527 + dma_int = STALL;
528 + else if ( inl(M2P_reg_base+M2P_OFFSET_INTERRUPT) & INTERRUPT_M2P_NFBINT )
529 + dma_int = NFB;
530 + else if ( inl(M2P_reg_base+M2P_OFFSET_INTERRUPT) & INTERRUPT_M2P_CHERRORINT )
531 + dma_int = CHERROR;
532 +
533 + /*
534 + * Stall Interrupt: The Channel is stalled, meaning nothing is
535 + * programmed to transfer right now. So, we're back to the
536 +	 * beginning. If there's a buffer to transfer, program it into
537 + * max and base 0 registers.
538 + */
539 + if (dma_int == STALL) {
540 + DPRINTK("1 ");
541 +
542 + if (dma->total_buffers) {
543 + /*
544 +			 * The current_buffer has already been transferred, so
545 + * add the byte count to the total_bytes field.
546 + */
547 + dma->total_bytes = dma->total_bytes +
548 + dma->buffer_queue[dma->current_buffer].size;
549 +
550 + /*
551 + * Mark the current_buffer as used.
552 + */
553 + dma->buffer_queue[dma->current_buffer].used = TRUE;
554 +
555 + /*
556 + * Increment the used buffer counter
557 + */
558 + dma->used_buffers++;
559 +
560 + DPRINTK("#%d", dma->current_buffer);
561 +
562 + /*
563 + * Increment the current_buffer
564 + */
565 + dma->current_buffer = (dma->current_buffer + 1) %
566 + MAX_EP93XX_DMA_BUFFERS;
567 +
568 + /*
569 + * check if there's a new buffer to transfer.
570 + */
571 + if (dma->new_buffers && dma->xfer_enable) {
572 + /*
573 + * We have a new buffer to transfer so program
574 + * in the buffer values. Since a STALL
575 + * interrupt was triggered, we program the
576 + * base0 and maxcnt0
577 + *
578 + * Set the MAXCNT0 register with the buffer
579 + * size
580 + */
581 + outl( dma->buffer_queue[dma->current_buffer].size,
582 + M2P_reg_base+M2P_OFFSET_MAXCNT0 );
583 +
584 + /*
585 + * Set the BASE0 register with the buffer base
586 + * address
587 + */
588 + outl( dma->buffer_queue[dma->current_buffer].source,
589 + M2P_reg_base+M2P_OFFSET_BASE0 );
590 +
591 + /*
592 + * Decrement the new buffer counter
593 + */
594 + dma->new_buffers--;
595 +
596 + if (dma->new_buffers) {
597 + DPRINTK("A ");
598 + /*
599 + * Set the MAXCNT1 register with the
600 + * buffer size
601 + */
602 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
603 + MAX_EP93XX_DMA_BUFFERS].size,
604 + M2P_reg_base+M2P_OFFSET_MAXCNT1 );
605 +
606 + /*
607 + * Set the BASE1 register with the
608 + * buffer base address
609 + */
610 +					outl( dma->buffer_queue[(dma->current_buffer + 1) %
611 + MAX_EP93XX_DMA_BUFFERS].source,
612 + M2P_reg_base+M2P_OFFSET_BASE1 );
613 +
614 + /*
615 + * Decrement the new buffer counter
616 + */
617 + dma->new_buffers--;
618 +
619 + /*
620 + * Enable the NFB Interrupt.
621 + */
622 + uiCONTROL = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
623 + uiCONTROL |= CONTROL_M2P_NFBINTEN;
624 + outl( uiCONTROL, M2P_reg_base+M2P_OFFSET_CONTROL );
625 + }
626 + } else {
627 + /*
628 + * No new buffers.
629 + */
630 + DPRINTK("2 \n");
631 +
632 + /*
633 + * There's a chance we setup both buffer descriptors, but
634 + * didn't service the NFB quickly enough, causing the channel
635 + * to transfer both buffers, then enter the stall state.
636 + * So, we need to be able to process the second buffer.
637 + */
638 + if ((dma->used_buffers + dma->new_buffers) < dma->total_buffers) {
639 + DPRINTK("3 ");
640 +
641 + /*
642 +				 * The current_buffer has already been transferred, so add the
643 + * byte count to the total_bytes field.
644 + */
645 + dma->total_bytes = dma->total_bytes +
646 + dma->buffer_queue[dma->current_buffer].size;
647 +
648 + /*
649 + * Mark the current_buffer as used.
650 + */
651 + dma->buffer_queue[dma->current_buffer].used = TRUE;
652 +
653 + /*
654 + * Increment the used buffer counter
655 + */
656 + dma->used_buffers++;
657 +
658 + DPRINTK("#%d", dma->current_buffer);
659 +
660 + /*
661 + * Increment the current buffer pointer.
662 + */
663 + dma->current_buffer = (dma->current_buffer + 1) %
664 + MAX_EP93XX_DMA_BUFFERS;
665 +
666 + }
667 +
668 + /*
669 + * No new buffers to transfer, so disable the channel.
670 + */
671 + uiCONTROL = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
672 + uiCONTROL &= ~CONTROL_M2P_ENABLE;
673 + outl( uiCONTROL, M2P_reg_base+M2P_OFFSET_CONTROL );
674 +
675 + /*
676 + * Indicate that this channel is in the pause by starvation
677 + * state by setting the pause bit to true.
678 + */
679 + dma->pause = TRUE;
680 +
681 + DPRINTK("STATUS - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_STATUS) );
682 + DPRINTK("CONTROL - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CONTROL) );
683 + DPRINTK("REMAIN - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_REMAIN) );
684 + DPRINTK("PPALLOC - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_PPALLOC) );
685 + DPRINTK("BASE0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_BASE0) );
686 + DPRINTK("MAXCNT0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_MAXCNT0) );
687 + DPRINTK("CURRENT0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CURRENT0) );
688 + DPRINTK("BASE1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_BASE1) );
689 + DPRINTK("MAXCNT1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_MAXCNT1) );
690 + DPRINTK("CURRENT1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CURRENT1) );
691 +
692 + DPRINTK("Buffer buf_id source size last used \n");
693 + for (loop = 0; loop < 32; loop ++)
694 + DPRINTK("%d 0x%x 0x%x 0x%x %d %d \n",
695 + loop, dma->buffer_queue[loop].buf_id, dma->buffer_queue[loop].source,
696 + dma->buffer_queue[loop].size,
697 + dma->buffer_queue[loop].last, dma->buffer_queue[loop].used);
698 + DPRINTK("pause 0x%x 0x%x 0x%x %d %d \n",
699 + dma->pause_buf.buf_id, dma->pause_buf.source, dma->pause_buf.size,
700 + dma->pause_buf.last, dma->pause_buf.used);
701 +
702 + DPRINTK("Pause - %d \n", dma->pause);
703 + DPRINTK("xfer_enable - %d \n", dma->xfer_enable);
704 + DPRINTK("total bytes - 0x%x \n", dma->total_bytes);
705 + DPRINTK("total buffer - %d \n", dma->total_buffers);
706 + DPRINTK("new buffers - %d \n", dma->new_buffers);
707 + DPRINTK("current buffer - %d \n", dma->current_buffer);
708 + DPRINTK("last buffer - %d \n", dma->last_buffer);
709 + DPRINTK("used buffers - %d \n", dma->used_buffers);
710 + DPRINTK("callback addr - 0x%p \n", dma->callback);
711 + }
712 + } else {
713 + /*
714 + * No buffers to transfer, or old buffers to mark as used,
715 + * so Disable the channel
716 + */
717 + uiCONTROL = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
718 + uiCONTROL &= ~CONTROL_M2P_ENABLE;
719 + outl( uiCONTROL, M2P_reg_base+M2P_OFFSET_CONTROL );
720 +
721 + /*
722 + * Must read the control register back after a write.
723 + */
724 + read_back = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
725 +
726 + /*
727 + * Indicate that this channel is in the pause by
728 + * starvation state by setting the pause bit to true.
729 + */
730 + dma->pause = TRUE;
731 + }
732 + }
733 +
734 + /*
735 + * Next Frame Buffer Interrupt. If there's a new buffer program it
736 + * Check if this is the last buffer in the transfer,
737 + * and if it is, disable the NFB int to prevent being
738 + * interrupted for another buffer when we know there won't be
739 + * another.
740 + */
741 + if (dma_int == NFB) {
742 + DPRINTK("5 ");
743 +
744 + if (dma->total_buffers) {
745 + DPRINTK("6 ");
746 + /*
747 +			 * The current_buffer has already been transferred, so add the
748 + * byte count from the current buffer to the total byte count.
749 + */
750 + dma->total_bytes = dma->total_bytes +
751 + dma->buffer_queue[dma->current_buffer].size;
752 +
753 + /*
754 + * Mark the Current Buffer as used.
755 + */
756 + dma->buffer_queue[dma->current_buffer].used = TRUE;
757 +
758 + /*
759 + * Increment the used buffer counter
760 + */
761 + dma->used_buffers++;
762 +
763 + DPRINTK("#%d", dma->current_buffer);
764 +
765 + if ((dma->buffer_queue[
766 + (dma->current_buffer + 1) % MAX_EP93XX_DMA_BUFFERS].last) ||
767 + (dma->new_buffers == 0) || (dma->xfer_enable == FALSE)) {
768 + DPRINTK("7 ");
769 +
770 + /*
771 + * This is the last Buffer in this transaction, so disable
772 + * the NFB interrupt. We shouldn't get an NFB int when the
773 + * FSM moves to the ON state where it would typically get the
774 + * NFB int indicating a new buffer can be programmed.
775 + * Instead, once in the ON state, the DMA will just proceed
776 +				 * to complete the transfer of the current buffer, move the
777 +				 * FSM directly to the STALL state where a STALL interrupt
778 + * will be generated.
779 + */
780 + uiCONTROL = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
781 + uiCONTROL &= ~CONTROL_M2P_NFBINTEN;
782 + outl( uiCONTROL, M2P_reg_base+M2P_OFFSET_CONTROL );
783 +
784 + /*
785 + * The current buffer has been transferred, so increment
786 + * the current buffer counter to reflect this.
787 + */
788 + dma->current_buffer = (dma->current_buffer + 1) % MAX_EP93XX_DMA_BUFFERS;
789 +
790 + DPRINTK("End of NFB handling. \n");
791 + DPRINTK("STATUS - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_STATUS) );
792 + DPRINTK("CONTROL - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CONTROL) );
793 + DPRINTK("REMAIN - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_REMAIN) );
794 + DPRINTK("PPALLOC - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_PPALLOC) );
795 + DPRINTK("BASE0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_BASE0) );
796 + DPRINTK("MAXCNT0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_MAXCNT0) );
797 + DPRINTK("CURRENT0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CURRENT0) );
798 + DPRINTK("BASE1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_BASE1) );
799 + DPRINTK("MAXCNT1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_MAXCNT1) );
800 + DPRINTK("CURRENT1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CURRENT1) );
801 +
802 + DPRINTK("Buffer buf_id source size last used \n");
803 + for (loop = 0; loop < 32; loop ++)
804 + DPRINTK("%d 0x%x 0x%x 0x%x %d %d \n",
805 + loop, dma->buffer_queue[loop].buf_id, dma->buffer_queue[loop].source,
806 + dma->buffer_queue[loop].size,
807 + dma->buffer_queue[loop].last, dma->buffer_queue[loop].used);
808 + DPRINTK("pause 0x%x 0x%x 0x%x %d %d \n",
809 + dma->pause_buf.buf_id, dma->pause_buf.source, dma->pause_buf.size,
810 + dma->pause_buf.last, dma->pause_buf.used);
811 +
812 + DPRINTK("Pause - %d \n", dma->pause);
813 + DPRINTK("xfer_enable - %d \n", dma->xfer_enable);
814 + DPRINTK("total bytes - 0x%x \n", dma->total_bytes);
815 + DPRINTK("total buffer - %d \n", dma->total_buffers);
816 + DPRINTK("new buffers - %d \n", dma->new_buffers);
817 + DPRINTK("current buffer - %d \n", dma->current_buffer);
818 + DPRINTK("last buffer - %d \n", dma->last_buffer);
819 + DPRINTK("used buffers - %d \n", dma->used_buffers);
820 + DPRINTK("callback addr - 0x%p \n", dma->callback);
821 +
822 + } else if (dma->new_buffers) {
823 + DPRINTK("8 ");
824 + /*
825 + * we have a new buffer, so increment the current buffer to
826 + * point to the next buffer, which is already programmed into
827 + * the DMA. Next time around, it'll be pointing to the
828 + * current buffer.
829 + */
830 + dma->current_buffer = (dma->current_buffer + 1) % MAX_EP93XX_DMA_BUFFERS;
831 +
832 + /*
833 + * we know we have a new buffer to program as the next
834 + * buffer, so check which set of MAXCNT and BASE registers
835 + * to program.
836 + */
837 + if ( inl(M2P_reg_base+M2P_OFFSET_STATUS) & STATUS_M2P_NEXTBUFFER ) {
838 + /*
839 + * Set the MAXCNT1 register with the buffer size
840 + */
841 + outl( dma->buffer_queue[
842 + (dma->current_buffer + 1) % MAX_EP93XX_DMA_BUFFERS].size,
843 + M2P_reg_base+M2P_OFFSET_MAXCNT1 );
844 +
845 + /*
846 + * Set the BASE1 register with the buffer base address
847 + */
848 + outl( dma->buffer_queue[
849 + (dma->current_buffer + 1) % MAX_EP93XX_DMA_BUFFERS].source,
850 + M2P_reg_base+M2P_OFFSET_BASE1 );
851 + } else {
852 + /*
853 + * Set the MAXCNT0 register with the buffer size
854 + */
855 + outl( dma->buffer_queue[
856 + (dma->current_buffer + 1) % MAX_EP93XX_DMA_BUFFERS].size,
857 + M2P_reg_base+M2P_OFFSET_MAXCNT0 );
858 +
859 + /*
860 + * Set the BASE0 register with the buffer base address
861 + */
862 + outl( dma->buffer_queue[
863 + (dma->current_buffer + 1) % MAX_EP93XX_DMA_BUFFERS].source,
864 + M2P_reg_base+M2P_OFFSET_BASE0 );
865 + }
866 +
867 + /*
868 + * Decrement the new buffers counter
869 + */
870 + dma->new_buffers--;
871 + }
872 + } else {
873 + /*
874 + * Total number of buffers is 0 - really we should never get here,
875 + * but just in case.
876 + */
877 + DPRINTK("9 \n");
878 +
879 + /*
880 + * No new buffers to transfer, so Disable the channel
881 + */
882 + uiCONTROL = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
883 + uiCONTROL &= ~CONTROL_M2P_ENABLE;
884 + outl( uiCONTROL, M2P_reg_base+M2P_OFFSET_CONTROL );
885 + }
886 + }
887 +
888 + /*
889 + * Channel Error Interrupt, or peripheral interrupt, specific to the
890 + * memory to/from peripheral channels.
891 + */
892 + if (dma_int == CHERROR) {
893 + /*
894 + * just clear the interrupt, it's really up to the peripheral
895 + * driver to determine if any further action is necessary.
896 + */
897 + uiINTERRUPT = inl(M2P_reg_base+M2P_OFFSET_INTERRUPT);
898 + uiINTERRUPT &= ~INTERRUPT_M2P_CHERRORINT;
899 + outl( uiINTERRUPT, M2P_reg_base+M2P_OFFSET_INTERRUPT );
900 + }
901 +
902 + /*
903 + * Make sure the interrupt was valid, and if it was, then check
904 + * if a callback function was installed for this DMA channel. If a
905 + * callback was installed call it.
906 + */
907 + if ((dma_int != UNDEF_INT) && dma->callback)
908 + dma->callback(dma_int, dma->device, dma->user_data);
909 +
910 + return IRQ_HANDLED;
911 +}
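/*
 * Editor's illustrative sketch (not part of the original patch): the M2P
 * equivalent of the descriptor ping-pong used in the NFB path above - the
 * NEXTBUFFER status bit selects whether the MAXCNT1/BASE1 or the
 * MAXCNT0/BASE0 pair is free to take the next buffer.  The helper is
 * hypothetical; offsets and bit names come from dma_ep93xx.h, and MAXCNT
 * is written before BASE, matching the order used in this file.
 */
static void example_reload_next_m2p_descriptor(ep93xx_dma_t *dma, int next)
{
	unsigned int base = dma->reg_base;

	if (inl(base + M2P_OFFSET_STATUS) & STATUS_M2P_NEXTBUFFER) {
		outl(dma->buffer_queue[next].size,   base + M2P_OFFSET_MAXCNT1);
		outl(dma->buffer_queue[next].source, base + M2P_OFFSET_BASE1);
	} else {
		outl(dma->buffer_queue[next].size,   base + M2P_OFFSET_MAXCNT0);
		outl(dma->buffer_queue[next].source, base + M2P_OFFSET_BASE0);
	}
}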
912 +
913 +/*****************************************************************************
914 + *
915 + * dma_open_m2p(int device)
916 + *
917 + * Description: This function will attempt to open an M2P/P2M DMA channel.
918 + * If the open is successful, the channel number is returned,
919 + * otherwise a negative number is returned.
920 + *
921 + * Parameters:
922 + * device: device for which the dma channel is requested.
923 + *
924 + ****************************************************************************/
925 +static int
926 +dma_open_m2p(int device)
927 +{
928 + int channel = -1;
929 + unsigned int loop;
930 + unsigned int M2P_reg_base;
931 + unsigned int uiPWRCNT;
932 + /*unsigned long flags;*/
933 +
934 + DPRINTK("DMA Open M2P with hw dev %d\n", device);
935 +
936 + /*
937 + * Lock the dma channel list.
938 + */
939 + //spin_lock_irqsave(&dma_list_lock, flags);
940 + spin_lock(&dma_list_lock);
941 +
942 + /*
943 + * Verify that the device requesting DMA isn't already using a DMA channel
944 + */
945 + if (device >= 10)
946 + loop = 1; // Rx transfer requested
947 + else
948 + loop = 0; // Tx transfer requested
949 +
950 + for (; loop < 10; loop = loop + 2)
951 + /*
952 + * Before checking for a matching device, check that the
953 + * channel is in use, otherwise the device field is
954 + * invalid.
955 + */
956 + if (dma_chan[loop].ref_count)
957 + if (device == dma_chan[loop].device) {
958 +				DPRINTK("DMA Open M2P - Error\n");
+				/* drop the list lock taken above before bailing out */
+				spin_unlock(&dma_list_lock);
959 +				return(-1);
960 + }
961 +
962 + /*
963 + * Get a DMA channel instance for the given hardware device.
964 + * If this is a TX look for even numbered channels, else look for
965 + * odd numbered channels
966 + */
967 + if (device >= 10)
968 + loop = 1; /* Rx transfer requested */
969 + else
970 + loop = 0; /* Tx transfer requested */
971 +
972 + for (; loop < 10; loop = loop + 2)
973 + if (!dma_chan[loop].ref_count) {
974 + /*
975 + * Capture the channel and increment the reference count.
976 + */
977 + channel = loop;
978 + dma_chan[channel].ref_count++;
979 + break;
980 + }
981 +
982 + /*
983 + * Unlock the dma channel list.
984 + */
985 + //spin_unlock_irqrestore(&dma_list_lock, flags);
986 + spin_unlock(&dma_list_lock);
987 + /*
988 + * See if we got a valid channel.
989 + */
990 + if (channel < 0)
991 + return(-1);
992 +
993 + /*
994 + * Point regs to the correct dma channel register base.
995 + */
996 + M2P_reg_base = dma_chan[channel].reg_base;
997 +
998 + /*
999 + * Turn on the clock for the specified DMA channel
1000 + * TODO: need to use the correct register name for the
1001 + * power control register.
1002 + */
1003 + uiPWRCNT = inl(/*SYSCON_PWRCNT*/EP93XX_SYSCON_CLOCK_CONTROL);
1004 + switch (channel) {
1005 + case 0:
1006 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2PCH0;
1007 + break;
1008 +
1009 + case 1:
1010 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2PCH1;
1011 + break;
1012 +
1013 + case 2:
1014 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2PCH2;
1015 + break;
1016 +
1017 + case 3:
1018 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2PCH3;
1019 + break;
1020 +
1021 + case 4:
1022 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2PCH4;
1023 + break;
1024 +
1025 + case 5:
1026 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2PCH5;
1027 + break;
1028 +
1029 + case 6:
1030 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2PCH6;
1031 + break;
1032 +
1033 + case 7:
1034 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2PCH7;
1035 + break;
1036 +
1037 + case 8:
1038 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2PCH8;
1039 + break;
1040 +
1041 + case 9:
1042 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2PCH9;
1043 + break;
1044 +
1045 + default:
1046 + return(-1);
1047 + }
1048 + outl( uiPWRCNT, /*SYSCON_PWRCNT*/EP93XX_SYSCON_CLOCK_CONTROL );
1049 +
1050 + /*
1051 + * Clear out the control register before any further setup.
1052 + */
1053 + outl( 0, M2P_reg_base+M2P_OFFSET_CONTROL );
1054 +
1055 + /*
1056 + * Setup the peripheral port value in the DMA channel registers.
1057 + */
1058 + if (device < 10)
1059 + outl( (unsigned int)device, M2P_reg_base+M2P_OFFSET_PPALLOC );
1060 + else
1061 + outl( (unsigned int)(device - 10), M2P_reg_base+M2P_OFFSET_PPALLOC );
1062 +
1063 + /*
1064 + * Let's hold on to the value of the Hw device for comparison later.
1065 + */
1066 + dma_chan[channel].device = device;
1067 +
1068 + /*
1069 + * Success.
1070 + */
1071 + return(channel);
1072 +}
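/*
 * Editor's illustrative sketch (not part of the original patch): the
 * allocation convention dma_open_m2p() follows.  Hardware device numbers
 * below 10 are Tx requests and are placed on even M2P channels; numbers
 * 10 and above are the Rx side of the same peripheral, are placed on odd
 * channels, and program PPALLOC with (device - 10).  The helpers are
 * hypothetical and just restate that convention.
 */
static int example_first_m2p_channel(int device)
{
	return (device >= 10) ? 1 : 0;	/* odd for Rx, even for Tx */
}

static unsigned int example_ppalloc_value(int device)
{
	return (device < 10) ? (unsigned int)device
			     : (unsigned int)(device - 10);
}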
1073 +
1074 +/*****************************************************************************
1075 + *
1076 + * dma_open_m2m(int device)
1077 + *
1078 + * Description: This function will attempt to open an M2M DMA channel.
1079 + * If the open is successful, the channel number is returned,
1080 + * otherwise a negative number is returned.
1081 + *
1082 + * Parameters:
1083 + * device: device for which the dma channel is requested.
1084 + *
1085 + ****************************************************************************/
1086 +static int
1087 +dma_open_m2m(int device)
1088 +{
1089 + int channel = -1;
1090 + unsigned int loop;
1091 + unsigned int M2M_reg_base;
1092 + unsigned int uiPWRCNT, uiCONTROL;
1093 + /*unsigned long flags;*/
1094 +
1095 + DPRINTK("DMA Open M2M with hw dev %d\n", device);
1096 +
1097 + /*
1098 + * Lock the dma channel list.
1099 + */
1100 + //spin_lock_irqsave(&dma_list_lock, flags);
1101 + spin_lock(&dma_list_lock);
1102 +
1103 +
1104 + /*
1105 + * Check if this device is already allocated a channel.
1106 + * TODO: can one M2M device be allocated multiple channels?
1107 + */
1108 + for (loop = 10; loop < 12; loop++)
1109 + /*
1110 + * Before checking for a matching device, check that the
1111 + * channel is in use, otherwise the device field is
1112 + * invalid.
1113 + */
1114 + if (dma_chan[loop].ref_count)
1115 + if (device == dma_chan[loop].device) {
1116 + DPRINTK("Error - dma_open_m2m - already allocated channel\n");
1117 +
1118 + /*
1119 + * Unlock the dma channel list.
1120 + */
1121 + //spin_unlock_irqrestore(&dma_list_lock, flags);
1122 + spin_unlock(&dma_list_lock);
1123 + /*
1124 + * Fail.
1125 + */
1126 + return(-1);
1127 + }
1128 +
1129 + /*
1130 + * Get a DMA channel instance for the given hardware device.
1131 + */
1132 + for (loop = 10; loop < 12; loop++)
1133 + if (!dma_chan[loop].ref_count) {
1134 + /*
1135 + * Capture the channel and increment the reference count.
1136 + */
1137 + channel = loop;
1138 + dma_chan[channel].ref_count++;
1139 + break;
1140 + }
1141 +
1142 + /*
1143 + * Unlock the dma channel list.
1144 + */
1145 + //spin_unlock(dma_list_lock);
1146 + spin_unlock(&dma_list_lock);
1147 + //spin_unlock_irqrestore(&dma_list_lock, flags);
1148 +
1149 + /*
1150 + * See if we got a valid channel.
1151 + */
1152 + if (channel < 0)
1153 + return(-1);
1154 +
1155 + /*
1156 + * Point regs to the correct dma channel register base.
1157 + */
1158 + M2M_reg_base = dma_chan[channel].reg_base;
1159 +
1160 + /*
1161 + * Turn on the clock for the specified DMA channel
1162 + * TODO: need to use the correct register name for the
1163 + * power control register.
1164 + */
1165 + uiPWRCNT = inl(/*SYSCON_PWRCNT*/EP93XX_SYSCON_CLOCK_CONTROL);
1166 + switch (channel) {
1167 + case 10:
1168 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2MCH0;
1169 + break;
1170 +
1171 + case 11:
1172 + uiPWRCNT |= SYSCON_PWRCNT_DMA_M2MCH1;
1173 + break;
1174 +
1175 + default:
1176 + return(-1);
1177 + }
1178 + outl( uiPWRCNT, /*SYSCON_PWRCNT*/EP93XX_SYSCON_CLOCK_CONTROL);
1179 +
1180 +	DPRINTK("DMA Open - power control: 0x%x \n", inl(EP93XX_SYSCON_CLOCK_CONTROL) );
1181 +
1182 + /*
1183 + * Clear out the control register before any further setup.
1184 + */
1185 + outl( 0, M2M_reg_base+M2M_OFFSET_CONTROL );
1186 +
1187 + /*
1188 + * Setup the transfer mode and the request source selection within
1189 + * the DMA M2M channel registers.
1190 + */
1191 + switch (device) {
1192 + case DMA_MEMORY:
1193 + /*
1194 + * Clear TM field, set RSS field to 0
1195 + */
1196 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1197 + uiCONTROL &= ~(CONTROL_M2M_TM_MASK | CONTROL_M2M_RSS_MASK);
1198 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1199 + break;
1200 +
1201 + case DMA_IDE:
1202 + /*
1203 +		 * Set RSS field to 3, Set NO_HDSK, Set PW field to 2
1204 + */
1205 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1206 + uiCONTROL &= ~(CONTROL_M2M_RSS_MASK|CONTROL_M2M_PW_MASK);
1207 + uiCONTROL |= (3<<CONTROL_M2M_RSS_SHIFT) |
1208 + CONTROL_M2M_NO_HDSK |
1209 + (2<<CONTROL_M2M_PW_SHIFT);
1210 +
1211 + uiCONTROL &= ~(CONTROL_M2M_ETDP_MASK);
1212 + uiCONTROL &= ~(CONTROL_M2M_DACKP);
1213 + uiCONTROL &= ~(CONTROL_M2M_DREQP_MASK);
1214 +
1215 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1216 + inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1217 + break;
1218 +
1219 + case DMARx_SSP:
1220 + /*
1221 + * Set RSS field to 1, Set NO_HDSK, Set TM field to 2
1222 + */
1223 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1224 + uiCONTROL &= ~(CONTROL_M2M_RSS_MASK|CONTROL_M2M_TM_MASK);
1225 + uiCONTROL |= (1<<CONTROL_M2M_RSS_SHIFT) |
1226 + CONTROL_M2M_NO_HDSK |
1227 + (2<<CONTROL_M2M_TM_SHIFT);
1228 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1229 + break;
1230 +
1231 + case DMATx_SSP:
1232 + /*
1233 + * Set RSS field to 2, Set NO_HDSK, Set TM field to 1
1234 + */
1235 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1236 + uiCONTROL &= ~(CONTROL_M2M_RSS_MASK|CONTROL_M2M_TM_MASK);
1237 + uiCONTROL |= (2<<CONTROL_M2M_RSS_SHIFT) |
1238 + CONTROL_M2M_NO_HDSK |
1239 + (1<<CONTROL_M2M_TM_SHIFT);
1240 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1241 + break;
1242 +
1243 + case DMATx_EXT_DREQ:
1244 + /*
1245 +		 * Set TM field to 1, set RSS field to 0
1246 + */
1247 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1248 + uiCONTROL &= ~(CONTROL_M2M_RSS_MASK|CONTROL_M2M_TM_MASK);
1249 + uiCONTROL |= 1<<CONTROL_M2M_TM_SHIFT;
1250 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1251 + break;
1252 +
1253 + case DMARx_EXT_DREQ:
1254 + /*
1255 + * Set TM field to 2, set RSS field to 0
1256 + */
1257 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1258 + uiCONTROL &= ~(CONTROL_M2M_RSS_MASK|CONTROL_M2M_TM_MASK);
1259 + uiCONTROL |= 2<<CONTROL_M2M_TM_SHIFT;
1260 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1261 + break;
1262 +
1263 + default:
1264 + return -1;
1265 + }
1266 +
1267 + /*
1268 + * Let's hold on to the value of the Hw device for comparison later.
1269 + */
1270 + dma_chan[channel].device = device;
1271 +
1272 + /*
1273 + * Success.
1274 + */
1275 + return(channel);
1276 +}
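/*
 * Editor's note (not part of the original patch): summary of the M2M
 * control fields programmed above for each device type, as read from the
 * switch statement in dma_open_m2m():
 *
 *   device            RSS  TM  other
 *   DMA_MEMORY         0    0  software-triggered via the START bit
 *   DMA_IDE            3    0  NO_HDSK, PW = 2, DREQ/DACK polarity cleared
 *   DMARx_SSP          1    2  NO_HDSK
 *   DMATx_SSP          2    1  NO_HDSK
 *   DMATx_EXT_DREQ     0    1
 *   DMARx_EXT_DREQ     0    2
 */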
1277 +
1278 +/*****************************************************************************
1279 + *
1280 + * int dma_config_m2m(ep93xx_dma_t * dma, unsigned int flags_m2m,
1281 + * dma_callback callback, unsigned int user_data)
1282 + *
1283 + * Description: Configure the DMA channel and install a callback function.
1284 + * This function will have to be called for every transfer
1285 + *
1286 + * dma: Pointer to the dma instance data for the M2M channel to
1287 + * configure.
1288 + * flags_m2m Flags used to configure an M2M dma channel and determine
1289 + * if a callback function and user_data information are included
1290 + * in this call.
1291 + * callback function pointer which is called near the end of the
1292 + * dma channel's irq handler.
1293 + * user_data defined by the calling driver.
1294 + *
1295 + ****************************************************************************/
1296 +static int
1297 +dma_config_m2m(ep93xx_dma_t * dma, unsigned int flags_m2m,
1298 + dma_callback callback, unsigned int user_data)
1299 +{
1300 + unsigned long flags;
1301 + unsigned int M2M_reg_base, uiCONTROL;
1302 +
1303 + /*
1304 + * Make sure the channel is disabled before configuring the channel.
1305 + *
1306 + * TODO: Is this correct?? Making a big change here...
1307 + */
1308 + /* if (!dma->pause || (!dma->pause && dma->xfer_enable)) */
1309 + if (dma->xfer_enable) {
1310 + /*
1311 + * DMA channel is not paused, so we can't configure it.
1312 + */
1313 + DPRINTK("DMA channel not paused, so can't configure! \n");
1314 + return(-1);
1315 + }
1316 +
1317 + /*
1318 + * Mask interrupts.
1319 + */
1320 + local_irq_save(flags);
1321 +
1322 + /*
1323 + * Setup a pointer into the dma channel's register set.
1324 + */
1325 + M2M_reg_base = dma->reg_base;
1326 +
1327 + uiCONTROL = inl(M2M_reg_base + M2M_OFFSET_CONTROL);
1328 + outl(0, M2M_reg_base + M2M_OFFSET_CONTROL);
1329 + inl(M2M_reg_base + M2M_OFFSET_CONTROL);
1330 + outl(uiCONTROL, M2M_reg_base + M2M_OFFSET_CONTROL);
1331 +
1332 + /*
1333 + * By default we disable the stall interrupt.
1334 + */
1335 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1336 + uiCONTROL &= ~CONTROL_M2M_STALLINTEN;
1337 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1338 +
1339 + /*
1340 + * By default we disable the done interrupt.
1341 + */
1342 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1343 + uiCONTROL &= ~CONTROL_M2M_DONEINTEN;
1344 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1345 +
1346 + /*
1347 + * Set up the transfer control fields based on values passed in
1348 + * the flags_m2m field.
1349 + */
1350 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1351 +
1352 + if ( flags_m2m & DESTINATION_HOLD )
1353 + uiCONTROL |= CONTROL_M2M_DAH;
1354 + else
1355 + uiCONTROL &= ~CONTROL_M2M_DAH;
1356 +
1357 + if ( flags_m2m & SOURCE_HOLD )
1358 + uiCONTROL |= CONTROL_M2M_SAH;
1359 + else
1360 + uiCONTROL &= ~CONTROL_M2M_SAH;
1361 +
1362 + uiCONTROL &= ~CONTROL_M2M_TM_MASK;
1363 + uiCONTROL |= (((flags_m2m & TRANSFER_MODE_MASK) >> TRANSFER_MODE_SHIFT) <<
1364 + CONTROL_M2M_TM_SHIFT) & CONTROL_M2M_TM_MASK;
1365 +
1366 + uiCONTROL &= ~CONTROL_M2M_PWSC_MASK;
1367 + uiCONTROL |= (((flags_m2m & WAIT_STATES_MASK) >> WAIT_STATES_SHIFT) <<
1368 + CONTROL_M2M_PWSC_SHIFT) & CONTROL_M2M_PWSC_MASK;
1369 +
1370 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1371 + inl(M2M_reg_base + M2M_OFFSET_CONTROL);
1372 +
1373 + /*
1374 + * Save the callback function in the dma instance for this channel.
1375 + */
1376 + dma->callback = callback;
1377 +
1378 + /*
1379 +	 * Save the user data in the dma instance for this channel.
1380 + */
1381 + dma->user_data = user_data;
1382 +
1383 + /*
1384 + * Put the dma instance into the pause state by setting the
1385 + * pause bit to true.
1386 + */
1387 + dma->pause = TRUE;
1388 +
1389 + local_irq_restore(flags);
1390 +
1391 + /*
1392 + * Success.
1393 + */
1394 + return(0);
1395 +}
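/*
 * Editor's illustrative sketch (not part of the original patch): how a
 * caller might assemble the flags_m2m word that dma_config_m2m() decodes.
 * The macros are the ones referenced above; the field values (transfer
 * mode 0, two wait states, source address held) are arbitrary examples.
 */
static unsigned int example_build_m2m_flags(void)
{
	unsigned int flags = 0;

	flags |= SOURCE_HOLD;					 /* keep SAR fixed   */
	flags |= (0 << TRANSFER_MODE_SHIFT) & TRANSFER_MODE_MASK; /* TM field = 0     */
	flags |= (2 << WAIT_STATES_SHIFT) & WAIT_STATES_MASK;	 /* PWSC field = 2   */

	return flags;
}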
1396 +
1397 +/*****************************************************************************
1398 + *
1399 + * int dma_start_m2m(int channel, ep93xx_dma_t * dma)
1400 + *
1401 + * Description: Initiate a transfer on an M2M channel.
1402 + *
1403 + * channel:  channel number of the M2M channel to start.
1404 + * dma:      pointer to the dma instance data for that channel.
1407 + *
1408 + ****************************************************************************/
1409 +static int
1410 +dma_start_m2m(int channel, ep93xx_dma_t * dma)
1411 +{
1412 + unsigned long flags;
1413 + unsigned int M2M_reg_base = dma->reg_base;
1414 + unsigned int uiCONTROL;
1415 +
1416 + /*
1417 + * Mask interrupts while we get this started.
1418 + */
1419 + local_irq_save(flags);
1420 +
1421 + /*
1422 + * Make sure the channel has at least one buffer in the queue.
1423 + */
1424 + if (dma->new_buffers < 1) {
1425 + /*
1426 + * Unmask irqs
1427 + */
1428 + local_irq_restore(flags);
1429 +
1430 + DPRINTK("DMA Start: Channel starved.\n");
1431 +
1432 + /*
1433 + * This channel does not have enough buffers queued up,
1434 + * so enter the pause by starvation state.
1435 + */
1436 + dma->xfer_enable = TRUE;
1437 + dma->pause = TRUE;
1438 +
1439 + /*
1440 + * Success.
1441 + */
1442 + return(0);
1443 + }
1444 +
1445 + /*
1446 + * Clear any pending interrupts.
1447 + */
1448 + outl(0x0, M2M_reg_base+M2M_OFFSET_INTERRUPT);
1449 +
1450 + /*
1451 + * Set up one or both buffer descriptors with values from the next one or
1452 + * two buffers in the queue. By default disable the next frame buffer
1453 + * interrupt on the channel.
1454 + */
1455 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1456 + uiCONTROL &= ~CONTROL_M2M_NFBINTEN;
1457 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1458 +
1459 + /*
1460 + * enable the done interrupt.
1461 + */
1462 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1463 + uiCONTROL |= CONTROL_M2M_DONEINTEN;
1464 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1465 +
1466 + /*
1467 + * Update the dma channel instance transfer state.
1468 + */
1469 + dma->xfer_enable = TRUE;
1470 + dma->pause = FALSE;
1471 +
1472 + /*
1473 + * Program up the first buffer descriptor with a source and destination
1474 + * and a byte count.
1475 + */
1476 + outl( dma->buffer_queue[dma->current_buffer].source,
1477 + M2M_reg_base+M2M_OFFSET_SAR_BASE0 );
1478 +
1479 + outl( dma->buffer_queue[dma->current_buffer].dest,
1480 + M2M_reg_base+M2M_OFFSET_DAR_BASE0 );
1481 +
1482 + outl( dma->buffer_queue[dma->current_buffer].size,
1483 + M2M_reg_base+M2M_OFFSET_BCR0 );
1484 +
1485 + /*
1486 + * Decrement the new buffers counter.
1487 + */
1488 + dma->new_buffers--;
1489 +
1490 + /*
1491 + * Set up the second buffer descriptor with a second buffer if we have
1492 + * a second buffer.
1493 + */
1494 + if (dma->new_buffers) {
1495 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
1496 + MAX_EP93XX_DMA_BUFFERS].source,
1497 + M2M_reg_base+M2M_OFFSET_SAR_BASE1 );
1498 +
1499 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
1500 + MAX_EP93XX_DMA_BUFFERS].dest,
1501 + M2M_reg_base+M2M_OFFSET_DAR_BASE1 );
1502 +
1503 + outl( dma->buffer_queue[(dma->current_buffer + 1) %
1504 + MAX_EP93XX_DMA_BUFFERS].size,
1505 + M2M_reg_base+M2M_OFFSET_BCR1 );
1506 +
1507 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1508 + uiCONTROL |= CONTROL_M2M_NFBINTEN;
1509 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1510 +
1511 + dma->new_buffers--;
1512 + }
1513 +
1514 + /*
1515 + * Now we enable the channel. This initiates the transfer.
1516 + */
1517 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1518 + uiCONTROL |= CONTROL_M2M_ENABLE;
1519 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1520 + inl(M2M_reg_base + M2M_OFFSET_CONTROL);
1521 +
1522 + /*
1523 + * If this is a memory to memory transfer, we need to s/w trigger the
1524 + * transfer by setting the start bit within the control register.
1525 + */
1526 + if (dma->device == DMA_MEMORY) {
1527 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
1528 + uiCONTROL |= CONTROL_M2M_START;
1529 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
1530 + }
1531 +
1532 + DPRINTK("DMA - It's been started!!");
1533 + DPRINTK("CONTROL - 0x%x \n", inl(M2M_reg_base+M2M_OFFSET_CONTROL) );
1534 + DPRINTK("STATUS - 0x%x \n", inl(M2M_reg_base+M2M_OFFSET_STATUS) );
1535 + DPRINTK("BCR0 - 0x%x \n", dma->buffer_queue[dma->current_buffer].size);
1536 + DPRINTK("SAR_BASE0 - 0x%x \n", inl(M2M_reg_base+M2M_OFFSET_SAR_BASE0) );
1537 + DPRINTK("SAR_CUR0 - 0x%x \n", inl(M2M_reg_base+M2M_OFFSET_SAR_CURRENT0) );
1538 + DPRINTK("DAR_BASE0 - 0x%x \n", inl(M2M_reg_base+M2M_OFFSET_DAR_BASE0) );
1539 + DPRINTK("DAR_CUR0 - 0x%x \n", inl(M2M_reg_base+M2M_OFFSET_DAR_CURRENT0) );
1540 +
1541 + /*
1542 + * Unmask irqs
1543 + */
1544 + local_irq_restore(flags);
1545 +
1546 + /*
1547 + * Success.
1548 + */
1549 + return(0);
1550 +}
1551 +
1552 +/*****************************************************************************
1553 + *
1554 + * DMA interface functions
1555 + *
1556 + ****************************************************************************/
1557 +
1558 +/*****************************************************************************
1559 + *
1560 + * int ep93xx_dma_config(int handle, unsigned int flags_m2p, unsigned int flags_m2m,
1561 + * dma_callback callback, unsigned int user_data)
1562 + *
1563 + * Description: Configure the DMA channel and install a callback function.
1564 + *
1565 + * handle:    Handle unique to each instance of the dma interface, used
1566 + * to verify this call.
1567 + * flags_m2p Flags used to configure an M2P/P2M dma channel and determine
1568 + * if a callback function and user_data information are included
1569 + * in this call. This field should be NULL if handle represents
1570 + * an M2M channel.
1571 + * flags_m2m Flags used to configure an M2M dma channel and determine
1572 + * if a callback function and user_data information are included
1573 + * in this call. This field should be NULL if handle represents
1574 + * an M2P/P2M channel.
1575 + * callback function pointer which is called near the end of the
1576 + * dma channel's irq handler.
1577 + * user_data defined by the calling driver.
1578 + *
1579 + ****************************************************************************/
1580 +int
1581 +ep93xx_dma_config(int handle, unsigned int flags_m2p, unsigned int flags_m2m,
1582 + dma_callback callback, unsigned int user_data)
1583 +{
1584 + int channel;
1585 + ep93xx_dma_t * dma;
1586 + unsigned long flags;
1587 + unsigned int M2P_reg_base, uiCONTROL;
1588 +
1589 + /*
1590 + * Get the DMA hw channel # from the handle.
1591 + */
1592 + channel = dma_get_channel_from_handle(handle);
1593 +
1594 + /*
1595 + * See if this is a valid handle.
1596 + */
1597 + if (channel < 0) {
1598 + printk(KERN_ERR
1599 + "DMA Config: Invalid dma handle.\n");
1600 + return(-EINVAL);
1601 + }
1602 +
1603 + DPRINTK("DMA Config \n");
1604 +
1605 + dma = &dma_chan[channel];
1606 +
1607 + local_irq_save(flags);
1608 +
1609 + /*
1610 + * Check if the channel is currently transferring.
1611 + */
1612 + if (dma->xfer_enable) {
1613 + local_irq_restore(flags);
1614 + return(-EINVAL);
1615 + }
1616 +
1617 + /*
1618 + * Check if this is an m2m function.
1619 + */
1620 + if (channel >= 10) {
1621 + local_irq_restore(flags);
1622 +
1623 + /*
1624 + * Call another function to handle m2m config.
1625 + */
1626 + return(dma_config_m2m(dma, flags_m2m, callback, user_data));
1627 + }
1628 +
1629 + /*
1630 + * Setup a pointer into the dma channel's register set.
1631 + */
1632 + M2P_reg_base = dma->reg_base;
1633 +
1634 + /*
1635 + * By default we enable the stall interrupt.
1636 + */
1637 + uiCONTROL = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
1638 + uiCONTROL |= CONTROL_M2P_STALLINTEN;
1639 + outl( uiCONTROL, M2P_reg_base+M2P_OFFSET_CONTROL );
1640 +
1641 + /*
1642 + * Configure the channel for an error from the peripheral.
1643 + */
1644 + uiCONTROL = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
1645 +	if ( flags_m2p & CHANNEL_ERROR_INT_ENABLE )
1646 + uiCONTROL |= CONTROL_M2P_CHERRORINTEN;
1647 + else
1648 + uiCONTROL &= ~CONTROL_M2P_CHERRORINTEN;
1649 + outl( uiCONTROL, M2P_reg_base+M2P_OFFSET_CONTROL );
1650 +
1651 + uiCONTROL = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
1652 +	if ( flags_m2p & CHANNEL_ABORT )
1653 + uiCONTROL |= CONTROL_M2P_ABRT;
1654 + else
1655 + uiCONTROL &= ~CONTROL_M2P_ABRT;
1656 + outl( uiCONTROL, M2P_reg_base+M2P_OFFSET_CONTROL );
1657 +
1658 + uiCONTROL = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
1659 +	if ( flags_m2p & IGNORE_CHANNEL_ERROR )
1660 + uiCONTROL |= CONTROL_M2P_ICE;
1661 + else
1662 + uiCONTROL &= ~CONTROL_M2P_ICE;
1663 + outl( uiCONTROL, M2P_reg_base+M2P_OFFSET_CONTROL );
1664 +
1665 + /*
1666 + * Save the callback function in the dma instance for this channel.
1667 + */
1668 + dma->callback = callback;
1669 +
1670 + /*
1671 +	 * Save the user data in the dma instance for this channel.
1672 + */
1673 + dma->user_data = user_data;
1674 +
1675 + /*
1676 + * Put the dma instance into the pause state by setting the
1677 + * pause bit to true.
1678 + */
1679 + dma->pause = TRUE;
1680 +
1681 + local_irq_restore(flags);
1682 +
1683 + /*
1684 + * Success.
1685 + */
1686 + return(0);
1687 +}
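/*
 * Editor's illustrative sketch (not part of the original patch): the shape
 * of a driver callback as it is invoked from the irq handlers above - it
 * receives the interrupt kind (NFB, STALL or DONE), the hardware device
 * number and the user_data word passed to ep93xx_dma_config().  The exact
 * dma_callback typedef lives in dma_ep93xx.h; the parameter types here are
 * inferred from the call sites in this file.
 */
static void example_dma_callback(ep93xx_dma_dev_t dma_int, int device,
				 unsigned int user_data)
{
	if (dma_int == DONE || dma_int == STALL)
		/* transfer finished or starved - notify the owning driver */
		printk(KERN_DEBUG "dma: dev %d finished, user_data 0x%x\n",
		       device, user_data);
}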
1688 +
1689 +/*****************************************************************************
1690 + *
1691 + * int ep93xx_dma_start(int handle, unsigned int channels, unsigned int * handles)
1692 + *
1693 + * Description: Initiate a transfer on up to 3 channels.
1694 + *
1695 + * handle: handle for the channel to initiate transfer on.
1696 + * channels: number of channels to initiate transfers on.
1697 + * handles: pointer to an array of handles, one for each channel which
1698 + * is to be started.
1699 + *
1700 + ****************************************************************************/
1701 +int
1702 +ep93xx_dma_start(int handle, unsigned int channels, unsigned int * handles)
1703 +{
1704 + ep93xx_dma_t * dma_pointers[3];
1705 + unsigned int M2P_reg_bases[3];
1706 + unsigned int loop, uiCONTROL;
1707 + unsigned long flags;
1708 + int channel;
1709 +
1710 + /*
1711 + * Get the DMA hw channel # from the handle.
1712 + */
1713 + channel = dma_get_channel_from_handle(handle);
1714 +
1715 + /*
1716 + * See if this is a valid handle.
1717 + */
1718 + if (channel < 0) {
1719 + printk(KERN_ERR "DMA Start: Invalid dma handle.\n");
1720 + return(-EINVAL);
1721 + }
1722 +
1723 + if (channels < 1) {
1724 + printk(KERN_ERR "DMA Start: Invalid parameter.\n");
1725 + return(-EINVAL);
1726 + }
1727 +
1728 + DPRINTK("DMA Start \n");
1729 +
1730 + /*
1731 +	 * Mask off interrupts.
1732 + */
1733 + local_irq_save(flags);
1734 +
1735 + /*
1736 + * Check if this is a start multiple.
1737 + */
1738 + if (channels > 1) {
1739 +		DPRINTK("DMA ERROR: Start, multiple start not supported yet \n");
+		/* restore interrupts before the early return */
+		local_irq_restore(flags);
1740 +		return(-1);
1741 + } else {
1742 + /*
1743 + * Check if this channel is already transferring.
1744 + */
1745 + if (dma_chan[channel].xfer_enable && !dma_chan[channel].pause) {
1746 + printk(KERN_ERR
1747 + "DMA Start: Invalid command for channel %d.\n", channel);
1748 +
1749 + /*
1750 + * Unmask irqs
1751 + */
1752 + local_irq_restore(flags);
1753 +
1754 + /*
1755 + * This channel is already transferring, so return an error.
1756 + */
1757 + return(-EINVAL);
1758 + }
1759 +
1760 + /*
1761 + * If this is an M2M channel, call a different function.
1762 + */
1763 + if (channel >= 10) {
1764 + /*
1765 + * Unmask irqs
1766 + */
1767 + local_irq_restore(flags);
1768 +
1769 + /*
1770 + * Call the m2m start function. Only start one channel.
1771 + */
1772 + return(dma_start_m2m(channel, &dma_chan[channel]));
1773 + }
1774 +
1775 + /*
1776 + * Make sure the channel has at least one buffer in the queue.
1777 + */
1778 + if (dma_chan[channel].new_buffers < 1) {
1779 + DPRINTK("DMA Start: Channel starved.\n");
1780 +
1781 + /*
1782 + * This channel does not have enough buffers queued up,
1783 + * so enter the pause by starvation state.
1784 + */
1785 + dma_chan[channel].xfer_enable = TRUE;
1786 + dma_chan[channel].pause = TRUE;
1787 +
1788 + /*
1789 + * Unmask irqs
1790 + */
1791 + local_irq_restore(flags);
1792 +
1793 + /*
1794 + * Success.
1795 + */
1796 + return(0);
1797 + }
1798 +
1799 + /*
1800 + * Set up a dma instance pointer for this dma channel.
1801 + */
1802 + dma_pointers[0] = &dma_chan[channel];
1803 +
1804 + /*
1805 + * Set up a pointer to the register set for this channel.
1806 + */
1807 + M2P_reg_bases[0] = dma_pointers[0]->reg_base;
1808 + }
1809 +
1810 + /*
1811 +	 * Set up the MAXCNT0 register for each channel from the next buffer in
1812 +	 * its queue; MAXCNT1/BASE1 and the next frame buffer interrupt are
+	 * programmed further below once a second buffer is available.
1813 + */
1814 + for (loop = 0; loop < channels; loop++) {
1815 + /*
1816 + * Check if we need to restore a paused transfer.
1817 + */
1818 + if (dma_pointers[loop]->pause_buf.buf_id != -1)
1819 + outl( dma_pointers[loop]->pause_buf.size,
1820 + M2P_reg_bases[loop]+M2P_OFFSET_MAXCNT0 );
1821 + else
1822 + outl( dma_pointers[loop]->buffer_queue[dma_pointers[loop]->current_buffer].size,
1823 + M2P_reg_bases[loop]+M2P_OFFSET_MAXCNT0 );
1824 + }
1825 +
1826 + for (loop = 0; loop < channels; loop++) {
1827 + /*
1828 + * Enable the specified dma channels.
1829 + */
1830 + uiCONTROL = inl(M2P_reg_bases[loop]+M2P_OFFSET_CONTROL);
1831 + uiCONTROL |= CONTROL_M2P_ENABLE;
1832 + outl( uiCONTROL, M2P_reg_bases[loop]+M2P_OFFSET_CONTROL );
1833 +
1834 + /*
1835 + * Update the dma channel instance transfer state.
1836 + */
1837 + dma_pointers[loop]->xfer_enable = TRUE;
1838 + dma_pointers[loop]->pause = FALSE;
1839 + }
1840 +
1841 + /*
1842 + * Program up the BASE0 registers for all specified channels, this
1843 + * will initiate transfers on all specified channels.
1844 + */
1845 + for (loop = 0; loop < channels; loop++)
1846 + /*
1847 + * Check if we need to restore a paused transfer.
1848 + */
1849 + if (dma_pointers[loop]->pause_buf.buf_id != -1) {
1850 + outl( dma_pointers[loop]->pause_buf.source,
1851 + M2P_reg_bases[loop]+M2P_OFFSET_BASE0 );
1852 +
1853 + /*
1854 + * Set the pause buffer to NULL
1855 + */
1856 + dma_pointers[loop]->pause_buf.buf_id = -1;
1857 + dma_pointers[loop]->pause_buf.size = 0;
1858 + } else if(dma_pointers[loop]->new_buffers){
1859 + outl( dma_pointers[loop]->buffer_queue[
1860 + dma_pointers[loop]->current_buffer].source,
1861 + M2P_reg_bases[loop]+M2P_OFFSET_BASE0 );
1862 + dma_pointers[loop]->new_buffers--;
1863 +
1864 + }
1865 +
1866 + /*
1867 + * Before restoring irqs setup the second MAXCNT/BASE
1868 + * register with a second buffer.
1869 + */
1870 + for (loop = 0; loop < channels; loop++)
1871 + if (dma_pointers[loop]->new_buffers) {
1872 + /*
1873 + * By default we enable the next frame buffer interrupt.
1874 + */
1875 + uiCONTROL = inl(M2P_reg_bases[loop]+M2P_OFFSET_CONTROL);
1876 + uiCONTROL |= CONTROL_M2P_NFBINTEN;
1877 + outl( uiCONTROL, M2P_reg_bases[loop]+M2P_OFFSET_CONTROL );
1878 +
1879 + outl( dma_pointers[loop]->buffer_queue[
1880 + (dma_pointers[loop]->current_buffer + 1) %
1881 + MAX_EP93XX_DMA_BUFFERS].size,
1882 + M2P_reg_bases[loop]+M2P_OFFSET_MAXCNT1 );
1883 +
1884 + outl( dma_pointers[loop]->buffer_queue[
1885 + (dma_pointers[loop]->current_buffer + 1) %
1886 + MAX_EP93XX_DMA_BUFFERS].source,
1887 + M2P_reg_bases[loop]+M2P_OFFSET_BASE1 );
1888 + dma_pointers[loop]->new_buffers--;
1889 + }
1890 +
1891 + /*
1892 + DPRINTK("DMA - It's been started!!");
1893 + DPRINTK("STATUS - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_STATUS) );
1894 + DPRINTK("CONTROL - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CONTROL) );
1895 + DPRINTK("REMAIN - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_REMAIN) );
1896 + DPRINTK("PPALLOC - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_PPALLOC) );
1897 + DPRINTK("BASE0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_BASE0) );
1898 + DPRINTK("MAXCNT0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_MAXCNT0) );
1899 + DPRINTK("CURRENT0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CURRENT0) );
1900 + DPRINTK("BASE1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_BASE1) );
1901 + DPRINTK("MAXCNT1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_MAXCNT1) );
1902 + DPRINTK("CURRENT1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CURRENT1) );
1903 +
1904 + DPRINTK("Pause - %d \n", dma_pointers[0]->pause);
1905 + DPRINTK("xfer_enable - %d \n", dma_pointers[0]->xfer_enable);
1906 + DPRINTK("total bytes - 0x%x \n", dma_pointers[0]->total_bytes);
1907 + DPRINTK("total buffer - %d \n", dma_pointers[0]->total_buffers);
1908 + DPRINTK("new buffers - %d \n", dma_pointers[0]->new_buffers);
1909 + DPRINTK("current buffer - %d \n", dma_pointers[0]->current_buffer);
1910 + DPRINTK("last buffer - %d \n", dma_pointers[0]->last_buffer);
1911 + DPRINTK("used buffers - %d \n", dma_pointers[0]->used_buffers);
1912 + */
1913 + /*
1914 + * Unmask irqs
1915 + */
1916 + local_irq_restore(flags);
1917 +
1918 + /*
1919 + * Success.
1920 + */
1921 + return(0);
1922 +}
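+
+/*
+ * Illustrative usage sketch (assumes a handle obtained from
+ * ep93xx_dma_request and buffers already queued with ep93xx_dma_add_buffer):
+ * starting a channel whose queue is still empty only parks it in the
+ * pause-by-starvation state, and the transfer is then kicked off from
+ * ep93xx_dma_add_buffer.  Multiple-channel start (channels > 1) is not
+ * supported yet, so channels is always 1 and handles may be NULL.
+ *
+ *	if (ep93xx_dma_start(handle, 1, NULL) != 0)
+ *		printk(KERN_ERR "could not start DMA transfer\n");
+ */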
1923 +
1924 +/*****************************************************************************
1925 + *
1926 + * int ep93xx_dma_add_buffer(int handle, unsigned int source, unsigned int dest,
1927 + *                           unsigned int size, unsigned int last, unsigned int buf_id)
1928 + *
1929 + * Description: Add a buffer entry to the DMA buffer queue.
1930 + *
1931 + * handle:      handle for the channel to add this buffer to.
1932 + * source:      physical start address of the buffer to be queued.
1933 + * dest:        physical destination address (only used by the M2M channels).
1934 + * size:        size of the buffer in bytes.
1935 + * last:        1 if this is the last buffer in this stream, 0 otherwise.
+ * buf_id:      caller-chosen id handed back later by ep93xx_dma_remove_buffer().
1936 + *
1937 + ****************************************************************************/
1938 +int
1939 +ep93xx_dma_add_buffer(int handle, unsigned int source, unsigned int dest,
1940 + unsigned int size, unsigned int last,
1941 + unsigned int buf_id)
1942 +{
1943 + unsigned long flags;
1944 + ep93xx_dma_t * dma;
1945 + int channel;
1946 +#if 0
1947 + static int peak_total_buffers=0;
1948 +#endif
1949 + /*
1950 + * Get the DMA hw channel # from the handle.
1951 + */
1952 + channel = dma_get_channel_from_handle(handle);
1953 +
1954 + /*
1955 + * See if this is a valid handle.
1956 + */
1957 + if (channel < 0) {
1958 + printk(KERN_ERR
1959 + "DMA Add Buffer: Invalid dma handle.\n");
1960 + return(-EINVAL);
1961 + }
1962 +
1963 + /*
1964 + * Get a pointer to the dma instance.
1965 + */
1966 + dma = &dma_chan[channel];
1967 +
1968 +#if 0
1969 + if( dma->total_buffers > peak_total_buffers )
1970 + {
1971 + peak_total_buffers=dma->total_buffers;
1972 + printk("peak_total_buffers=%d\n", peak_total_buffers );
1973 + }
1974 +#endif
1975 + /*
1976 + * Mask interrupts and hold on to the original state.
1977 + */
1978 + local_irq_save(flags);
1979 +
1980 + /*
1981 + * If the buffer queue is full, last_buffer is the same as current_buffer and
1982 +	 * we're not transferring, or last_buffer is pointing to a used buffer, then exit.
1983 + * TODO: do I need to do any more checks?
1984 + */
1985 + if (dma->total_buffers >= MAX_EP93XX_DMA_BUFFERS)
1986 + {
1987 +		DPRINTK("too many dma buffers: MAX_EP93XX_DMA_BUFFERS set too low?\n");
1988 + /*
1989 + * Restore the state of the irqs
1990 + */
1991 + local_irq_restore(flags);
1992 +
1993 + /*
1994 + * Fail.
1995 + */
1996 + return(-1);
1997 + }
1998 +
1999 + /*
2000 + * Add this buffer to the queue
2001 + */
2002 + dma->buffer_queue[dma->last_buffer].source = source;
2003 + dma->buffer_queue[dma->last_buffer].dest = dest;
2004 + dma->buffer_queue[dma->last_buffer].size = size;
2005 + dma->buffer_queue[dma->last_buffer].last = last;
2006 + dma->buffer_queue[dma->last_buffer].buf_id = buf_id;
2007 +
2008 + /*
2009 + * Reset the used field of the buffer structure.
2010 + */
2011 + dma->buffer_queue[dma->last_buffer].used = FALSE;
2012 +
2013 + /*
2014 + * Increment the End Item Pointer.
2015 + */
2016 + dma->last_buffer = (dma->last_buffer + 1) % MAX_EP93XX_DMA_BUFFERS;
2017 +
2018 + /*
2019 + * Increment the new buffers counter and the total buffers counter
2020 + */
2021 + dma->new_buffers++;
2022 + dma->total_buffers++;
2023 +
2024 + /*
2025 + * restore the interrupt state.
2026 + */
2027 + local_irq_restore(flags);
2028 +
2029 + /*
2030 + * Check if the channel was starved into a stopped state.
2031 + */
2032 + if (dma->pause && dma->xfer_enable) {
2033 + if (dma->new_buffers >= 1) {
2034 + DPRINTK("DMA - calling start from add after starve. \n");
2035 +
2036 + /*
2037 +			 * The channel was starved into a stopped state, and we've got
2038 +			 * at least one new buffer, so start transferring again.
2039 + */
2040 + ep93xx_dma_start(handle, 1, 0);
2041 + }
2042 + }
2043 +
2044 + /*
2045 + * Success.
2046 + */
2047 + return(0);
2048 +}
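+
+/*
+ * Illustrative usage sketch: queueing a scatter list on a channel.  The
+ * addresses must be physical addresses, dest is only used by the two M2M
+ * channels, and buf_id may be any caller-chosen id except -1 (which marks
+ * a free queue slot internally).  sg_phys[], sg_len[] and nr_chunks are
+ * hypothetical driver-side variables.
+ *
+ *	for (i = 0; i < nr_chunks; i++)
+ *		ep93xx_dma_add_buffer(handle, sg_phys[i], 0, sg_len[i],
+ *				      (i == nr_chunks - 1), i);
+ */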
2049 +
2050 +/*****************************************************************************
2051 + *
2052 + * int ep93xx_dma_remove_buffer(int handle, unsigned int * buf_id)
2053 + *
2054 + * Description: Remove a used buffer entry from the DMA buffer queue.  If a
2055 + *              buffer was removed successfully, return 0, otherwise
2056 + *              return -1.
2057 + *
2058 + * handle:      handle for the channel to remove a buffer from.
2059 + * buf_id:      Pointer to an integer which is filled in with the buffer id
2060 + *              that the caller assigned when the buffer was added with
2061 + *              ep93xx_dma_add_buffer().
2064 + *
2065 + ****************************************************************************/
2066 +int
2067 +ep93xx_dma_remove_buffer(int handle, unsigned int * buf_id)
2068 +{
2069 + unsigned int test;
2070 + unsigned int loop;
2071 + int return_val = -1;
2072 + unsigned long flags;
2073 + ep93xx_dma_t *dma;
2074 + int channel;
2075 +
2076 + /*
2077 + * Get the DMA hw channel # from the handle.
2078 + */
2079 + channel = dma_get_channel_from_handle(handle);
2080 +
2081 + /*
2082 + * See if this is a valid handle.
2083 + */
2084 + if (channel < 0) {
2085 + printk(KERN_ERR
2086 + "DMA Remove Buffer: Invalid dma handle.\n");
2087 + return(-EINVAL);
2088 + }
2089 +
2090 + dma = &dma_chan[channel];
2091 +
2092 + /*
2093 + * Mask interrupts and hold on to the original state.
2094 + */
2095 + local_irq_save(flags);
2096 +
2097 + /*
2098 + * Make sure there are used buffers to be returned.
2099 + */
2100 + if (dma->used_buffers) {
2101 + test = dma->last_buffer;
2102 +
2103 + for (loop = 0; loop < MAX_EP93XX_DMA_BUFFERS; loop++) {
2104 + if (dma->buffer_queue[test].used && (dma->buffer_queue[test].buf_id != -1)) {
2105 + /*DPRINTK("buffer %d used \n", test); */
2106 +
2107 + /*
2108 + * This is a used buffer, fill in the buf_id pointer
2109 + * with the buf_id for this buffer.
2110 + */
2111 + *buf_id = dma->buffer_queue[test].buf_id;
2112 +
2113 + /*
2114 + * Reset this buffer structure
2115 + */
2116 + dma->buffer_queue[test].buf_id = -1;
2117 +
2118 + /*
2119 + * Decrement the used buffer counter, and the total buffer counter.
2120 + */
2121 + dma->used_buffers--;
2122 + dma->total_buffers--;
2123 +
2124 + /*
2125 + * Successful removal of a buffer, so set the return
2126 + * value to 0, then exit this loop.
2127 + */
2128 + return_val = 0;
2129 + break;
2130 + }
2131 +
2132 + /*
2133 + * This buffer isn't used, let's see if the next one is.
2134 + */
2135 + test = (test + 1) % MAX_EP93XX_DMA_BUFFERS;
2136 + }
2137 + }
2138 +
2139 + /*
2140 + * Restore interrupts.
2141 + */
2142 + local_irq_restore(flags);
2143 +
2144 + /*
2145 + * Success.
2146 + */
2147 + return(return_val);
2148 +}
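+
+/*
+ * Illustrative usage sketch: draining completed buffers, typically from the
+ * driver's DMA callback.  Each successful call hands back one buf_id that
+ * was passed to ep93xx_dma_add_buffer; a non-zero return means no used
+ * buffers remain.  recycle_buffer() is a hypothetical driver helper.
+ *
+ *	unsigned int id;
+ *
+ *	while (ep93xx_dma_remove_buffer(handle, &id) == 0)
+ *		recycle_buffer(id);
+ */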
2149 +
2150 +/*****************************************************************************
2151 + *
2152 + * int ep93xx_dma_pause(int handle, unsigned int channels,
2153 + * unsigned int * handles)
2154 + *
2155 + * Description: Disable any ongoing transfer for the given channel, retaining
2156 + * the state of the current buffer transaction so that upon
2157 + * resume, the dma will continue where it left off.
2158 + *
2159 + * handle: Handle for the channel to be paused. If this is a pause for
2160 + * handle:      Handle for the channel to be paused. If this is a pause
2161 + *              for multiple channels, handle is a valid handle for one of
+ *              the channels to be paused.
2162 + * channels:    number of channels to pause transfers on.
2163 + * handles: Pointer to an array of handles, one for each channel which
2164 + * to be paused. If this pause is intended only for one
2165 + * channel, this field should be set to NULL.
2166 + *
2167 + ****************************************************************************/
2168 +int
2169 +ep93xx_dma_pause(int handle, unsigned int channels, unsigned int * handles)
2170 +{
2171 + unsigned long flags;
2172 + ep93xx_dma_t * dma;
2173 + int channel;
2174 +
2175 + DPRINTK("ep93xx_dma_pause \n");
2176 +
2177 + /*
2178 + * Mask interrupts and hold on to the original state.
2179 + */
2180 + local_irq_save(flags);
2181 +
2182 + /*
2183 + * Get the DMA hw channel # from the handle.
2184 + */
2185 + channel = dma_get_channel_from_handle(handle);
2186 +
2187 + /*
2188 + * See if this is a valid handle.
2189 + */
2190 + if (channel < 0) {
2191 + /*
2192 + * restore interrupts.
2193 + */
2194 + local_irq_restore(flags);
2195 +
2196 + printk(KERN_ERR
2197 + "DMA Pause: Invalid dma handle.\n");
2198 +
2199 + /*
2200 + * Fail.
2201 + */
2202 + return(-EINVAL);
2203 + }
2204 +
2205 + DPRINTK("DMA %d: pause \n", channel);
2206 +
2207 + /*
2208 + * Set up a pointer to the dma instance data.
2209 + */
2210 + dma = &dma_chan[channel];
2211 +
2212 + /*
2213 + * Check if we're already paused.
2214 + */
2215 + if (dma->pause) {
2216 + /*
2217 + * We're paused, but are we stopped?
2218 + */
2219 + if (dma->xfer_enable)
2220 + /*
2221 + * Put the channel in the stopped state.
2222 + */
2223 + dma->xfer_enable = FALSE;
2224 +
2225 + DPRINTK("DMA Pause - already paused.");
2226 + } else {
2227 + /*
2228 + * Put the channel into the stopped state.
2229 + */
2230 + dma->xfer_enable = FALSE;
2231 + dma->pause = TRUE;
2232 + }
2233 +
2234 + /*
2235 + * restore interrupts.
2236 + */
2237 + local_irq_restore(flags);
2238 +
2239 + /*
2240 +	 * Success.
2241 + */
2242 + return(0);
2243 +}
2244 +
2245 +/*****************************************************************************
2246 + *
2247 + * int ep93xx_dma_flush(int handle)
2248 + *
2249 + * Description: Flushes all queued buffers and any transfer in progress
2250 + *              for the given channel.  All queued buffer entries are
2251 + *              discarded and the channel's buffer counters are reset.
2252 + *
2253 + * handle: handle for the channel for which the flush is intended.
2254 + *
2255 + ****************************************************************************/
2256 +int
2257 +ep93xx_dma_flush(int handle)
2258 +{
2259 + unsigned int loop;
2260 + unsigned long flags;
2261 + ep93xx_dma_t * dma;
2262 + int channel;
2263 + unsigned int M2P_reg_base,uiCONTROL;
2264 +
2265 + /*
2266 + * Get the DMA hw channel # from the handle.
2267 + */
2268 + channel = dma_get_channel_from_handle(handle);
2269 +
2270 + /*
2271 + * See if this is a valid handle.
2272 + */
2273 + if (channel < 0) {
2274 + printk(KERN_ERR "DMA Flush: Invalid dma handle.\n");
2275 + return(-EINVAL);
2276 + }
2277 +
2278 + DPRINTK("DMA %d: flush \n", channel);
2279 +
2280 + /*
2281 + * Set up a pointer to the dma instance data for this channel
2282 + */
2283 + dma = &dma_chan[channel];
2284 +
2285 + /*
2286 + * Mask interrupts and hold on to the original state.
2287 + */
2288 + local_irq_save(flags);
2289 +
2290 + /*
2291 + * Disable the dma channel
2292 + */
2293 + if (channel < 10) {
2294 + /*
2295 + * M2P channel
2296 + */
2297 + uiCONTROL = inl(dma->reg_base+M2P_OFFSET_CONTROL);
2298 + uiCONTROL &= ~CONTROL_M2P_ENABLE;
2299 + outl( uiCONTROL, dma->reg_base+M2P_OFFSET_CONTROL );
2300 + } else {
2301 + /*
2302 + * M2M channel
2303 + */
2304 + uiCONTROL = inl(dma->reg_base+M2M_OFFSET_CONTROL);
2305 + uiCONTROL &= ~CONTROL_M2M_ENABLE;
2306 + outl( uiCONTROL, dma->reg_base+M2M_OFFSET_CONTROL );
2307 + }
2308 +
2309 + for (loop = 0; loop < MAX_EP93XX_DMA_BUFFERS; loop++)
2310 + {
2311 + dma->buffer_queue[loop].buf_id = -1;
2312 + dma->buffer_queue[loop].last = 0;
2313 + }
2314 +
2315 + /*
2316 + * Set the Current and Last item to zero.
2317 + */
2318 + dma->current_buffer = 0;
2319 + dma->last_buffer = 0;
2320 +
2321 + /*
2322 + * Reset the Buffer counters
2323 + */
2324 + dma->used_buffers = 0;
2325 + dma->new_buffers = 0;
2326 + dma->total_buffers = 0;
2327 +
2328 + /*
2329 + * reset the Total bytes counter.
2330 + */
2331 + dma->total_bytes = 0;
2332 +
2333 + /*
2334 + * Reset the paused buffer.
2335 + */
2336 + dma->pause_buf.last = 0;
2337 + dma->pause_buf.buf_id = -1;
2338 +
2339 + M2P_reg_base = dma_chan[channel].reg_base;
2340 +
2341 + /*
2342 + * restore interrupts.
2343 + */
2344 + local_irq_restore(flags);
2345 +
2346 + /*
2347 + * Success.
2348 + */
2349 + return(0);
2350 +}
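+
+/*
+ * Illustrative usage sketch: a typical teardown pauses the channel first and
+ * then flushes it.  Everything still queued is discarded, so any buf_ids not
+ * yet collected with ep93xx_dma_remove_buffer are lost and the caller must
+ * reclaim those buffers itself.
+ *
+ *	ep93xx_dma_pause(handle, 1, NULL);
+ *	ep93xx_dma_flush(handle);
+ */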
2351 +
2352 +/*****************************************************************************
2353 + *
2354 + * int ep93xx_dma_queue_full(int handle)
2355 + *
2356 + * Description: Query to determine if the DMA queue of buffers for
2357 + * a given channel is full.
2358 + *              1 = queue is full
2359 + *              0 = queue is not full
2360 + *
2361 + * handle: handle for the channel to query.
2362 + *
2363 + ****************************************************************************/
2364 +int
2365 +ep93xx_dma_queue_full(int handle)
2366 +{
2367 + int list_full = 0;
2368 + unsigned long flags;
2369 + int channel;
2370 +
2371 + /*
2372 + * Get the DMA hw channel # from the handle.
2373 + */
2374 + channel = dma_get_channel_from_handle(handle);
2375 +
2376 + /*
2377 + * See if this is a valid handle.
2378 + */
2379 + if (channel < 0) {
2380 + printk(KERN_ERR "DMA Queue Full: Invalid dma handle.\n");
2381 + return(-EINVAL);
2382 + }
2383 +
2384 + DPRINTK("DMA %d: queue full \n", channel);
2385 +
2386 + /*
2387 + * Mask interrupts and hold on to the original state.
2388 + */
2389 + local_irq_save(flags);
2390 +
2391 + /*
2392 +	 * If the total number of queued buffers has reached the size of
2393 +	 * the queue, then the queue is full.
2394 + */
2395 + if (dma_chan[channel].total_buffers < MAX_EP93XX_DMA_BUFFERS)
2396 + list_full = FALSE;
2397 + else
2398 + list_full = TRUE;
2399 +
2400 + /*
2401 + * restore interrupts.
2402 + */
2403 + local_irq_restore(flags);
2404 +
2405 + return(list_full);
2406 +}
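+
+/*
+ * Illustrative usage sketch: a producer can use this query to avoid the
+ * error return from ep93xx_dma_add_buffer once all MAX_EP93XX_DMA_BUFFERS
+ * descriptors are in use.  buf_phys, buf_len and next_id are hypothetical
+ * driver-side variables.
+ *
+ *	if (!ep93xx_dma_queue_full(handle))
+ *		ep93xx_dma_add_buffer(handle, buf_phys, 0, buf_len, 0, next_id);
+ */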
2407 +
2408 +/*****************************************************************************
2409 + *
2410 + *  int ep93xx_dma_get_position(int handle, unsigned int * buf_id,
+ *                              unsigned int * total, unsigned int * current_frac)
2411 + *
2412 + *  Description: Reports the buffer currently transferring on the specified
2413 + *               DMA channel: its buffer id, the whole-buffer byte total for
2414 + *               the channel, and the bytes completed so far in the current
+ *               buffer.  Any of the output pointers may be NULL.
2415 + *
2416 + * handle handle for the channel to query.
2417 + * *buf_id buffer id for the current buffer transferring on the
2418 + * dma channel.
2419 + * *total total bytes transferred on the channel. Only counts
2420 + * whole buffers transferred.
2421 + * *current_frac number of bytes transferred so far in the current buffer.
2422 + ****************************************************************************/
2423 +int
2424 +ep93xx_dma_get_position(int handle, unsigned int * buf_id,
2425 + unsigned int * total, unsigned int * current_frac )
2426 +{
2427 + int channel;
2428 + ep93xx_dma_t * dma;
2429 + unsigned int buf_id1, total1, current_frac1, buf_id2, total2;
2430 + unsigned int Status, NextBuffer, StateIsBufNext, M2P_reg_base=0;
2431 + unsigned int pause1, pause2;
2432 +
2433 + /*
2434 + * Get the DMA hw channel # from the handle. See if this is a
2435 + * valid handle.
2436 + */
2437 + channel = dma_get_channel_from_handle(handle);
2438 + if (channel < 0) {
2439 + printk(KERN_ERR "DMA Get Position: Invalid dma handle.\n");
2440 + return(-EINVAL);
2441 + }
2442 +
2443 + dma = &dma_chan[channel];
2444 +
2445 + /*
2446 + * If DMA moves to a new buffer in the middle of us grabbing the
2447 + * buffer info, then do it over again.
2448 + */
2449 + do{
2450 + buf_id1 = dma->buffer_queue[dma->current_buffer].buf_id;
2451 + total1 = dma->total_bytes;
2452 + pause1 = dma->pause;
2453 +
2454 + if (channel < 10) {
2455 + // M2P
2456 + M2P_reg_base = dma->reg_base;
2457 +
2458 + Status = inl(M2P_reg_base+M2P_OFFSET_STATUS);
2459 +
2460 + NextBuffer = ((Status & STATUS_M2P_NEXTBUFFER) != 0);
2461 +
2462 + StateIsBufNext = ((Status & STATUS_M2P_CURRENT_MASK) ==
2463 + STATUS_M2P_DMA_BUF_NEXT);
2464 +
2465 + if( NextBuffer ^ StateIsBufNext )
2466 + current_frac1 = inl(M2P_reg_base+M2P_OFFSET_CURRENT1) -
2467 + inl(M2P_reg_base+M2P_OFFSET_BASE1);
2468 + else
2469 + current_frac1 = inl(M2P_reg_base+M2P_OFFSET_CURRENT0) -
2470 + inl(M2P_reg_base+M2P_OFFSET_BASE0);
2471 +
2472 + } else {
2473 + // M2M - TODO implement this for M2M
2474 + current_frac1 = 0;
2475 + }
2476 +
2477 + buf_id2 = dma->buffer_queue[dma->current_buffer].buf_id;
2478 + total2 = dma->total_bytes;
2479 + pause2 = dma->pause;
2480 +
2481 + } while ( (buf_id1 != buf_id2) || (total1 != total2) || (pause1 != pause2) );
2482 +
2483 + if (pause1)
2484 + current_frac1 = 0;
2485 +
2486 + if (buf_id)
2487 + *buf_id = buf_id1;
2488 +
2489 + if (total)
2490 + *total = total1;
2491 +
2492 + if (current_frac)
2493 + *current_frac = current_frac1;
2494 +
2495 +// DPRINTK("DMA buf_id %d, total %d, frac %d\n", buf_id1, total1, current_frac1);
2496 +
2497 + /*
2498 + * Success.
2499 + */
2500 + return(0);
2501 +}
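+
+/*
+ * Illustrative usage sketch: a client (an audio driver, for instance) can
+ * derive its position in bytes by adding the whole-buffer total to the
+ * fraction of the current buffer the hardware has consumed.  Any of the
+ * output pointers may be passed as NULL when that value is not needed.
+ *
+ *	unsigned int id, total, frac, position;
+ *
+ *	if (ep93xx_dma_get_position(handle, &id, &total, &frac) == 0)
+ *		position = total + frac;
+ */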
2502 +
2503 +/*****************************************************************************
2504 + *
2505 + * int ep93xx_dma_get_total(int handle)
2506 + *
2507 + * Description: Returns the total number of bytes transferred on the
2508 + * specified channel since the channel was requested.
2509 + *
2510 + * handle: handle for the channel to query.
2511 + *
2512 + ****************************************************************************/
2513 +int
2514 +ep93xx_dma_get_total(int handle)
2515 +{
2516 + int channel;
2517 +
2518 + /*
2519 + * Get the DMA hw channel # from the handle.
2520 + */
2521 + channel = dma_get_channel_from_handle(handle);
2522 +
2523 + /*
2524 + * See if this is a valid handle.
2525 + */
2526 + if (channel < 0) {
2527 + printk(KERN_ERR "DMA Get Total: Invalid dma handle.\n");
2528 + return(-EINVAL);
2529 + }
2530 +
2531 + DPRINTK("DMA %d: total: %d \n", channel, dma_chan[channel].total_bytes);
2532 +
2533 + /*
2534 + * Return the total number of bytes transferred on this channel since
2535 + * it was requested.
2536 + */
2537 + return(dma_chan[channel].total_bytes);
2538 +}
2539 +
2540 +/*****************************************************************************
2541 + *
2542 + * int ep93xx_dma_is_done(int handle)
2543 + *
2544 + * Description: Determines if the specified channel is done
2545 + * transferring the requested data.
2546 + *
2547 + * handle: handle for the channel to query.
2548 + *
2549 + ****************************************************************************/
2550 +int
2551 +ep93xx_dma_is_done(int handle)
2552 +{
2553 + ep93xx_dma_t *dma;
2554 + int channel;
2555 +
2556 + /*
2557 + * Get the DMA hw channel # from the handle.
2558 + */
2559 + channel = dma_get_channel_from_handle(handle);
2560 +
2561 + /*
2562 + * See if this is a valid handle.
2563 + */
2564 + if (channel < 0) {
2565 + printk(KERN_ERR "ep93xx_dma_is_done: Invalid dma handle.\n");
2566 + return(-EINVAL);
2567 + }
2568 +
2569 + /*
2570 + * Get a pointer to the DMA channel state structure.
2571 + */
2572 + dma = &dma_chan[channel];
2573 +
2574 + /*
2575 + * See if there are any buffers remaining to be provided to the HW.
2576 + */
2577 + if (dma->new_buffers)
2578 + return 0;
2579 +
2580 + /*
2581 + * See if this is a M2P or M2M channel.
2582 + */
2583 + if (channel < 10) {
2584 + /*
2585 + * If the bytes remaining register of the HW is not zero, then
2586 + * there is more work to be done.
2587 + */
2588 + if (inl(dma->reg_base + M2P_OFFSET_REMAIN) != 0)
2589 + return 0;
2590 + } else {
2591 + /*
2592 + * If either byte count register in the HW is not zero, then there
2593 + * is more work to be done.
2594 + */
2595 + if ((inl(dma->reg_base + M2M_OFFSET_BCR0) != 0) ||
2596 + (inl(dma->reg_base + M2M_OFFSET_BCR1) != 0))
2597 + return 0;
2598 + }
2599 +
2600 + /*
2601 + * The DMA is complete.
2602 + */
2603 + return 1;
2604 +}
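+
+/*
+ * Illustrative usage sketch: a simple caller can poll this after queueing
+ * its final buffer (last == 1) to wait for the hardware to drain.  A
+ * callback set via ep93xx_dma_config is normally preferable to polling.
+ *
+ *	while (!ep93xx_dma_is_done(handle))
+ *		cpu_relax();
+ */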
2605 +
2606 +/*****************************************************************************
2607 + * ep93xx_dma_request
2608 + *
2609 + * Description: This function will allocate a DMA channel for a particular
2610 + * hardware peripheral. Before initiating a transfer on the allocated
2611 + * channel, the channel must be set up and buffers have to queued up.
2612 + * channel, the channel must be set up and buffers have to be queued up.
2613 + * handle: pointer to an integer which is filled in with a unique
2614 + * handle for this instance of the dma interface.
2615 + * device_id string with the device name, primarily used by /proc.
2616 + * device hardware device ID for which the requested dma channel will
2617 + * transfer data.
2618 + *
2619 + ****************************************************************************/
2620 +int
2621 +ep93xx_dma_request(int * handle, const char *device_id,
2622 + ep93xx_dma_dev_t device)
2623 +{
2624 + ep93xx_dma_t *dma = NULL;
2625 + int channel;
2626 + unsigned int error = 0;
2627 + unsigned int loop;
2628 + unsigned int M2P_reg_base;
2629 +
2630 + /*
2631 + * Check if the device requesting a DMA channel is a valid device.
2632 + */
2633 + if ((device >= UNDEF_DMA) || (device < 0))
2634 + return(-ENODEV);
2635 +
2636 + /*
2637 + * We've got a valid hardware device requesting a DMA channel.
2638 + * Now check if the device should open an M2P or M2M channel
2639 + */
2640 + if (device < 20)
2641 + channel = dma_open_m2p(device);
2642 + else
2643 + channel = dma_open_m2m(device);
2644 +
2645 + /*
2646 + * Check if we successfully opened a DMA channel
2647 + */
2648 + if (channel < 0) {
2649 + printk(KERN_ERR "%s: Could not open dma channel for this device.\n",
2650 + device_id);
2651 + return(-EBUSY);
2652 + }
2653 +
2654 + dma = &dma_chan[channel];
2655 +
2656 + if(dma->terminated==1) {
2657 + free_irq(dma->irq, (void *) dma);
2658 + dma->terminated=0;
2659 + }
2660 +
2661 + /*
2662 + * Request the appropriate IRQ for the specified channel
2663 + */
2664 + if (channel < 10)
2665 + error = request_irq(dma->irq, dma_m2p_irq_handler,
2666 + IRQF_DISABLED, device_id, (void *) dma);
2667 + else
2668 + error = request_irq(dma->irq, &dma_m2m_irq_handler,
2669 + IRQF_DISABLED, device_id, (void *) dma);
2670 +
2671 + /*
2672 + * Check for any errors during the irq request
2673 + */
2674 + if (error) {
2675 + printk(KERN_ERR "%s: unable to request IRQ %d for DMA channel\n",
2676 + device_id, dma->irq);
2677 + return(error);
2678 + }
2679 +
2680 + /*
2681 + * Generate a valid handle and exit.
2682 + *
2683 + * Increment the last valid handle.
2684 + * Check for wraparound (unlikely, but we like to be complete).
2685 + */
2686 + dma->last_valid_handle++;
2687 +
2688 + if ( (dma->last_valid_handle & DMA_HANDLE_SPECIFIER_MASK) !=
2689 + (channel << 28) )
2690 + dma->last_valid_handle = (channel << 28) + 1;
2691 +
2692 + /*
2693 + * Fill in the handle pointer with a valid handle for
2694 + * this dma channel instance.
2695 + */
2696 + *handle = dma->last_valid_handle;
2697 +
2698 + DPRINTK("Handle for channel %d: 0x%x\n", channel, *handle);
2699 +
2700 + /*
2701 + * Save the device ID and device name.
2702 + */
2703 + dma->device = device;
2704 + dma->device_id = device_id;
2705 +
2706 + /*
2707 + * Init all fields within the dma instance.
2708 + */
2709 + for (loop = 0; loop < MAX_EP93XX_DMA_BUFFERS; loop++)
2710 + dma->buffer_queue[loop].buf_id = -1;
2711 +
2712 + /*
2713 + * Initialize all buffer queue variables.
2714 + */
2715 + dma->current_buffer = 0;
2716 + dma->last_buffer = 0;
2717 +
2718 + dma->new_buffers = 0;
2719 + dma->used_buffers = 0;
2720 + dma->total_buffers = 0;
2721 +
2722 + /*
2723 + * Initialize the total bytes variable
2724 + */
2725 + dma->total_bytes = 0;
2726 +
2727 + /*
2728 + * Initialize the transfer and pause state variables to 0.
2729 + */
2730 + dma->xfer_enable = 0;
2731 +
2732 + dma->pause = 0;
2733 +
2734 + /*
2735 + * Initialize the pause buffer structure.
2736 + */
2737 + dma->pause_buf.buf_id = -1;
2738 +
2739 + /*
2740 + * Initialize the callback function and user data fields.
2741 + */
2742 + dma->callback = NULL;
2743 +
2744 + /*
2745 + * User data used as a parameter for the Callback function. The user
2746 + * sets up the data and sends it with the callback function.
2747 + */
2748 + dma->user_data = 0;
2749 +
2750 + M2P_reg_base = dma_chan[channel].reg_base;
2751 +
2752 + /*
2753 + * Debugging message.
2754 + */
2755 + DPRINTK("Successfully requested dma channel %d\n", channel);
2756 + DPRINTK("STATUS - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_STATUS) );
2757 + DPRINTK("CONTROL - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CONTROL) );
2758 + DPRINTK("REMAIN - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_REMAIN) );
2759 + DPRINTK("PPALLOC - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_PPALLOC) );
2760 + DPRINTK("BASE0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_BASE0) );
2761 + DPRINTK("MAXCNT0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_MAXCNT0) );
2762 + DPRINTK("CURRENT0 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CURRENT0) );
2763 + DPRINTK("BASE1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_BASE1) );
2764 + DPRINTK("MAXCNT1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_MAXCNT1) );
2765 + DPRINTK("CURRENT1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CURRENT1) );
2766 +
2767 + DPRINTK("Buffer source size last used \n");
2768 + for (loop = 0; loop < 5; loop ++)
2769 + DPRINTK("%d 0x%x 0x%x %d %d \n",
2770 + loop, dma->buffer_queue[loop].source, dma->buffer_queue[loop].size,
2771 + dma->buffer_queue[loop].last, dma->buffer_queue[loop].used);
2772 + DPRINTK("pause 0x%x 0x%x %d %d \n",
2773 + dma->pause_buf.source, dma->pause_buf.size,
2774 + dma->pause_buf.last, dma->pause_buf.used);
2775 +
2776 + DPRINTK("Pause - %d \n", dma->pause);
2777 + DPRINTK("xfer_enable - %d \n", dma->xfer_enable);
2778 + DPRINTK("total bytes - 0x%x \n", dma->total_bytes);
2779 + DPRINTK("total buffer - %d \n", dma->total_buffers);
2780 + DPRINTK("new buffers - %d \n", dma->new_buffers);
2781 + DPRINTK("current buffer - %d \n", dma->current_buffer);
2782 + DPRINTK("last buffer - %d \n", dma->last_buffer);
2783 + DPRINTK("used buffers - %d \n", dma->used_buffers);
2784 +
2785 + DPRINTK("CURRENT1 - 0x%x \n", inl(M2P_reg_base+M2P_OFFSET_CURRENT1) );
2786 + DPRINTK("VIC0IRQSTATUS - 0x%x, VIC0INTENABLE - 0x%x \n",
2787 + *(unsigned int *)(VIC0IRQSTATUS),
2788 + *(unsigned int *)(VIC0INTENABLE));
2789 +
2790 + /*
2791 + * Success.
2792 + */
2793 + return(0);
2794 +}
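+
+/*
+ * Illustrative sketch of the client lifecycle built from the calls exported
+ * by this file.  DMA_TX_DEV stands in for one of the ep93xx_dma_dev_t
+ * hardware device ids and buf_phys/buf_len for a DMA-able buffer owned by
+ * the driver; the channel flags and callback are set with ep93xx_dma_config,
+ * whose parameters are defined earlier in this file and omitted here.
+ *
+ *	int handle;
+ *
+ *	if (ep93xx_dma_request(&handle, "my-driver", DMA_TX_DEV) != 0)
+ *		return -EBUSY;
+ *	ep93xx_dma_add_buffer(handle, buf_phys, 0, buf_len, 1, 0);
+ *	ep93xx_dma_start(handle, 1, NULL);
+ *	...
+ *	ep93xx_dma_free(handle);
+ */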
2795 +
2796 +/*****************************************************************************
2797 + *
2798 + * ep93xx_dma_free
2799 + *
2800 + * Description: This function will free the dma channel for future requests.
2801 + *
2802 + * handle: handle for the channel to be freed.
2803 + *
2804 + ****************************************************************************/
2805 +int
2806 +ep93xx_dma_free(int handle)
2807 +{
2808 + ep93xx_dma_t *dma;
2809 + unsigned int M2M_reg_base, M2P_reg_base, uiCONTROL;
2810 + int channel;
2811 +
2812 + /*
2813 + * Get the DMA hw channel # from the handle.
2814 + */
2815 + channel = dma_get_channel_from_handle(handle);
2816 +
2817 + /*
2818 + * See if this is a valid handle.
2819 + */
2820 + if (channel < 0) {
2821 + printk(KERN_ERR "DMA Free: Invalid dma handle.\n");
2822 + return(-EINVAL);
2823 + }
2824 +
2825 + /*
2826 + * Get a pointer to the dma instance.
2827 + */
2828 + dma = &dma_chan[channel];
2829 +
2830 + /*
2831 + * Disable the dma channel
2832 + */
2833 + if (channel < 10) {
2834 + /*
2835 + * M2P channel
2836 + */
2837 + M2P_reg_base = dma->reg_base;
2838 +
2839 + uiCONTROL = inl(M2P_reg_base+M2P_OFFSET_CONTROL);
2840 + uiCONTROL &= ~CONTROL_M2P_ENABLE;
2841 + outl( uiCONTROL, M2P_reg_base+M2P_OFFSET_CONTROL );
2842 + } else {
2843 + /*
2844 + * M2M channel
2845 + */
2846 + M2M_reg_base = dma->reg_base;
2847 +
2848 + uiCONTROL = inl(M2M_reg_base+M2M_OFFSET_CONTROL);
2849 + uiCONTROL &= ~CONTROL_M2M_ENABLE;
2850 + outl( uiCONTROL, M2M_reg_base+M2M_OFFSET_CONTROL );
2851 + }
2852 +
2853 + /*
2854 + * Free the interrupt servicing this dma channel
2855 + */
2856 + //free_irq(dma->irq, (void *) dma);
2857 + dma->terminated=1;
2858 +
2859 + /*
2860 + * Decrement the reference count for this instance of the dma interface
2861 + */
2862 + dma->ref_count--;
2863 +
2864 + /*
2865 + * Set the transfer and pause state variables to 0
2866 + * (unititialized state).
2867 +	 * (uninitialized state).
2868 + dma->xfer_enable = 0;
2869 + dma->pause = 0;
2870 +
2871 + /*
2872 + * Debugging message.
2873 + */
2874 + DPRINTK("Successfully freed dma channel %d\n", channel);
2875 + /*
2876 + * Success.
2877 + */
2878 + return(0);
2879 +}
2880 +
2881 +/*****************************************************************************
2882 + *
2883 + * ep93xx_dma_init(void)
2884 + *
2885 + * Description: This function is called during system initialization to
2886 + * setup the interrupt number and register set base address for each DMA
2887 + * channel.
2888 + *
2889 + ****************************************************************************/
2890 +static int __init
2891 +ep93xx_dma_init(void)
2892 +{
2893 + int channel;
2894 +
2895 + /*
2896 + * Init some values in each dma instance.
2897 + */
2898 + for (channel = 0; channel < MAX_EP93XX_DMA_CHANNELS; channel++) {
2899 + /*
2900 + * IRQ for the specified dma channel.
2901 + */
2902 + dma_chan[channel].irq = IRQ_EP93XX_DMAM2P0 + channel;
2903 +
2904 + dma_chan[channel].terminated = 0;
2905 +
2906 + /*
2907 + * Initial value of the dma channel handle.
2908 + */
2909 + dma_chan[channel].last_valid_handle = channel << 28;
2910 +
2911 + /*
2912 + * Give the instance a pointer to the dma channel register
2913 + * base.
2914 + */
2915 + if (channel < 10)
2916 + dma_chan[channel].reg_base = DMAM2PChannelBase[channel];
2917 + else
2918 + dma_chan[channel].reg_base = DMAM2MChannelBase[channel - 10];
2919 +
2920 + /*
2921 + * Initialize the reference count for this channel.
2922 + */
2923 + dma_chan[channel].ref_count = 0;
2924 + }
2925 +
2926 +	DPRINTK("DMA Interface initialization complete\n");
2927 +
2928 + /*
2929 + * Success
2930 + */
2931 + return 0;
2932 +}
2933 +
2934 +arch_initcall(ep93xx_dma_init);
2935 +
2936 +EXPORT_SYMBOL(ep93xx_dma_free);
2937 +EXPORT_SYMBOL(ep93xx_dma_request);
2938 +EXPORT_SYMBOL(ep93xx_dma_flush);
2939 +EXPORT_SYMBOL(ep93xx_dma_pause);
2940 +EXPORT_SYMBOL(ep93xx_dma_remove_buffer);
2941 +EXPORT_SYMBOL(ep93xx_dma_add_buffer);
2942 +EXPORT_SYMBOL(ep93xx_dma_start);
2943 +EXPORT_SYMBOL(ep93xx_dma_config);
2944 --- /dev/null
2945 +++ b/arch/arm/mach-ep93xx/dma_ep93xx.h
2946 @@ -0,0 +1,676 @@
2947 +/*****************************************************************************
2948 + *
2949 + * arch/arm/mach-ep93xx/dma_ep93xx.h
2950 + *
2951 + * DESCRIPTION: 93XX DMA controller API private definitions.
2952 + *
2953 + * Copyright Cirrus Logic Corporation, 2003. All rights reserved
2954 + *
2955 + * This program is free software; you can redistribute it and/or modify
2956 + * it under the terms of the GNU General Public License as published by
2957 + * the Free Software Foundation; either version 2 of the License, or
2958 + * (at your option) any later version.
2959 + *
2960 + * This program is distributed in the hope that it will be useful,
2961 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2962 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2963 + * GNU General Public License for more details.
2964 + *
2965 + * You should have received a copy of the GNU General Public License
2966 + * along with this program; if not, write to the Free Software
2967 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
2968 + *
2969 + ****************************************************************************/
2970 +#ifndef _EP93XX_DMA_H_
2971 +#define _EP93XX_DMA_H_
2972 +
2973 +// As it turns out, the IDE DMA is the biggest DMA buffer hog so far:
2974 +// while the HDD is "thinking" (seek/buffer flush),
2975 +// the continuing r/w DMAs to the HDD will be queued up, up to PRD_ENTRIES entries.
2976 +#include <linux/ide.h>
2977 +#define MAX_EP93XX_DMA_BUFFERS PRD_ENTRIES
2978 +
2979 +#ifndef TRUE
2980 +#define TRUE 1
2981 +#endif
2982 +
2983 +#ifndef FALSE
2984 +#define FALSE 0
2985 +#endif
2986 +
2987 +#ifndef NULL
2988 +#define NULL 0
2989 +#endif
2990 +
2991 +#define EP93XX_DMA_BASE (EP93XX_AHB_VIRT_BASE + 0x00000000)
2992 +
2993 +/*****************************************************************************
2994 + * 0x8000.0000 -> 0x8000.003C M2P Channel 0 Registers (Tx)
2995 + * 0x8000.0040 -> 0x8000.007C M2P Channel 1 Registers (Rx)
2996 + * 0x8000.0080 -> 0x8000.00BC M2P Channel 2 Registers (Tx)
2997 + * 0x8000.00C0 -> 0x8000.00FC M2P Channel 3 Registers (Rx)
2998 + * 0x8000.0100 -> 0x8000.013C M2M Channel 0 Registers
2999 + * 0x8000.0140 -> 0x8000.017C M2M Channel 1 Registers
3000 + * 0x8000.0180 -> 0x8000.01BC Not Used
3001 + * 0x8000.01C0 -> 0x8000.01FC Not Used
3002 + * 0x8000.0200 -> 0x8000.023C M2P Channel 5 Registers (Rx)
3003 + * 0x8000.0240 -> 0x8000.027C M2P Channel 4 Registers (Tx)
3004 + * 0x8000.0280 -> 0x8000.02BC M2P Channel 7 Registers (Rx)
3005 + * 0x8000.02C0 -> 0x8000.02FC M2P Channel 6 Registers (Tx)
3006 + * 0x8000.0300 -> 0x8000.033C M2P Channel 9 Registers (Rx)
3007 + * 0x8000.0340 -> 0x8000.037C M2P Channel 8 Registers (Tx)
3008 + * 0x8000.0380 DMA Channel Arbitration register
3009 + * 0x8000.03C0 DMA Global Interrupt register
3010 + * 0x8000.03C4 -> 0x8000.03FC Not Used
3011 + *
3012 + *
3013 + * Internal M2P/P2M Channel Register Map
3014 + *
3015 + * Offset Name Access Bits Reset Value
3016 + * 0x00 CONTROL R/W 6 0
3017 + * 0x04 INTERRUPT R/W TC* 3 0
3018 + *    0x08        PPALLOC         R/W     4       channel dependent
3019 + * (see reg description)
3020 + * 0x0C STATUS RO 8 0
3021 + * 0x10 reserved
3022 + * 0x14 REMAIN RO 16 0
3023 + * 0X18 Reserved
3024 + * 0X1C Reserved
3025 + * 0x20 MAXCNT0 R/W 16 0
3026 + * 0x24 BASE0 R/W 32 0
3027 + * 0x28 CURRENT0 RO 32 0
3028 + * 0x2C Reserved
3029 + * 0x30 MAXCNT1 R/W 16 0
3030 + * 0x34 BASE1 R/W 32 0
3031 + * 0X38 CURRENT1 RO 32 0
3032 + * 0X3C Reserved
3033 + *
3034 + * M2M Channel Register Map
3035 + * Offset Name Access Bits Reset Value
3036 + *
3037 + * 0x00 CONTROL R/W 22 0
3038 + * 0x04 INTERRUPT R/W TC* 3 0
3039 + * 0x08 Reserved
3040 + * 0x0C STATUS R/W TC* 14 0
3041 + * 0x10 BCR0 R/W 16 0
3042 + * 0x14 BCR1 R/W 16 0
3043 + * 0x18 SAR_BASE0 R/W 32 0
3044 + * 0x1C SAR_BASE1 R/W 32 0
3045 + * 0x20 Reserved
3046 + * 0x24 SAR_CURRENT0 RO 32 0
3047 + * 0x28 SAR_CURRENT1 RO 32 0
3048 + * 0x2C DAR_BASE0 R/W 32 0
3049 + * 0x30 DAR_BASE1 R/W 32 0
3050 + * 0x34 DAR_CURRENT0 RO 32 0
3051 + * 0X38 Reserved
3052 + * 0X3C DAR_CURRENT1 RO 32 0
3053 + * * Write this location once to clear the bit (see
3054 + * Interrupt/Status register description for which bits
3055 + * this rule applies to).
3056 + *
3057 + ****************************************************************************/
3058 +
3059 +
3060 +/*----------------------------------------------------------------------------------*/
3061 +/* M2P Registers */
3062 +/*----------------------------------------------------------------------------------*/
3063 +/*
3064 + * M2P CONTROL register bit defines
3065 + */
3066 +#define CONTROL_M2P_STALLINTEN 0x00000001 /* Enables the STALL interrupt */
3067 +#define CONTROL_M2P_NFBINTEN 0x00000002 /* Enables the NFB interrupt */
3068 +#define CONTROL_M2P_CHERRORINTEN 0x00000008 /* Enables the ChError interrupt*/
3069 +#define CONTROL_M2P_ENABLE 0x00000010 /* Enables the channel */
3070 +#define CONTROL_M2P_ABRT 0x00000020 /* Determines how DMA behaves in*/
3071 + /* NEXT state with peripheral */
3072 + /* error */
3073 + /* 0: NEXT -> ON, ignore error */
3074 + /* 1: NEXT -> STALL, disable ch.*/
3075 +#define CONTROL_M2P_ICE 0x00000040 /* Ignore Channel Error */
3076 +
3077 +/*
3078 + * M2P INTERRUPT register bit defines
3079 + */
3080 +#define INTERRUPT_M2P_STALLINT 0x00000001 /* Indicates channel stalled. */
3081 +#define INTERRUPT_M2P_NFBINT 0x00000002 /* Indicates channel is hungry. */
3082 +#define INTERRUPT_M2P_CHERRORINT 0x00000008 /* Peripheral detects error */
3083 +
3084 +
3085 +/*
3086 + * STATUS register bit defines
3087 + */
3088 +#define STATUS_M2P_STALL 0x00000001 /* A '1' indicates channel is */
3089 + /* stalled */
3090 +#define STATUS_M2P_NFB 0x00000002 /* A '1' indicates channel has moved*/
3091 + /* from NEXT state to ON state, but */
3092 + /* waiting for next buffer to be */
3093 + /* programmed. */
3094 +#define STATUS_M2P_CHERROR 0x00000008 /* Enables the ChError interrupt */
3095 +#define STATUS_M2P_CURRENT_MASK 0x00000030 /* Current state of the FSM */
3096 +#define STATUS_M2P_CURRENT_SHIFT 4
3097 +#define STATUS_M2P_NEXTBUFFER 0x00000040 /* Informs the int handler after an */
3098 + /* NFB int which pair of maxcnt and */
3099 + /* base regs to update. */
3100 +#define STATUS_M2P_BYTES_MASK 0x0000f800 /* number of valid DMA data */
3101 +#define STATUS_M2P_BYTES_SHIFT 7 /* currently in */
3102 + /* packer/unpacker */
3103 +
3104 +#define STATUS_M2P_DMA_NO_BUF 0x00000000
3105 +#define STATUS_M2P_DMA_BUF_ON 0x00000010
3106 +#define STATUS_M2P_DMA_BUF_NEXT 0x00000020
3107 +
3108 +/*
3109 + * Register masks to mask off reserved bits after reading register.
3110 + */
3111 +#define M2P_MASK_PPALLOC 0x0000000f
3112 +#define M2P_MASK_REMAIN 0x0000ffff
3113 +#define M2P_MASK_MAXCNT0 0x0000ffff
3114 +#define M2P_MASK_BASE0 0xffffffff
3115 +#define M2P_MASK_CURRENT0 0xffffffff
3116 +#define M2P_MASK_MAXCNT1 0x0000ffff
3117 +#define M2P_MASK_BASE1 0xffffffff
3118 +#define M2P_MASK_CURRENT1 0xffffffff
3119 +
3120 +
3121 +/*----------------------------------------------------------------------------------*/
3122 +/* M2M Registers */
3123 +/*----------------------------------------------------------------------------------*/
3124 +
3125 +#define CONTROL_M2M_STALLINTEN 0x00000001 /* Enables the STALL interrupt */
3126 +#define CONTROL_M2M_SCT 0x00000002 /* Source Copy Transfer. Setup a */
3127 + /* block transfer from 1 memory source */
3128 + /* location. */
3129 +#define CONTROL_M2M_DONEINTEN 0x00000004 /* Enables the DONE interrupt which */
3130 + /* indicates if the xfer completed */
3131 + /* successfully */
3132 +#define CONTROL_M2M_ENABLE 0x00000008 /* Enables the channel */
3133 +#define CONTROL_M2M_START 0x00000010 /* Initiates the xfer. 'software trigger' */
3134 +#define CONTROL_M2M_BWC_MASK 0x000001e0 /* Bandwidth control. Indicate number of */
3135 +#define CONTROL_M2M_BWC_SHIFT 5 /* bytes in a transfer. */
3136 +#define CONTROL_M2M_PW_MASK 0x00000600 /* Peripheral width. Used for xfers */
3137 +#define CONTROL_M2M_PW_SHIFT 9 /* between memory and external peripheral. */
3138 + /* 00: byte, 01: halfword, 10: word. */
3139 +#define CONTROL_M2M_DAH 0x00000800 /* Destination Address Hold */
3140 +#define CONTROL_M2M_SAH 0x00001000 /* Source Address Hold */
3141 +#define CONTROL_M2M_TM_MASK 0x00006000 /* Transfer Mode. 00: sw triggered, */
3142 +#define CONTROL_M2M_TM_SHIFT 13 /* 01: hw initiated M2P, 01: hw initiated P2M */
3143 +#define CONTROL_M2M_ETDP_MASK 0x00018000 /* End-of-Transfer/Terminal Count pin */
3144 +#define CONTROL_M2M_ETDP_SHIFT 15 /* direction and polarity. */
3145 +#define CONTROL_M2M_DACKP 0x00020000 /* DMA acknowledge pin polarity */
3146 +
3147 +#define CONTROL_M2M_DREQP_MASK 0x00180000 /* DMA request pin polarity. must be set */
3148 +#define CONTROL_M2M_DREQP_SHIFT 19 /* before enable bit. */
3149 +#define CONTROL_M2M_NFBINTEN 0x00200000 /* Enables generation of the NFB interrupt. */
3150 +#define CONTROL_M2M_RSS_MASK 0x00c00000 /* Request source selection: */
3151 +#define CONTROL_M2M_RSS_SHIFT 22 /* 000 - External DReq[0] */
3152 + /* 001 - External DReq[1] */
3153 + /* 01X - Internal SSPRx */
3154 + /* 10X - Internal SSPTx */
3155 + /* 11X - Internal IDE */
3156 +#define CONTROL_M2M_NO_HDSK 0x01000000 /* No handshake. When set the peripheral doesn't */
3157 +                                               /* require the regular handshake protocol. Must     */
3158 + /* be set for SSP and IDE operations, optional */
3159 + /* for external peripherals. */
3160 +#define CONTROL_M2M_PWSC_MASK 0xfe000000 /* Peripheral wait states count. Gives the latency */
3161 +#define CONTROL_M2M_PWSC_SHIFT 25 /* (in PCLK cycles) needed by the peripheral to */
3162 +                                               /* deassert its request once the M2M xfer w/ DMA     */
3163 + /* is complete. */
3164 +
3165 +/*
3166 + * M2M INTERRUPT register bit defines
3167 + */
3168 +#define INTERRUPT_M2M_STALLINT 0x00000001 /* Stall interrupt indicates channel stalled. */
3169 +#define INTERRUPT_M2M_DONEINT 0x00000002 /* Transaction done. */
3170 +#define INTERRUPT_M2M_NFBINT 0x00000004 /* Next frame buffer interrupt indicates */
3171 + /* channel requires a new buffer */
3172 +
3173 +
3174 +
3175 +/*
3176 + * M2M STATUS register bit defines
3177 + */
3178 +#define STATUS_M2M_STALL 0x00000001 /* A '1' indicates channel is stalled */
3179 +#define STATUS_M2M_CURRENTSTATE_MASK 0x0000003e /* Indicates state of M2M Channel control */
3180 +#define STATUS_M2M_CURRENTSTATE_SHIFT 1 /* FSM (0-2): */
3181 + /* 000 - IDLE, 001 - STALL, 010 - MEM_RD, */
3182 + /* 011 - MEM_WR, 100 - BWC_WAIT */
3183 + /* and M2M buffer FSM (3-2): */
3184 + /* 00 - NO_BUF, 01 - BUF_ON, 10 - BUF_NEXT */
3185 +#define STATUS_M2M_DONE 0x00000040 /* Transfer completed successfully if 1. */
3186 +#define STATUS_M2M_TCS_MASK 0x00000180 /* Terminal Count status. Indicates whether or */
3187 +#define STATUS_M2M_TCS_SHIFT 7 /* or not the actual byte count reached */
3188 + /* programmed limit for buffer descriptor */
3189 +#define STATUS_M2M_EOTS_MASK 0x00000600 /* End-of-Transfer status for buffer */
3190 +#define STATUS_M2M_EOTS_SHIFT 9
3191 +#define STATUS_M2M_NFB 0x00000800 /* A '1' indicates channel has moved */
3192 + /* from NEXT state to ON state, but the next */
3193 + /* byte count reg for next buffer has not been */
3194 + /* programmed yet. */
3195 +#define STATUS_M2M_NB 0x00001000 /* NextBuffer status. Informs NFB service */
3196 + /* routine, after NFB int, which pair of buffer */
3197 + /* descriptor registers is free to update. */
3198 +#define STATUS_M2M_DREQS 0x00002000 /* DREQ status. Reflects the status of the */
3199 + /* synchronized external peripherals DMA */
3200 + /* request signal. */
3201 +
3202 +/*
3203 + * Register masks to mask off reserved bits after reading register.
3204 + */
3205 +#define M2M_MASK_BCR0 0x0000ffff
3206 +#define M2M_MASK_BCR1 0x0000ffff
3207 +#define M2M_MASK_SAR_BASE0 0xffffffff
3208 +#define M2M_MASK_SAR_BASE1 0xffffffff
3209 +#define M2M_MASK_SAR_CURRENT0 0xffffffff
3210 +#define M2M_MASK_SAR_CURRENT1 0xffffffff
3211 +#define M2M_MASK_DAR_BASE0 0xffffffff
3212 +#define M2M_MASK_DAR_BASE1 0xffffffff
3213 +#define M2M_MASK_DAR_CURRENT0 0xffffffff
3214 +#define M2M_MASK_DAR_CURRENT1 0xffffffff
3215 +
3216 +
3217 +//
3218 +/* 8000_0000 - 8000_ffff: DMA */
3219 +#define DMA_OFFSET 0x000000
3220 +#define DMA_BASE (EP93XX_DMA_BASE)
3221 +#define DMAMP_TX_0_CONTROL (DMA_BASE+0x0000)
3222 +#define DMAMP_TX_0_INTERRUPT (DMA_BASE+0x0004)
3223 +#define DMAMP_TX_0_PPALLOC (DMA_BASE+0x0008)
3224 +#define DMAMP_TX_0_STATUS (DMA_BASE+0x000C)
3225 +#define DMAMP_TX_0_REMAIN (DMA_BASE+0x0014)
3226 +#define DMAMP_TX_0_MAXCNT0 (DMA_BASE+0x0020)
3227 +#define DMAMP_TX_0_BASE0 (DMA_BASE+0x0024)
3228 +#define DMAMP_TX_0_CURRENT0 (DMA_BASE+0x0028)
3229 +#define DMAMP_TX_0_MAXCNT1 (DMA_BASE+0x0030)
3230 +#define DMAMP_TX_0_BASE1 (DMA_BASE+0x0034)
3231 +#define DMAMP_TX_0_CURRENT1 (DMA_BASE+0x0038)
3232 +
3233 +#define DMAMP_RX_1_CONTROL (DMA_BASE+0x0040)
3234 +#define DMAMP_RX_1_INTERRUPT (DMA_BASE+0x0044)
3235 +#define DMAMP_RX_1_PPALLOC (DMA_BASE+0x0048)
3236 +#define DMAMP_RX_1_STATUS (DMA_BASE+0x004C)
3237 +#define DMAMP_RX_1_REMAIN (DMA_BASE+0x0054)
3238 +#define DMAMP_RX_1_MAXCNT0 (DMA_BASE+0x0060)
3239 +#define DMAMP_RX_1_BASE0 (DMA_BASE+0x0064)
3240 +#define DMAMP_RX_1_CURRENT0 (DMA_BASE+0x0068)
3241 +#define DMAMP_RX_1_MAXCNT1 (DMA_BASE+0x0070)
3242 +#define DMAMP_RX_1_BASE1 (DMA_BASE+0x0074)
3243 +#define DMAMP_RX_1_CURRENT1 (DMA_BASE+0x0078)
3244 +
3245 +#define DMAMP_TX_2_CONTROL (DMA_BASE+0x0080)
3246 +#define DMAMP_TX_2_INTERRUPT (DMA_BASE+0x0084)
3247 +#define DMAMP_TX_2_PPALLOC (DMA_BASE+0x0088)
3248 +#define DMAMP_TX_2_STATUS (DMA_BASE+0x008C)
3249 +#define DMAMP_TX_2_REMAIN (DMA_BASE+0x0094)
3250 +#define DMAMP_TX_2_MAXCNT0 (DMA_BASE+0x00A0)
3251 +#define DMAMP_TX_2_BASE0 (DMA_BASE+0x00A4)
3252 +#define DMAMP_TX_2_CURRENT0 (DMA_BASE+0x00A8)
3253 +#define DMAMP_TX_2_MAXCNT1 (DMA_BASE+0x00B0)
3254 +#define DMAMP_TX_2_BASE1 (DMA_BASE+0x00B4)
3255 +#define DMAMP_TX_2_CURRENT1 (DMA_BASE+0x00B8)
3256 +
3257 +#define DMAMP_RX_3_CONTROL (DMA_BASE+0x00C0)
3258 +#define DMAMP_RX_3_INTERRUPT (DMA_BASE+0x00C4)
3259 +#define DMAMP_RX_3_PPALLOC (DMA_BASE+0x00C8)
3260 +#define DMAMP_RX_3_STATUS (DMA_BASE+0x00CC)
3261 +#define DMAMP_RX_3_REMAIN (DMA_BASE+0x00D4)
3262 +#define DMAMP_RX_3_MAXCNT0 (DMA_BASE+0x00E0)
3263 +#define DMAMP_RX_3_BASE0 (DMA_BASE+0x00E4)
3264 +#define DMAMP_RX_3_CURRENT0 (DMA_BASE+0x00E8)
3265 +#define DMAMP_RX_3_MAXCNT1 (DMA_BASE+0x00F0)
3266 +#define DMAMP_RX_3_BASE1 (DMA_BASE+0x00F4)
3267 +#define DMAMP_RX_3_CURRENT1 (DMA_BASE+0x00F8)
3268 +
3269 +#define DMAMM_0_CONTROL (DMA_BASE+0x0100)
3270 +#define DMAMM_0_INTERRUPT (DMA_BASE+0x0104)
3271 +#define DMAMM_0_STATUS (DMA_BASE+0x010C)
3272 +#define DMAMM_0_BCR0 (DMA_BASE+0x0110)
3273 +#define DMAMM_0_BCR1 (DMA_BASE+0x0114)
3274 +#define DMAMM_0_SAR_BASE0 (DMA_BASE+0x0118)
3275 +#define DMAMM_0_SAR_BASE1 (DMA_BASE+0x011C)
3276 +#define DMAMM_0_SAR_CURRENT0 (DMA_BASE+0x0124)
3277 +#define DMAMM_0_SAR_CURRENT1 (DMA_BASE+0x0128)
3278 +#define DMAMM_0_DAR_BASE0 (DMA_BASE+0x012C)
3279 +#define DMAMM_0_DAR_BASE1 (DMA_BASE+0x0130)
3280 +#define DMAMM_0_DAR_CURRENT0 (DMA_BASE+0x0134)
3281 +#define DMAMM_0_DAR_CURRENT1 (DMA_BASE+0x013C)
3282 +
3283 +#define DMAMM_1_CONTROL (DMA_BASE+0x0140)
3284 +#define DMAMM_1_INTERRUPT (DMA_BASE+0x0144)
3285 +#define DMAMM_1_STATUS (DMA_BASE+0x014C)
3286 +#define DMAMM_1_BCR0 (DMA_BASE+0x0150)
3287 +#define DMAMM_1_BCR1 (DMA_BASE+0x0154)
3288 +#define DMAMM_1_SAR_BASE0 (DMA_BASE+0x0158)
3289 +#define DMAMM_1_SAR_BASE1 (DMA_BASE+0x015C)
3290 +#define DMAMM_1_SAR_CURRENT0 (DMA_BASE+0x0164)
3291 +#define DMAMM_1_SAR_CURRENT1 (DMA_BASE+0x0168)
3292 +#define DMAMM_1_DAR_BASE0 (DMA_BASE+0x016C)
3293 +#define DMAMM_1_DAR_BASE1 (DMA_BASE+0x0170)
3294 +#define DMAMM_1_DAR_CURRENT0 (DMA_BASE+0x0174)
3295 +#define DMAMM_1_DAR_CURRENT1 (DMA_BASE+0x017C)
3296 +
3297 +#define DMAMP_RX_5_CONTROL (DMA_BASE+0x0200)
3298 +#define DMAMP_RX_5_INTERRUPT (DMA_BASE+0x0204)
3299 +#define DMAMP_RX_5_PPALLOC (DMA_BASE+0x0208)
3300 +#define DMAMP_RX_5_STATUS (DMA_BASE+0x020C)
3301 +#define DMAMP_RX_5_REMAIN (DMA_BASE+0x0214)
3302 +#define DMAMP_RX_5_MAXCNT0 (DMA_BASE+0x0220)
3303 +#define DMAMP_RX_5_BASE0 (DMA_BASE+0x0224)
3304 +#define DMAMP_RX_5_CURRENT0 (DMA_BASE+0x0228)
3305 +#define DMAMP_RX_5_MAXCNT1 (DMA_BASE+0x0230)
3306 +#define DMAMP_RX_5_BASE1 (DMA_BASE+0x0234)
3307 +#define DMAMP_RX_5_CURRENT1 (DMA_BASE+0x0238)
3308 +
3309 +#define DMAMP_TX_4_CONTROL (DMA_BASE+0x0240)
3310 +#define DMAMP_TX_4_INTERRUPT (DMA_BASE+0x0244)
3311 +#define DMAMP_TX_4_PPALLOC (DMA_BASE+0x0248)
3312 +#define DMAMP_TX_4_STATUS (DMA_BASE+0x024C)
3313 +#define DMAMP_TX_4_REMAIN (DMA_BASE+0x0254)
3314 +#define DMAMP_TX_4_MAXCNT0 (DMA_BASE+0x0260)
3315 +#define DMAMP_TX_4_BASE0 (DMA_BASE+0x0264)
3316 +#define DMAMP_TX_4_CURRENT0 (DMA_BASE+0x0268)
3317 +#define DMAMP_TX_4_MAXCNT1 (DMA_BASE+0x0270)
3318 +#define DMAMP_TX_4_BASE1 (DMA_BASE+0x0274)
3319 +#define DMAMP_TX_4_CURRENT1 (DMA_BASE+0x0278)
3320 +
3321 +#define DMAMP_RX_7_CONTROL (DMA_BASE+0x0280)
3322 +#define DMAMP_RX_7_INTERRUPT (DMA_BASE+0x0284)
3323 +#define DMAMP_RX_7_PPALLOC (DMA_BASE+0x0288)
3324 +#define DMAMP_RX_7_STATUS (DMA_BASE+0x028C)
3325 +#define DMAMP_RX_7_REMAIN (DMA_BASE+0x0294)
3326 +#define DMAMP_RX_7_MAXCNT0 (DMA_BASE+0x02A0)
3327 +#define DMAMP_RX_7_BASE0 (DMA_BASE+0x02A4)
3328 +#define DMAMP_RX_7_CURRENT0 (DMA_BASE+0x02A8)
3329 +#define DMAMP_RX_7_MAXCNT1 (DMA_BASE+0x02B0)
3330 +#define DMAMP_RX_7_BASE1 (DMA_BASE+0x02B4)
3331 +#define DMAMP_RX_7_CURRENT1 (DMA_BASE+0x02B8)
3332 +
3333 +#define DMAMP_TX_6_CONTROL (DMA_BASE+0x02C0)
3334 +#define DMAMP_TX_6_INTERRUPT (DMA_BASE+0x02C4)
3335 +#define DMAMP_TX_6_PPALLOC (DMA_BASE+0x02C8)
3336 +#define DMAMP_TX_6_STATUS (DMA_BASE+0x02CC)
3337 +#define DMAMP_TX_6_REMAIN (DMA_BASE+0x02D4)
3338 +#define DMAMP_TX_6_MAXCNT0 (DMA_BASE+0x02E0)
3339 +#define DMAMP_TX_6_BASE0 (DMA_BASE+0x02E4)
3340 +#define DMAMP_TX_6_CURRENT0 (DMA_BASE+0x02E8)
3341 +#define DMAMP_TX_6_MAXCNT1 (DMA_BASE+0x02F0)
3342 +#define DMAMP_TX_6_BASE1 (DMA_BASE+0x02F4)
3343 +#define DMAMP_TX_6_CURRENT1 (DMA_BASE+0x02F8)
3344 +
3345 +#define DMAMP_RX_9_CONTROL (DMA_BASE+0x0300)
3346 +#define DMAMP_RX_9_INTERRUPT (DMA_BASE+0x0304)
3347 +#define DMAMP_RX_9_PPALLOC (DMA_BASE+0x0308)
3348 +#define DMAMP_RX_9_STATUS (DMA_BASE+0x030C)
3349 +#define DMAMP_RX_9_REMAIN (DMA_BASE+0x0314)
3350 +#define DMAMP_RX_9_MAXCNT0 (DMA_BASE+0x0320)
3351 +#define DMAMP_RX_9_BASE0 (DMA_BASE+0x0324)
3352 +#define DMAMP_RX_9_CURRENT0 (DMA_BASE+0x0328)
3353 +#define DMAMP_RX_9_MAXCNT1 (DMA_BASE+0x0330)
3354 +#define DMAMP_RX_9_BASE1 (DMA_BASE+0x0334)
3355 +#define DMAMP_RX_9_CURRENT1 (DMA_BASE+0x0338)
3356 +
3357 +#define DMAMP_TX_8_CONTROL (DMA_BASE+0x0340)
3358 +#define DMAMP_TX_8_INTERRUPT (DMA_BASE+0x0344)
3359 +#define DMAMP_TX_8_PPALLOC (DMA_BASE+0x0348)
3360 +#define DMAMP_TX_8_STATUS (DMA_BASE+0x034C)
3361 +#define DMAMP_TX_8_REMAIN (DMA_BASE+0x0354)
3362 +#define DMAMP_TX_8_MAXCNT0 (DMA_BASE+0x0360)
3363 +#define DMAMP_TX_8_BASE0 (DMA_BASE+0x0364)
3364 +#define DMAMP_TX_8_CURRENT0 (DMA_BASE+0x0368)
3365 +#define DMAMP_TX_8_MAXCNT1 (DMA_BASE+0x0370)
3366 +#define DMAMP_TX_8_BASE1 (DMA_BASE+0x0374)
3367 +#define DMAMP_TX_8_CURRENT1 (DMA_BASE+0x0378)
3368 +
3369 +#define DMA_ARBITRATION (DMA_BASE+0x0380)
3370 +#define DMA_INTERRUPT (DMA_BASE+0x03C0)
3371 +
3372 +
3373 +/*
3374 + * DMA Register Base addresses and Offsets
3375 + */
3376 +#define DMA_M2P_TX_0_BASE DMAMP_TX_0_CONTROL
3377 +#define DMA_M2P_RX_1_BASE DMAMP_RX_1_CONTROL
3378 +#define DMA_M2P_TX_2_BASE DMAMP_TX_2_CONTROL
3379 +#define DMA_M2P_RX_3_BASE DMAMP_RX_3_CONTROL
3380 +#define DMA_M2M_0_BASE DMAMM_0_CONTROL
3381 +#define DMA_M2M_1_BASE DMAMM_1_CONTROL
3382 +#define DMA_M2P_RX_5_BASE DMAMP_RX_5_CONTROL
3383 +#define DMA_M2P_TX_4_BASE DMAMP_TX_4_CONTROL
3384 +#define DMA_M2P_RX_7_BASE DMAMP_RX_7_CONTROL
3385 +#define DMA_M2P_TX_6_BASE DMAMP_TX_6_CONTROL
3386 +#define DMA_M2P_RX_9_BASE DMAMP_RX_9_CONTROL
3387 +#define DMA_M2P_TX_8_BASE DMAMP_TX_8_CONTROL
3388 +
3389 +#define M2P_OFFSET_CONTROL 0x0000
3390 +#define M2P_OFFSET_INTERRUPT 0x0004
3391 +#define M2P_OFFSET_PPALLOC 0x0008
3392 +#define M2P_OFFSET_STATUS 0x000C
3393 +#define M2P_OFFSET_REMAIN 0x0014
3394 +#define M2P_OFFSET_MAXCNT0 0x0020
3395 +#define M2P_OFFSET_BASE0 0x0024
3396 +#define M2P_OFFSET_CURRENT0 0x0028
3397 +#define M2P_OFFSET_MAXCNT1 0x0030
3398 +#define M2P_OFFSET_BASE1 0x0034
3399 +#define M2P_OFFSET_CURRENT1 0x0038
3400 +
3401 +#define M2M_OFFSET_CONTROL 0x0000
3402 +#define M2M_OFFSET_INTERRUPT 0x0004
3403 +#define M2M_OFFSET_STATUS 0x000C
3404 +#define M2M_OFFSET_BCR0 0x0010
3405 +#define M2M_OFFSET_BCR1 0x0014
3406 +#define M2M_OFFSET_SAR_BASE0 0x0018
3407 +#define M2M_OFFSET_SAR_BASE1 0x001C
3408 +#define M2M_OFFSET_SAR_CURRENT0 0x0024
3409 +#define M2M_OFFSET_SAR_CURRENT1 0x0028
3410 +#define M2M_OFFSET_DAR_BASE0 0x002C
3411 +#define M2M_OFFSET_DAR_BASE1 0x0030
3412 +#define M2M_OFFSET_DAR_CURRENT0 0x0034
3413 +#define M2M_OFFSET_DAR_CURRENT1 0x003C
3414 +
3415 +
3416 +
3417 +//-----------------------------------------------------------------------------
3418 +// PWRCNT Register Defines
3419 +//-----------------------------------------------------------------------------
3420 +#define SYSCON_PWRCNT_FIREN 0x80000000
3421 +#define SYSCON_PWRCNT_UARTBAUD 0x20000000
3422 +#define SYSCON_PWRCNT_USHEN 0x10000000
3423 +#define SYSCON_PWRCNT_DMA_M2MCH1 0x08000000
3424 +#define SYSCON_PWRCNT_DMA_M2MCH0 0x04000000
3425 +#define SYSCON_PWRCNT_DMA_M2PCH8 0x02000000
3426 +#define SYSCON_PWRCNT_DMA_M2PCH9 0x01000000
3427 +#define SYSCON_PWRCNT_DMA_M2PCH6 0x00800000
3428 +#define SYSCON_PWRCNT_DMA_M2PCH7 0x00400000
3429 +#define SYSCON_PWRCNT_DMA_M2PCH4 0x00200000
3430 +#define SYSCON_PWRCNT_DMA_M2PCH5 0x00100000
3431 +#define SYSCON_PWRCNT_DMA_M2PCH2 0x00080000
3432 +#define SYSCON_PWRCNT_DMA_M2PCH3 0x00040000
3433 +#define SYSCON_PWRCNT_DMA_M2PCH0 0x00020000
3434 +#define SYSCON_PWRCNT_DMA_M2PCH1 0x00010000
3435 +
3436 +#ifndef __ASSEMBLY__
3437 +/*
3438 + * DMA Register Base addresses
3439 + */
3440 +static unsigned int const DMAM2PChannelBase[10] =
3441 +{
3442 + DMA_M2P_TX_0_BASE,
3443 + DMA_M2P_RX_1_BASE,
3444 + DMA_M2P_TX_2_BASE,
3445 + DMA_M2P_RX_3_BASE,
3446 + DMA_M2P_TX_4_BASE,
3447 + DMA_M2P_RX_5_BASE,
3448 + DMA_M2P_TX_6_BASE,
3449 + DMA_M2P_RX_7_BASE,
3450 + DMA_M2P_TX_8_BASE,
3451 + DMA_M2P_RX_9_BASE
3452 +};
3453 +
3454 +static unsigned int const DMAM2MChannelBase[2] =
3455 +{
3456 + DMA_M2M_0_BASE,
3457 + DMA_M2M_1_BASE
3458 +};
3459 +
3460 +#endif /* __ASSEMBLY__ */
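/*
 * Editor's sketch, not part of the original patch: an M2P channel's
 * registers can be resolved from the lookup table above plus one of the
 * M2P_OFFSET_* values; the two M2M channels work the same way through
 * DMAM2MChannelBase.
 */
static inline unsigned int dma_m2p_reg(int channel, unsigned int offset)
{
	/* channel is 0..9 as indexed above; offset is an M2P_OFFSET_* value */
	return DMAM2PChannelBase[channel] + offset;
}

/* e.g. bytes remaining on M2P channel 3: inl(dma_m2p_reg(3, M2P_OFFSET_REMAIN)) */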
3461 +
3462 +/*****************************************************************************
3463 + *
3464 + * DMA buffer structure type.
3465 + *
3466 + ****************************************************************************/
3467 +typedef struct ep93xx_dma_buffer_s
3468 +{
3469 + unsigned int source; /* buffer physical source address. */
3470 + unsigned int dest; /* buffer physical destination address, */
3471 + /* only used with the 2 M2M channels. */
3472 + unsigned int size; /* buffer size in bytes */
3473 + unsigned int last; /* 1 if this is the last buffer */
3474 + /* in this transaction. If 1, */
3475 + /* disable the NFBint so we aren't */
3476 + /* interrupted for another buffer */
3477 + /* when we know there won't be another. */
3478 + unsigned int used; /* This field is set to 1 by the DMA */
3479 +				/* interface after the buffer is transferred */
3480 +	int buf_id;		/* unique identifier specified by the	*/
3481 +				/* driver which requested the dma.	*/
3482 +} ep93xx_dma_buffer_t;
3483 +
3484 +typedef ep93xx_dma_buffer_t * ep93xx_dma_buffer_p;
3485 +
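/*
 * Editor's sketch, not part of the original patch: a driver describes
 * each buffer it queues with one of these structures.  The values below
 * are purely illustrative.
 */
ep93xx_dma_buffer_t example_buf = {
	.source = 0x00100000,	/* physical source address            */
	.dest   = 0,		/* only meaningful on the M2M channels  */
	.size   = 4096,		/* bytes in this buffer                 */
	.last   = 1,		/* final buffer of the transaction      */
	.used   = 0,		/* set by the DMA layer when done       */
	.buf_id = 1,		/* caller-chosen identifier             */
};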
3486 +/*****************************************************************************
3487 + *
3488 + * Instance definition for the DMA interface.
3489 + *
3490 + ****************************************************************************/
3491 +typedef struct ep9312_dma_s
3492 +{
3493 + /*
3494 +	 * This is 1 when the instance is in use, and 0 when it is not.
3495 + */
3496 + unsigned int ref_count;
3497 +
3498 + /*
3499 +	 * This is the last valid handle for this instance.  When a new handle
3500 +	 * is requested, this value is incremented and returned.
3501 + */
3502 + int last_valid_handle;
3503 +
3504 + /*
3505 + * device specifies one of the 20 DMA hardware ports this
3506 + * DMA channel will service.
3507 + */
3508 + ep93xx_dma_dev_t device;
3509 +
3510 + /*
3511 +	 * buffer_queue is the queue of buffer structures which the
3512 +	 * dma channel will use to set up transfers.
3513 + */
3514 + ep93xx_dma_buffer_t buffer_queue[MAX_EP93XX_DMA_BUFFERS];
3515 +
3516 + /*
3517 +	 * current_buffer : This is the buffer currently being transferred on
3518 + * this channel.
3519 + * last_buffer : This is the last buffer for this transfer.
3520 + * Note: current_buffer + 1 is already programmed into the dma
3521 + * channel as the next buffer to transfer. Don't write
3522 + * over either entry.
3523 + */
3524 + int current_buffer;
3525 + int last_buffer;
3526 +
3527 + /*
3528 + * The following 3 fields are buffer counters.
3529 + *
3530 +	 * new_buffers:   Buffers in the queue which have not been transferred.
3531 +	 * used_buffers:  Buffers in the queue which have been transferred,
3532 +	 *                and are waiting to be returned.
3533 +	 * total_buffers: Total number of buffers in the queue.
3534 + */
3535 + int new_buffers;
3536 + int used_buffers;
3537 + int total_buffers;
3538 +
3539 + /*
3540 +	 * total_bytes holds the total bytes transferred on the channel since
3541 +	 * the last flush.  This value does not include the bytes transferred
3542 +	 * in the current buffer.  A byte count is only added after a complete
3543 +	 * buffer is transferred.
3544 + */
3545 + unsigned int total_bytes;
3546 +
3547 + /*
3548 + * Interrupt number for this channel
3549 + */
3550 + unsigned int irq;
3551 +
3552 + /*
3553 + * Indicates whether or not the channel is currently enabled to transfer
3554 + * data.
3555 + */
3556 + unsigned int xfer_enable;
3557 +
3558 + /*
3559 + * pause indicates if the dma channel was paused by calling the pause
3560 + * ioctl.
3561 + */
3562 + unsigned int pause;
3563 +
3564 + /*
3565 + * buffer structure used during a pause to capture the current
3566 + * address and remaining bytes for the buffer actively being transferred
3567 + * on the channel. This buffer will be used to reprogram the dma
3568 + * channel upon a resume.
3569 + */
3570 + ep93xx_dma_buffer_t pause_buf;
3571 +
3572 + /*
3573 +	 * callback is a function pointer into which the calling application
3574 +	 * can install a handler.  This function can be used to notify the
3575 +	 * calling application of an interrupt.
3576 + */
3577 + dma_callback callback;
3578 +
3579 + /*
3580 +	 * User data passed as a parameter to the callback function.  The user
3581 +	 * sets up the data and it is passed back with each callback.
3582 + */
3583 + unsigned int user_data;
3584 +
3585 + /*
3586 + * A string representation of the device attached to the channel.
3587 + */
3588 + const char * device_id;
3589 +
3590 + /*
3591 + * The register base address for this dma channel.
3592 + */
3593 + unsigned int reg_base;
3594 +
3595 + /*
3596 +	 * terminated indicates whether the transfer on this channel has been terminated.
3597 + */
3598 + unsigned int terminated;
3599 +
3600 +
3601 +} ep93xx_dma_t;
3602 +
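/*
 * Editor's sketch, not part of the original patch, illustrating the
 * bookkeeping described by the comments above: when a buffer completes,
 * it moves from "new" to "used", its size is added to the running byte
 * count, and current_buffer advances around the fixed-size queue.
 */
static void dma_example_buffer_done(ep93xx_dma_t *dma)
{
	ep93xx_dma_buffer_t *buf = &dma->buffer_queue[dma->current_buffer];

	buf->used = 1;
	dma->new_buffers--;
	dma->used_buffers++;
	dma->total_bytes += buf->size;
	dma->current_buffer = (dma->current_buffer + 1) % MAX_EP93XX_DMA_BUFFERS;
}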
3603 +/*****************************************************************************
3604 + *
3605 + * DMA macros
3606 + *
3607 + ****************************************************************************/
3608 +#define DMA_HANDLE_SPECIFIER_MASK 0xF0000000
3609 +#define DMA_CH0_HANDLE_SPECIFIER 0x00000000
3610 +#define DMA_CH1_HANDLE_SPECIFIER 0x10000000
3611 +#define DMA_CH2_HANDLE_SPECIFIER 0x20000000
3612 +#define DMA_CH3_HANDLE_SPECIFIER 0x30000000
3613 +#define DMA_CH4_HANDLE_SPECIFIER 0x40000000
3614 +#define DMA_CH5_HANDLE_SPECIFIER 0x50000000
3615 +#define DMA_CH6_HANDLE_SPECIFIER 0x60000000
3616 +#define DMA_CH7_HANDLE_SPECIFIER 0x70000000
3617 +#define DMA_CH8_HANDLE_SPECIFIER 0x80000000
3618 +#define DMA_CH9_HANDLE_SPECIFIER 0x90000000
3619 +#define DMA_CH10_HANDLE_SPECIFIER 0xA0000000
3620 +#define DMA_CH11_HANDLE_SPECIFIER 0xB0000000
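/*
 * Editor's sketch, not part of the original patch: the top nibble of a
 * handle selects the channel, so the channel index falls out of a mask
 * and a shift by 28 (derived from DMA_HANDLE_SPECIFIER_MASK above).
 */
static inline int dma_example_handle_to_channel(int handle)
{
	return (int)(((unsigned int)handle & DMA_HANDLE_SPECIFIER_MASK) >> 28);
}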
3621 +
3622 +#endif // _DMADRV_H_