1 #include <linux/module.h>
2 #include <linux/init.h>
3 #include <linux/sched.h>
4 #include <linux/kernel.h>
5 #include <linux/slab.h>
6 #include <linux/string.h>
7 #include <linux/timer.h>
9 #include <linux/errno.h>
10 #include <linux/stat.h>
12 #include <linux/tty.h>
13 #include <linux/selection.h>
14 #include <linux/kmod.h>
15 #include <linux/vmalloc.h>
16 #include <linux/interrupt.h>
17 #include <linux/delay.h>
18 #include <linux/uaccess.h>
19 #include <linux/errno.h>
22 #include <asm/ifxmips/ifxmips.h>
23 #include <asm/ifxmips/ifxmips_irq.h>
24 #include <asm/ifxmips/ifxmips_dma.h>
25 #include <asm/ifxmips/ifxmips_pmu.h>
27 /*25 descriptors for each dma channel,4096/8/20=25.xx*/
28 #define IFXMIPS_DMA_DESCRIPTOR_OFFSET 25
30 #define MAX_DMA_DEVICE_NUM 6 /*max ports connecting to dma */
31 #define MAX_DMA_CHANNEL_NUM 20 /*max dma channels */
32 #define DMA_INT_BUDGET 100 /*budget for interrupt handling */
33 #define DMA_POLL_COUNTER 4 /*fix me, set the correct counter value here! */
/* IRQ controller hooks provided by the platform interrupt code. */
extern void ifxmips_mask_and_ack_irq(unsigned int irq_nr);
extern void ifxmips_enable_irq(unsigned int irq_nr);
extern void ifxmips_disable_irq(unsigned int irq_nr);
40 struct dma_device_info dma_devs
[MAX_DMA_DEVICE_NUM
];
41 struct dma_channel_info dma_chan
[MAX_DMA_CHANNEL_NUM
];
43 static const char *global_device_name
[MAX_DMA_DEVICE_NUM
] =
44 { "PPE", "DEU", "SPI", "SDIO", "MCTRL0", "MCTRL1" };
46 struct dma_chan_map default_dma_map
[MAX_DMA_CHANNEL_NUM
] = {
47 {"PPE", IFXMIPS_DMA_RX
, 0, IFXMIPS_DMA_CH0_INT
, 0},
48 {"PPE", IFXMIPS_DMA_TX
, 0, IFXMIPS_DMA_CH1_INT
, 0},
49 {"PPE", IFXMIPS_DMA_RX
, 1, IFXMIPS_DMA_CH2_INT
, 1},
50 {"PPE", IFXMIPS_DMA_TX
, 1, IFXMIPS_DMA_CH3_INT
, 1},
51 {"PPE", IFXMIPS_DMA_RX
, 2, IFXMIPS_DMA_CH4_INT
, 2},
52 {"PPE", IFXMIPS_DMA_TX
, 2, IFXMIPS_DMA_CH5_INT
, 2},
53 {"PPE", IFXMIPS_DMA_RX
, 3, IFXMIPS_DMA_CH6_INT
, 3},
54 {"PPE", IFXMIPS_DMA_TX
, 3, IFXMIPS_DMA_CH7_INT
, 3},
55 {"DEU", IFXMIPS_DMA_RX
, 0, IFXMIPS_DMA_CH8_INT
, 0},
56 {"DEU", IFXMIPS_DMA_TX
, 0, IFXMIPS_DMA_CH9_INT
, 0},
57 {"DEU", IFXMIPS_DMA_RX
, 1, IFXMIPS_DMA_CH10_INT
, 1},
58 {"DEU", IFXMIPS_DMA_TX
, 1, IFXMIPS_DMA_CH11_INT
, 1},
59 {"SPI", IFXMIPS_DMA_RX
, 0, IFXMIPS_DMA_CH12_INT
, 0},
60 {"SPI", IFXMIPS_DMA_TX
, 0, IFXMIPS_DMA_CH13_INT
, 0},
61 {"SDIO", IFXMIPS_DMA_RX
, 0, IFXMIPS_DMA_CH14_INT
, 0},
62 {"SDIO", IFXMIPS_DMA_TX
, 0, IFXMIPS_DMA_CH15_INT
, 0},
63 {"MCTRL0", IFXMIPS_DMA_RX
, 0, IFXMIPS_DMA_CH16_INT
, 0},
64 {"MCTRL0", IFXMIPS_DMA_TX
, 0, IFXMIPS_DMA_CH17_INT
, 0},
65 {"MCTRL1", IFXMIPS_DMA_RX
, 1, IFXMIPS_DMA_CH18_INT
, 1},
66 {"MCTRL1", IFXMIPS_DMA_TX
, 1, IFXMIPS_DMA_CH19_INT
, 1}
69 struct dma_chan_map
*chan_map
= default_dma_map
;
70 volatile u32 g_ifxmips_dma_int_status
;
71 volatile int g_ifxmips_dma_in_process
; /* 0=not in process, 1=in process */
73 void do_dma_tasklet(unsigned long);
74 DECLARE_TASKLET(dma_tasklet
, do_dma_tasklet
, 0);
76 u8
*common_buffer_alloc(int len
, int *byte_offset
, void **opt
)
78 u8
*buffer
= kmalloc(len
* sizeof(u8
), GFP_KERNEL
);
85 void common_buffer_free(u8
*dataptr
, void *opt
)
90 void enable_ch_irq(struct dma_channel_info
*pCh
)
92 int chan_no
= (int)(pCh
- dma_chan
);
96 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
97 ifxmips_w32(0x4a, IFXMIPS_DMA_CIE
);
98 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN
) | (1 << chan_no
), IFXMIPS_DMA_IRNEN
);
99 local_irq_restore(flag
);
100 ifxmips_enable_irq(pCh
->irq
);
103 void disable_ch_irq(struct dma_channel_info
*pCh
)
106 int chan_no
= (int) (pCh
- dma_chan
);
108 local_irq_save(flag
);
109 g_ifxmips_dma_int_status
&= ~(1 << chan_no
);
110 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
111 ifxmips_w32(0, IFXMIPS_DMA_CIE
);
112 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN
) & ~(1 << chan_no
), IFXMIPS_DMA_IRNEN
);
113 local_irq_restore(flag
);
114 ifxmips_mask_and_ack_irq(pCh
->irq
);
117 void open_chan(struct dma_channel_info
*pCh
)
120 int chan_no
= (int)(pCh
- dma_chan
);
122 local_irq_save(flag
);
123 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
124 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL
) | 1, IFXMIPS_DMA_CCTRL
);
125 if (pCh
->dir
== IFXMIPS_DMA_RX
)
127 local_irq_restore(flag
);
130 void close_chan(struct dma_channel_info
*pCh
)
133 int chan_no
= (int) (pCh
- dma_chan
);
135 local_irq_save(flag
);
136 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
137 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL
) & ~1, IFXMIPS_DMA_CCTRL
);
139 local_irq_restore(flag
);
142 void reset_chan(struct dma_channel_info
*pCh
)
144 int chan_no
= (int) (pCh
- dma_chan
);
146 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
147 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL
) | 2, IFXMIPS_DMA_CCTRL
);
150 void rx_chan_intr_handler(int chan_no
)
152 struct dma_device_info
*pDev
= (struct dma_device_info
*)dma_chan
[chan_no
].dma_dev
;
153 struct dma_channel_info
*pCh
= &dma_chan
[chan_no
];
154 struct rx_desc
*rx_desc_p
;
158 /*handle command complete interrupt */
159 rx_desc_p
= (struct rx_desc
*)pCh
->desc_base
+ pCh
->curr_desc
;
160 if (rx_desc_p
->status
.field
.OWN
== CPU_OWN
161 && rx_desc_p
->status
.field
.C
162 && rx_desc_p
->status
.field
.data_length
< 1536){
163 /* Every thing is correct, then we inform the upper layer */
164 pDev
->current_rx_chan
= pCh
->rel_chan_no
;
165 if (pDev
->intr_handler
)
166 pDev
->intr_handler(pDev
, RCV_INT
);
169 local_irq_save(flag
);
170 tmp
= ifxmips_r32(IFXMIPS_DMA_CS
);
171 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
172 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CIS
) | 0x7e, IFXMIPS_DMA_CIS
);
173 ifxmips_w32(tmp
, IFXMIPS_DMA_CS
);
174 g_ifxmips_dma_int_status
&= ~(1 << chan_no
);
175 local_irq_restore(flag
);
176 ifxmips_enable_irq(dma_chan
[chan_no
].irq
);
180 inline void tx_chan_intr_handler(int chan_no
)
182 struct dma_device_info
*pDev
= (struct dma_device_info
*)dma_chan
[chan_no
].dma_dev
;
183 struct dma_channel_info
*pCh
= &dma_chan
[chan_no
];
187 local_irq_save(flag
);
188 tmp
= ifxmips_r32(IFXMIPS_DMA_CS
);
189 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
190 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CIS
) | 0x7e, IFXMIPS_DMA_CIS
);
191 ifxmips_w32(tmp
, IFXMIPS_DMA_CS
);
192 g_ifxmips_dma_int_status
&= ~(1 << chan_no
);
193 local_irq_restore(flag
);
194 pDev
->current_tx_chan
= pCh
->rel_chan_no
;
195 if (pDev
->intr_handler
)
196 pDev
->intr_handler(pDev
, TRANSMIT_CPT_INT
);
199 void do_dma_tasklet(unsigned long unused
)
203 int budget
= DMA_INT_BUDGET
;
207 while (g_ifxmips_dma_int_status
) {
209 tasklet_schedule(&dma_tasklet
);
214 for (i
= 0; i
< MAX_DMA_CHANNEL_NUM
; i
++) {
215 if ((g_ifxmips_dma_int_status
& (1 << i
)) && dma_chan
[i
].weight
> 0) {
216 if (dma_chan
[i
].weight
> weight
) {
218 weight
= dma_chan
[chan_no
].weight
;
224 if (chan_map
[chan_no
].dir
== IFXMIPS_DMA_RX
)
225 rx_chan_intr_handler(chan_no
);
227 tx_chan_intr_handler(chan_no
);
229 for (i
= 0; i
< MAX_DMA_CHANNEL_NUM
; i
++)
230 dma_chan
[i
].weight
= dma_chan
[i
].default_weight
;
234 local_irq_save(flag
);
235 g_ifxmips_dma_in_process
= 0;
236 if (g_ifxmips_dma_int_status
) {
237 g_ifxmips_dma_in_process
= 1;
238 tasklet_schedule(&dma_tasklet
);
240 local_irq_restore(flag
);
243 irqreturn_t
dma_interrupt(int irq
, void *dev_id
)
245 struct dma_channel_info
*pCh
;
249 pCh
= (struct dma_channel_info
*)dev_id
;
250 chan_no
= (int)(pCh
- dma_chan
);
251 if (chan_no
< 0 || chan_no
> 19)
254 tmp
= ifxmips_r32(IFXMIPS_DMA_IRNEN
);
255 ifxmips_w32(0, IFXMIPS_DMA_IRNEN
);
256 g_ifxmips_dma_int_status
|= 1 << chan_no
;
257 ifxmips_w32(tmp
, IFXMIPS_DMA_IRNEN
);
258 ifxmips_mask_and_ack_irq(irq
);
260 if (!g_ifxmips_dma_in_process
) {
261 g_ifxmips_dma_in_process
= 1;
262 tasklet_schedule(&dma_tasklet
);
268 struct dma_device_info
*dma_device_reserve(char *dev_name
)
272 for (i
= 0; i
< MAX_DMA_DEVICE_NUM
; i
++) {
273 if (strcmp(dev_name
, dma_devs
[i
].device_name
) == 0) {
274 if (dma_devs
[i
].reserved
)
276 dma_devs
[i
].reserved
= 1;
283 EXPORT_SYMBOL(dma_device_reserve
);
285 void dma_device_release(struct dma_device_info
*dev
)
289 EXPORT_SYMBOL(dma_device_release
);
291 void dma_device_register(struct dma_device_info
*dev
)
298 struct dma_device_info
*pDev
;
299 struct dma_channel_info
*pCh
;
300 struct rx_desc
*rx_desc_p
;
301 struct tx_desc
*tx_desc_p
;
303 for (i
= 0; i
< dev
->max_tx_chan_num
; i
++) {
304 pCh
= dev
->tx_chan
[i
];
305 if (pCh
->control
== IFXMIPS_DMA_CH_ON
) {
306 chan_no
= (int)(pCh
- dma_chan
);
307 for (j
= 0; j
< pCh
->desc_len
; j
++) {
308 tx_desc_p
= (struct tx_desc
*)pCh
->desc_base
+ j
;
309 memset(tx_desc_p
, 0, sizeof(struct tx_desc
));
311 local_irq_save(flag
);
312 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
313 /* check if the descriptor length is changed */
314 if (ifxmips_r32(IFXMIPS_DMA_CDLEN
) != pCh
->desc_len
)
315 ifxmips_w32(pCh
->desc_len
, IFXMIPS_DMA_CDLEN
);
317 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL
) & ~1, IFXMIPS_DMA_CCTRL
);
318 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL
) | 2, IFXMIPS_DMA_CCTRL
);
319 while (ifxmips_r32(IFXMIPS_DMA_CCTRL
) & 2)
321 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN
) | (1 << chan_no
), IFXMIPS_DMA_IRNEN
);
322 ifxmips_w32(0x30100, IFXMIPS_DMA_CCTRL
); /* reset and enable channel,enable channel later */
323 local_irq_restore(flag
);
327 for (i
= 0; i
< dev
->max_rx_chan_num
; i
++) {
328 pCh
= dev
->rx_chan
[i
];
329 if (pCh
->control
== IFXMIPS_DMA_CH_ON
) {
330 chan_no
= (int)(pCh
- dma_chan
);
332 for (j
= 0; j
< pCh
->desc_len
; j
++) {
333 rx_desc_p
= (struct rx_desc
*)pCh
->desc_base
+ j
;
334 pDev
= (struct dma_device_info
*)(pCh
->dma_dev
);
335 buffer
= pDev
->buffer_alloc(pCh
->packet_size
, &byte_offset
, (void *)&(pCh
->opt
[j
]));
339 dma_cache_inv((unsigned long) buffer
, pCh
->packet_size
);
341 rx_desc_p
->Data_Pointer
= (u32
)CPHYSADDR((u32
)buffer
);
342 rx_desc_p
->status
.word
= 0;
343 rx_desc_p
->status
.field
.byte_offset
= byte_offset
;
344 rx_desc_p
->status
.field
.OWN
= DMA_OWN
;
345 rx_desc_p
->status
.field
.data_length
= pCh
->packet_size
;
348 local_irq_save(flag
);
349 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
350 /* check if the descriptor length is changed */
351 if (ifxmips_r32(IFXMIPS_DMA_CDLEN
) != pCh
->desc_len
)
352 ifxmips_w32(pCh
->desc_len
, IFXMIPS_DMA_CDLEN
);
353 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL
) & ~1, IFXMIPS_DMA_CCTRL
);
354 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL
) | 2, IFXMIPS_DMA_CCTRL
);
355 while (ifxmips_r32(IFXMIPS_DMA_CCTRL
) & 2)
357 ifxmips_w32(0x0a, IFXMIPS_DMA_CIE
); /* fix me, should enable all the interrupts here? */
358 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN
) | (1 << chan_no
), IFXMIPS_DMA_IRNEN
);
359 ifxmips_w32(0x30000, IFXMIPS_DMA_CCTRL
);
360 local_irq_restore(flag
);
361 ifxmips_enable_irq(dma_chan
[chan_no
].irq
);
365 EXPORT_SYMBOL(dma_device_register
);
367 void dma_device_unregister(struct dma_device_info
*dev
)
371 struct dma_channel_info
*pCh
;
372 struct rx_desc
*rx_desc_p
;
373 struct tx_desc
*tx_desc_p
;
376 for (i
= 0; i
< dev
->max_tx_chan_num
; i
++) {
377 pCh
= dev
->tx_chan
[i
];
378 if (pCh
->control
== IFXMIPS_DMA_CH_ON
) {
379 chan_no
= (int)(dev
->tx_chan
[i
] - dma_chan
);
380 local_irq_save(flag
);
381 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
384 pCh
->control
= IFXMIPS_DMA_CH_OFF
;
385 ifxmips_w32(0, IFXMIPS_DMA_CIE
); /* fix me, should disable all the interrupts here? */
386 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN
) & ~(1 << chan_no
), IFXMIPS_DMA_IRNEN
); /* disable interrupts */
387 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL
) & ~1, IFXMIPS_DMA_CCTRL
);
388 while (ifxmips_r32(IFXMIPS_DMA_CCTRL
) & 1)
390 local_irq_restore(flag
);
392 for (j
= 0; j
< pCh
->desc_len
; j
++) {
393 tx_desc_p
= (struct tx_desc
*)pCh
->desc_base
+ j
;
394 if ((tx_desc_p
->status
.field
.OWN
== CPU_OWN
&& tx_desc_p
->status
.field
.C
)
395 || (tx_desc_p
->status
.field
.OWN
== DMA_OWN
&& tx_desc_p
->status
.field
.data_length
> 0)) {
396 dev
->buffer_free((u8
*) __va(tx_desc_p
->Data_Pointer
), (void *)pCh
->opt
[j
]);
398 tx_desc_p
->status
.field
.OWN
= CPU_OWN
;
399 memset(tx_desc_p
, 0, sizeof(struct tx_desc
));
401 /* TODO should free buffer that is not transferred by dma */
405 for (i
= 0; i
< dev
->max_rx_chan_num
; i
++) {
406 pCh
= dev
->rx_chan
[i
];
407 chan_no
= (int)(dev
->rx_chan
[i
] - dma_chan
);
408 ifxmips_disable_irq(pCh
->irq
);
410 local_irq_save(flag
);
411 g_ifxmips_dma_int_status
&= ~(1 << chan_no
);
414 pCh
->control
= IFXMIPS_DMA_CH_OFF
;
416 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
417 ifxmips_w32(0, IFXMIPS_DMA_CIE
); /* fix me, should disable all the interrupts here? */
418 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN
) & ~(1 << chan_no
), IFXMIPS_DMA_IRNEN
); /* disable interrupts */
419 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL
) & ~1, IFXMIPS_DMA_CCTRL
);
420 while (ifxmips_r32(IFXMIPS_DMA_CCTRL
) & 1)
423 local_irq_restore(flag
);
424 for (j
= 0; j
< pCh
->desc_len
; j
++) {
425 rx_desc_p
= (struct rx_desc
*) pCh
->desc_base
+ j
;
426 if ((rx_desc_p
->status
.field
.OWN
== CPU_OWN
427 && rx_desc_p
->status
.field
.C
)
428 || (rx_desc_p
->status
.field
.OWN
== DMA_OWN
429 && rx_desc_p
->status
.field
.data_length
> 0)) {
430 dev
->buffer_free((u8
*)
431 __va(rx_desc_p
->Data_Pointer
),
432 (void *) pCh
->opt
[j
]);
437 EXPORT_SYMBOL(dma_device_unregister
);
439 int dma_device_read(struct dma_device_info
*dma_dev
, u8
**dataptr
, void **opt
)
445 struct dma_channel_info
*pCh
= dma_dev
->rx_chan
[dma_dev
->current_rx_chan
];
446 struct rx_desc
*rx_desc_p
;
448 /* get the rx data first */
449 rx_desc_p
= (struct rx_desc
*) pCh
->desc_base
+ pCh
->curr_desc
;
450 if (!(rx_desc_p
->status
.field
.OWN
== CPU_OWN
&& rx_desc_p
->status
.field
.C
))
453 buf
= (u8
*) __va(rx_desc_p
->Data_Pointer
);
454 *(u32
*)dataptr
= (u32
)buf
;
455 len
= rx_desc_p
->status
.field
.data_length
;
458 *(int *)opt
= (int)pCh
->opt
[pCh
->curr_desc
];
460 /* replace with a new allocated buffer */
461 buf
= dma_dev
->buffer_alloc(pCh
->packet_size
, &byte_offset
, &p
);
464 dma_cache_inv((unsigned long) buf
, pCh
->packet_size
);
465 pCh
->opt
[pCh
->curr_desc
] = p
;
468 rx_desc_p
->Data_Pointer
= (u32
) CPHYSADDR((u32
) buf
);
469 rx_desc_p
->status
.word
= (DMA_OWN
<< 31) | ((byte_offset
) << 23) | pCh
->packet_size
;
472 *(u32
*) dataptr
= 0;
478 /* increase the curr_desc pointer */
480 if (pCh
->curr_desc
== pCh
->desc_len
)
485 EXPORT_SYMBOL(dma_device_read
);
487 int dma_device_write(struct dma_device_info
*dma_dev
, u8
*dataptr
, int len
, void *opt
)
490 u32 tmp
, byte_offset
;
491 struct dma_channel_info
*pCh
;
493 struct tx_desc
*tx_desc_p
;
494 local_irq_save(flag
);
496 pCh
= dma_dev
->tx_chan
[dma_dev
->current_tx_chan
];
497 chan_no
= (int)(pCh
- (struct dma_channel_info
*) dma_chan
);
499 tx_desc_p
= (struct tx_desc
*)pCh
->desc_base
+ pCh
->prev_desc
;
500 while (tx_desc_p
->status
.field
.OWN
== CPU_OWN
&& tx_desc_p
->status
.field
.C
) {
501 dma_dev
->buffer_free((u8
*) __va(tx_desc_p
->Data_Pointer
), pCh
->opt
[pCh
->prev_desc
]);
502 memset(tx_desc_p
, 0, sizeof(struct tx_desc
));
503 pCh
->prev_desc
= (pCh
->prev_desc
+ 1) % (pCh
->desc_len
);
504 tx_desc_p
= (struct tx_desc
*)pCh
->desc_base
+ pCh
->prev_desc
;
506 tx_desc_p
= (struct tx_desc
*)pCh
->desc_base
+ pCh
->curr_desc
;
507 /* Check whether this descriptor is available */
508 if (tx_desc_p
->status
.field
.OWN
== DMA_OWN
|| tx_desc_p
->status
.field
.C
) {
509 /* if not, the tell the upper layer device */
510 dma_dev
->intr_handler (dma_dev
, TX_BUF_FULL_INT
);
511 local_irq_restore(flag
);
512 printk(KERN_INFO
"%s %d: failed to write!\n", __func__
, __LINE__
);
516 pCh
->opt
[pCh
->curr_desc
] = opt
;
517 /* byte offset----to adjust the starting address of the data buffer, should be multiple of the burst length. */
518 byte_offset
= ((u32
) CPHYSADDR((u32
) dataptr
)) % ((dma_dev
->tx_burst_len
) * 4);
519 dma_cache_wback((unsigned long) dataptr
, len
);
521 tx_desc_p
->Data_Pointer
= (u32
) CPHYSADDR((u32
) dataptr
) - byte_offset
;
523 tx_desc_p
->status
.word
= (DMA_OWN
<< 31) | DMA_DESC_SOP_SET
| DMA_DESC_EOP_SET
| ((byte_offset
) << 23) | len
;
527 if (pCh
->curr_desc
== pCh
->desc_len
)
530 /*Check whether this descriptor is available */
531 tx_desc_p
= (struct tx_desc
*) pCh
->desc_base
+ pCh
->curr_desc
;
532 if (tx_desc_p
->status
.field
.OWN
== DMA_OWN
) {
533 /*if not , the tell the upper layer device */
534 dma_dev
->intr_handler (dma_dev
, TX_BUF_FULL_INT
);
537 ifxmips_w32(chan_no
, IFXMIPS_DMA_CS
);
538 tmp
= ifxmips_r32(IFXMIPS_DMA_CCTRL
);
543 local_irq_restore(flag
);
547 EXPORT_SYMBOL(dma_device_write
);
549 int map_dma_chan(struct dma_chan_map
*map
)
554 for (i
= 0; i
< MAX_DMA_DEVICE_NUM
; i
++)
555 dma_devs
[i
].device_name
= &global_device_name
[i
];
557 for (i
= 0; i
< MAX_DMA_CHANNEL_NUM
; i
++) {
558 dma_chan
[i
].irq
= map
[i
].irq
;
559 result
= request_irq(dma_chan
[i
].irq
, dma_interrupt
, IRQF_DISABLED
, map
[i
].dev_name
, (void *)&dma_chan
[i
]);
561 printk(KERN_WARNING
"error, cannot get dma_irq!\n");
562 free_irq(dma_chan
[i
].irq
, (void *) &dma_interrupt
);
568 for (i
= 0; i
< MAX_DMA_DEVICE_NUM
; i
++) {
569 dma_devs
[i
].num_tx_chan
= 0; /*set default tx channel number to be one */
570 dma_devs
[i
].num_rx_chan
= 0; /*set default rx channel number to be one */
571 dma_devs
[i
].max_rx_chan_num
= 0;
572 dma_devs
[i
].max_tx_chan_num
= 0;
573 dma_devs
[i
].buffer_alloc
= &common_buffer_alloc
;
574 dma_devs
[i
].buffer_free
= &common_buffer_free
;
575 dma_devs
[i
].intr_handler
= NULL
;
576 dma_devs
[i
].tx_burst_len
= 4;
577 dma_devs
[i
].rx_burst_len
= 4;
579 ifxmips_w32(0, IFXMIPS_DMA_PS
);
580 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_PCTRL
) | ((0xf << 8) | (1 << 6)), IFXMIPS_DMA_PCTRL
); /*enable dma drop */
584 ifxmips_w32(1, IFXMIPS_DMA_PS
);
585 ifxmips_w32(0x14, IFXMIPS_DMA_PCTRL
); /*deu port setting */
588 for (j
= 0; j
< MAX_DMA_CHANNEL_NUM
; j
++) {
589 dma_chan
[j
].byte_offset
= 0;
590 dma_chan
[j
].open
= &open_chan
;
591 dma_chan
[j
].close
= &close_chan
;
592 dma_chan
[j
].reset
= &reset_chan
;
593 dma_chan
[j
].enable_irq
= &enable_ch_irq
;
594 dma_chan
[j
].disable_irq
= &disable_ch_irq
;
595 dma_chan
[j
].rel_chan_no
= map
[j
].rel_chan_no
;
596 dma_chan
[j
].control
= IFXMIPS_DMA_CH_OFF
;
597 dma_chan
[j
].default_weight
= IFXMIPS_DMA_CH_DEFAULT_WEIGHT
;
598 dma_chan
[j
].weight
= dma_chan
[j
].default_weight
;
599 dma_chan
[j
].curr_desc
= 0;
600 dma_chan
[j
].prev_desc
= 0;
603 for (j
= 0; j
< MAX_DMA_CHANNEL_NUM
; j
++) {
604 if (strcmp(dma_devs
[i
].device_name
, map
[j
].dev_name
) == 0) {
605 if (map
[j
].dir
== IFXMIPS_DMA_RX
) {
606 dma_chan
[j
].dir
= IFXMIPS_DMA_RX
;
607 dma_devs
[i
].max_rx_chan_num
++;
608 dma_devs
[i
].rx_chan
[dma_devs
[i
].max_rx_chan_num
- 1] = &dma_chan
[j
];
609 dma_devs
[i
].rx_chan
[dma_devs
[i
].max_rx_chan_num
- 1]->pri
= map
[j
].pri
;
610 dma_chan
[j
].dma_dev
= (void *)&dma_devs
[i
];
611 } else if (map
[j
].dir
== IFXMIPS_DMA_TX
) {
613 dma_chan
[j
].dir
= IFXMIPS_DMA_TX
;
614 dma_devs
[i
].max_tx_chan_num
++;
615 dma_devs
[i
].tx_chan
[dma_devs
[i
].max_tx_chan_num
- 1] = &dma_chan
[j
];
616 dma_devs
[i
].tx_chan
[dma_devs
[i
].max_tx_chan_num
- 1]->pri
= map
[j
].pri
;
617 dma_chan
[j
].dma_dev
= (void *)&dma_devs
[i
];
619 printk(KERN_WARNING
"WRONG DMA MAP!\n");
628 void dma_chip_init(void)
632 /* enable DMA from PMU */
633 ifxmips_pmu_enable(IFXMIPS_PMU_PWDCR_DMA
);
636 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CTRL
) | 1, IFXMIPS_DMA_CTRL
);
638 /* disable all interrupts */
639 ifxmips_w32(0, IFXMIPS_DMA_IRNEN
);
641 for (i
= 0; i
< MAX_DMA_CHANNEL_NUM
; i
++) {
642 ifxmips_w32(i
, IFXMIPS_DMA_CS
);
643 ifxmips_w32(0x2, IFXMIPS_DMA_CCTRL
);
644 ifxmips_w32(0x80000040, IFXMIPS_DMA_CPOLL
);
645 ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL
) & ~0x1, IFXMIPS_DMA_CCTRL
);
649 int ifxmips_dma_init(void)
654 if (map_dma_chan(default_dma_map
))
657 g_desc_list
= (u64
*)KSEG1ADDR(__get_free_page(GFP_DMA
));
659 if (g_desc_list
== NULL
) {
660 printk(KERN_WARNING
"no memory for desriptor\n");
664 memset(g_desc_list
, 0, PAGE_SIZE
);
666 for (i
= 0; i
< MAX_DMA_CHANNEL_NUM
; i
++) {
667 dma_chan
[i
].desc_base
= (u32
)g_desc_list
+ i
* IFXMIPS_DMA_DESCRIPTOR_OFFSET
* 8;
668 dma_chan
[i
].curr_desc
= 0;
669 dma_chan
[i
].desc_len
= IFXMIPS_DMA_DESCRIPTOR_OFFSET
;
671 ifxmips_w32(i
, IFXMIPS_DMA_CS
);
672 ifxmips_w32((u32
)CPHYSADDR(dma_chan
[i
].desc_base
), IFXMIPS_DMA_CDBA
);
673 ifxmips_w32(dma_chan
[i
].desc_len
, IFXMIPS_DMA_CDLEN
);
679 arch_initcall(ifxmips_dma_init
);
681 void dma_cleanup(void)
685 free_page(KSEG0ADDR((unsigned long) g_desc_list
));
686 for (i
= 0; i
< MAX_DMA_CHANNEL_NUM
; i
++)
687 free_irq(dma_chan
[i
].irq
, (void *)&dma_interrupt
);
690 MODULE_LICENSE("GPL");