initial merge of danube, pci is still broken and the new dma code still needs to...
[openwrt.git] target/linux/danube/files/arch/mips/danube/dma-core.c
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/selection.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#include <asm/danube/danube.h>
#include <asm/danube/danube_irq.h>
#include <asm/danube/danube_dma.h>

/* 25 descriptors per DMA channel: 4096 bytes / 8 bytes per descriptor / 20 channels = 25.6 */
#define DANUBE_DMA_DESCRIPTOR_OFFSET 25

#define MAX_DMA_DEVICE_NUM 6	/* max ports connecting to dma */
#define MAX_DMA_CHANNEL_NUM 20	/* max dma channels */
#define DMA_INT_BUDGET 100	/* budget for interrupt handling */
#define DMA_POLL_COUNTER 4	/* fixme: set the correct counter value here! */

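/*
 * Descriptor memory layout (derived from danube_dma_init() below): all 20
 * channels share one uncached (KSEG1) 4 KiB page, g_desc_list.  With 8-byte
 * descriptors, channel i's desc_base starts i * DANUBE_DMA_DESCRIPTOR_OFFSET * 8
 * = i * 200 bytes into that page, so 4000 of the 4096 bytes are used.
 */
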
extern void mask_and_ack_danube_irq (unsigned int irq_nr);
extern void enable_danube_irq (unsigned int irq_nr);
extern void disable_danube_irq (unsigned int irq_nr);

u64 *g_desc_list;
_dma_device_info dma_devs[MAX_DMA_DEVICE_NUM];
_dma_channel_info dma_chan[MAX_DMA_CHANNEL_NUM];

char global_device_name[MAX_DMA_DEVICE_NUM][20] =
	{ {"PPE"}, {"DEU"}, {"SPI"}, {"SDIO"}, {"MCTRL0"}, {"MCTRL1"} };

_dma_chan_map default_dma_map[MAX_DMA_CHANNEL_NUM] = {
	{"PPE", DANUBE_DMA_RX, 0, DANUBE_DMA_CH0_INT, 0},
	{"PPE", DANUBE_DMA_TX, 0, DANUBE_DMA_CH1_INT, 0},
	{"PPE", DANUBE_DMA_RX, 1, DANUBE_DMA_CH2_INT, 1},
	{"PPE", DANUBE_DMA_TX, 1, DANUBE_DMA_CH3_INT, 1},
	{"PPE", DANUBE_DMA_RX, 2, DANUBE_DMA_CH4_INT, 2},
	{"PPE", DANUBE_DMA_TX, 2, DANUBE_DMA_CH5_INT, 2},
	{"PPE", DANUBE_DMA_RX, 3, DANUBE_DMA_CH6_INT, 3},
	{"PPE", DANUBE_DMA_TX, 3, DANUBE_DMA_CH7_INT, 3},
	{"DEU", DANUBE_DMA_RX, 0, DANUBE_DMA_CH8_INT, 0},
	{"DEU", DANUBE_DMA_TX, 0, DANUBE_DMA_CH9_INT, 0},
	{"DEU", DANUBE_DMA_RX, 1, DANUBE_DMA_CH10_INT, 1},
	{"DEU", DANUBE_DMA_TX, 1, DANUBE_DMA_CH11_INT, 1},
	{"SPI", DANUBE_DMA_RX, 0, DANUBE_DMA_CH12_INT, 0},
	{"SPI", DANUBE_DMA_TX, 0, DANUBE_DMA_CH13_INT, 0},
	{"SDIO", DANUBE_DMA_RX, 0, DANUBE_DMA_CH14_INT, 0},
	{"SDIO", DANUBE_DMA_TX, 0, DANUBE_DMA_CH15_INT, 0},
	{"MCTRL0", DANUBE_DMA_RX, 0, DANUBE_DMA_CH16_INT, 0},
	{"MCTRL0", DANUBE_DMA_TX, 0, DANUBE_DMA_CH17_INT, 0},
	{"MCTRL1", DANUBE_DMA_RX, 1, DANUBE_DMA_CH18_INT, 1},
	{"MCTRL1", DANUBE_DMA_TX, 1, DANUBE_DMA_CH19_INT, 1}
};

_dma_chan_map *chan_map = default_dma_map;
volatile u32 g_danube_dma_int_status = 0;
volatile int g_danube_dma_in_process = 0;	/* 0 = not in process, 1 = in process */

void do_dma_tasklet (unsigned long);
DECLARE_TASKLET (dma_tasklet, do_dma_tasklet, 0);

/* default buffer_alloc/buffer_free callbacks; map_dma_chan() installs them for every device */
u8*
common_buffer_alloc (int len, int *byte_offset, void **opt)
{
	u8 *buffer = (u8 *) kmalloc (len * sizeof (u8), GFP_KERNEL);

	*byte_offset = 0;

	return buffer;
}

void
common_buffer_free (u8 *dataptr, void *opt)
{
	if (dataptr)
		kfree(dataptr);
}

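/*
 * Illustrative only: a client driver can override the default callbacks above
 * with its own allocator before calling dma_device_register().  The sketch
 * below is not part of this driver; my_buffer_alloc/my_buffer_free are
 * hypothetical names for an skb-backed pair a network driver might use:
 *
 *	static u8 *my_buffer_alloc(int len, int *byte_offset, void **opt)
 *	{
 *		struct sk_buff *skb = dev_alloc_skb(len);
 *		if (!skb)
 *			return NULL;
 *		*byte_offset = 0;
 *		*opt = skb;			// remembered in pCh->opt[]
 *		return skb->data;
 *	}
 *
 *	static void my_buffer_free(u8 *dataptr, void *opt)
 *	{
 *		if (opt)
 *			dev_kfree_skb_any((struct sk_buff *) opt);
 *	}
 *
 *	dma_dev->buffer_alloc = my_buffer_alloc;
 *	dma_dev->buffer_free  = my_buffer_free;
 */
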
void
enable_ch_irq (_dma_channel_info *pCh)
{
	int chan_no = (int)(pCh - dma_chan);
	unsigned long flag;

	local_irq_save(flag);
	writel(chan_no, DANUBE_DMA_CS);
	writel(0x4a, DANUBE_DMA_CIE);
	writel(readl(DANUBE_DMA_IRNEN) | (1 << chan_no), DANUBE_DMA_IRNEN);
	local_irq_restore(flag);
	enable_danube_irq(pCh->irq);
}

void
disable_ch_irq (_dma_channel_info *pCh)
{
	unsigned long flag;
	int chan_no = (int) (pCh - dma_chan);

	local_irq_save(flag);
	g_danube_dma_int_status &= ~(1 << chan_no);
	writel(chan_no, DANUBE_DMA_CS);
	writel(0, DANUBE_DMA_CIE);
	writel(readl(DANUBE_DMA_IRNEN) & ~(1 << chan_no), DANUBE_DMA_IRNEN);
	local_irq_restore(flag);
	mask_and_ack_danube_irq(pCh->irq);
}

void
open_chan (_dma_channel_info *pCh)
{
	unsigned long flag;
	int chan_no = (int)(pCh - dma_chan);

	local_irq_save(flag);
	writel(chan_no, DANUBE_DMA_CS);
	writel(readl(DANUBE_DMA_CCTRL) | 1, DANUBE_DMA_CCTRL);
	if(pCh->dir == DANUBE_DMA_RX)
		enable_ch_irq(pCh);
	local_irq_restore(flag);
}

void
close_chan(_dma_channel_info *pCh)
{
	unsigned long flag;
	int chan_no = (int) (pCh - dma_chan);

	local_irq_save(flag);
	writel(chan_no, DANUBE_DMA_CS);
	writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
	disable_ch_irq(pCh);
	local_irq_restore(flag);
}

void
reset_chan (_dma_channel_info *pCh)
{
	int chan_no = (int) (pCh - dma_chan);

	writel(chan_no, DANUBE_DMA_CS);
	writel(readl(DANUBE_DMA_CCTRL) | 2, DANUBE_DMA_CCTRL);
}

void
rx_chan_intr_handler (int chan_no)
{
	_dma_device_info *pDev = (_dma_device_info *)dma_chan[chan_no].dma_dev;
	_dma_channel_info *pCh = &dma_chan[chan_no];
	struct rx_desc *rx_desc_p;
	int tmp;
	unsigned long flag;

	/* handle command complete interrupt */
	rx_desc_p = (struct rx_desc*)pCh->desc_base + pCh->curr_desc;
	if (rx_desc_p->status.field.OWN == CPU_OWN
	    && rx_desc_p->status.field.C
	    && rx_desc_p->status.field.data_length < 1536){
		/* everything is correct, so inform the upper layer */
		pDev->current_rx_chan = pCh->rel_chan_no;
		if(pDev->intr_handler)
			pDev->intr_handler(pDev, RCV_INT);
		pCh->weight--;
	} else {
		local_irq_save(flag);
		tmp = readl(DANUBE_DMA_CS);
		writel(chan_no, DANUBE_DMA_CS);
		writel(readl(DANUBE_DMA_CIS) | 0x7e, DANUBE_DMA_CIS);
		writel(tmp, DANUBE_DMA_CS);
		g_danube_dma_int_status &= ~(1 << chan_no);
		local_irq_restore(flag);
		enable_danube_irq(dma_chan[chan_no].irq);
	}
}

inline void
tx_chan_intr_handler (int chan_no)
{
	_dma_device_info *pDev = (_dma_device_info*)dma_chan[chan_no].dma_dev;
	_dma_channel_info *pCh = &dma_chan[chan_no];
	int tmp;
	unsigned long flag;

	local_irq_save(flag);
	tmp = readl(DANUBE_DMA_CS);
	writel(chan_no, DANUBE_DMA_CS);
	writel(readl(DANUBE_DMA_CIS) | 0x7e, DANUBE_DMA_CIS);
	writel(tmp, DANUBE_DMA_CS);
	g_danube_dma_int_status &= ~(1 << chan_no);
	local_irq_restore(flag);
	pDev->current_tx_chan = pCh->rel_chan_no;
	if (pDev->intr_handler)
		pDev->intr_handler(pDev, TRANSMIT_CPT_INT);
}

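/*
 * Deferred interrupt work.  Channels flagged in g_danube_dma_int_status are
 * served in weighted order (the pending channel with the largest remaining
 * weight goes first); once every pending channel has used up its weight, all
 * weights are reset to their defaults.  After DMA_INT_BUDGET iterations the
 * tasklet reschedules itself so it cannot monopolise the CPU.
 */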
void
do_dma_tasklet (unsigned long unused)
{
	int i;
	int chan_no = 0;
	int budget = DMA_INT_BUDGET;
	int weight = 0;
	unsigned long flag;

	while (g_danube_dma_int_status)
	{
		if (budget-- < 0)
		{
			tasklet_schedule(&dma_tasklet);
			return;
		}
		chan_no = -1;
		weight = 0;
		/* pick the pending channel with the highest remaining weight */
		for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
		{
			if ((g_danube_dma_int_status & (1 << i)) && dma_chan[i].weight > 0)
			{
				if (dma_chan[i].weight > weight)
				{
					chan_no = i;
					weight = dma_chan[chan_no].weight;
				}
			}
		}

		if (chan_no >= 0)
		{
			if (chan_map[chan_no].dir == DANUBE_DMA_RX)
				rx_chan_intr_handler(chan_no);
			else
				tx_chan_intr_handler(chan_no);
		} else {
			/* all pending channels have exhausted their weight, start over */
			for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
			{
				dma_chan[i].weight = dma_chan[i].default_weight;
			}
		}
	}

	local_irq_save(flag);
	g_danube_dma_in_process = 0;
	if (g_danube_dma_int_status)
	{
		g_danube_dma_in_process = 1;
		tasklet_schedule(&dma_tasklet);
	}
	local_irq_restore(flag);
}

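/*
 * Hard interrupt handler: latch the channel in g_danube_dma_int_status, ack
 * and mask the line, and defer the real work to dma_tasklet.
 */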
irqreturn_t
dma_interrupt (int irq, void *dev_id)
{
	_dma_channel_info *pCh;
	int chan_no = 0;
	int tmp;

	pCh = (_dma_channel_info*)dev_id;
	chan_no = (int)(pCh - dma_chan);
	if (chan_no < 0 || chan_no >= MAX_DMA_CHANNEL_NUM)
		BUG();

	tmp = readl(DANUBE_DMA_IRNEN);
	writel(0, DANUBE_DMA_IRNEN);
	g_danube_dma_int_status |= 1 << chan_no;
	writel(tmp, DANUBE_DMA_IRNEN);
	mask_and_ack_danube_irq(irq);

	if (!g_danube_dma_in_process)
	{
		g_danube_dma_in_process = 1;
		tasklet_schedule(&dma_tasklet);
	}

	return IRQ_HANDLED;
}

_dma_device_info*
dma_device_reserve (char *dev_name)
{
	int i;

	for (i = 0; i < MAX_DMA_DEVICE_NUM; i++)
	{
		if (strcmp(dev_name, dma_devs[i].device_name) == 0)
		{
			if (dma_devs[i].reserved)
				return NULL;
			dma_devs[i].reserved = 1;

			return &dma_devs[i];
		}
	}

	/* no device with that name */
	return NULL;
}

void
dma_device_release (_dma_device_info *dev)
{
	dev->reserved = 0;
}

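/*
 * Typical client flow (illustrative sketch only, not code from this driver;
 * the callbacks and field values are assumptions a real client would choose
 * for itself):
 *
 *	_dma_device_info *dma_dev = dma_device_reserve("PPE");
 *	if (!dma_dev)
 *		return -EBUSY;
 *	dma_dev->buffer_alloc = my_buffer_alloc;	// or keep the defaults
 *	dma_dev->buffer_free  = my_buffer_free;
 *	dma_dev->intr_handler = my_dma_intr_handler;	// receives RCV_INT etc.
 *	dma_dev->tx_chan[0]->control = DANUBE_DMA_CH_ON;
 *	dma_dev->rx_chan[0]->packet_size = 1536;
 *	dma_dev->rx_chan[0]->control = DANUBE_DMA_CH_ON;
 *	dma_device_register(dma_dev);
 *
 *	...
 *
 *	dma_device_unregister(dma_dev);
 *	dma_device_release(dma_dev);
 */
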
void
dma_device_register(_dma_device_info *dev)
{
	int i, j;
	int chan_no = 0;
	u8 *buffer;
	int byte_offset;
	unsigned long flag;
	_dma_device_info *pDev;
	_dma_channel_info *pCh;
	struct rx_desc *rx_desc_p;
	struct tx_desc *tx_desc_p;

	for (i = 0; i < dev->max_tx_chan_num; i++)
	{
		pCh = dev->tx_chan[i];
		if (pCh->control == DANUBE_DMA_CH_ON)
		{
			chan_no = (int)(pCh - dma_chan);
			for (j = 0; j < pCh->desc_len; j++)
			{
				tx_desc_p = (struct tx_desc*)pCh->desc_base + j;
				memset(tx_desc_p, 0, sizeof(struct tx_desc));
			}
			local_irq_save(flag);
			writel(chan_no, DANUBE_DMA_CS);
			/* check if the descriptor length has changed */
			if (readl(DANUBE_DMA_CDLEN) != pCh->desc_len)
				writel(pCh->desc_len, DANUBE_DMA_CDLEN);

			writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
			writel(readl(DANUBE_DMA_CCTRL) | 2, DANUBE_DMA_CCTRL);
			while (readl(DANUBE_DMA_CCTRL) & 2){};
			writel(readl(DANUBE_DMA_IRNEN) | (1 << chan_no), DANUBE_DMA_IRNEN);
			writel(0x30100, DANUBE_DMA_CCTRL);	/* reset and enable channel, enable channel later */
			local_irq_restore(flag);
		}
	}

	for (i = 0; i < dev->max_rx_chan_num; i++)
	{
		pCh = dev->rx_chan[i];
		if (pCh->control == DANUBE_DMA_CH_ON)
		{
			chan_no = (int)(pCh - dma_chan);

			for (j = 0; j < pCh->desc_len; j++)
			{
				rx_desc_p = (struct rx_desc*)pCh->desc_base + j;
				pDev = (_dma_device_info*)(pCh->dma_dev);
				buffer = pDev->buffer_alloc(pCh->packet_size, &byte_offset, (void*)&(pCh->opt[j]));
				if (!buffer)
					break;

				dma_cache_inv((unsigned long) buffer, pCh->packet_size);

				rx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)buffer);
				rx_desc_p->status.word = 0;
				rx_desc_p->status.field.byte_offset = byte_offset;
				rx_desc_p->status.field.OWN = DMA_OWN;
				rx_desc_p->status.field.data_length = pCh->packet_size;
			}

			local_irq_save(flag);
			writel(chan_no, DANUBE_DMA_CS);
			/* check if the descriptor length has changed */
			if (readl(DANUBE_DMA_CDLEN) != pCh->desc_len)
				writel(pCh->desc_len, DANUBE_DMA_CDLEN);
			writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
			writel(readl(DANUBE_DMA_CCTRL) | 2, DANUBE_DMA_CCTRL);
			while (readl(DANUBE_DMA_CCTRL) & 2){};
			writel(0x0a, DANUBE_DMA_CIE);	/* fixme: should all the interrupts be enabled here? */
			writel(readl(DANUBE_DMA_IRNEN) | (1 << chan_no), DANUBE_DMA_IRNEN);
			writel(0x30000, DANUBE_DMA_CCTRL);
			local_irq_restore(flag);
			enable_danube_irq(dma_chan[chan_no].irq);
		}
	}
}

void
dma_device_unregister (_dma_device_info *dev)
{
	int i, j;
	int chan_no;
	_dma_channel_info *pCh;
	struct rx_desc *rx_desc_p;
	struct tx_desc *tx_desc_p;
	unsigned long flag;

	for (i = 0; i < dev->max_tx_chan_num; i++)
	{
		pCh = dev->tx_chan[i];
		if (pCh->control == DANUBE_DMA_CH_ON)
		{
			chan_no = (int)(dev->tx_chan[i] - dma_chan);
			local_irq_save (flag);
			writel(chan_no, DANUBE_DMA_CS);
			pCh->curr_desc = 0;
			pCh->prev_desc = 0;
			pCh->control = DANUBE_DMA_CH_OFF;
			writel(0, DANUBE_DMA_CIE);	/* fixme: should all the interrupts be disabled here? */
			writel(readl(DANUBE_DMA_IRNEN) & ~(1 << chan_no), DANUBE_DMA_IRNEN);	/* disable interrupts */
			writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
			while (readl(DANUBE_DMA_CCTRL) & 1) {};
			local_irq_restore (flag);

			for (j = 0; j < pCh->desc_len; j++)
			{
				tx_desc_p = (struct tx_desc*)pCh->desc_base + j;
				if ((tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C)
				    || (tx_desc_p->status.field.OWN == DMA_OWN && tx_desc_p->status.field.data_length > 0))
				{
					dev->buffer_free ((u8 *) __va (tx_desc_p->Data_Pointer), (void*)pCh->opt[j]);
				}
				tx_desc_p->status.field.OWN = CPU_OWN;
				memset (tx_desc_p, 0, sizeof (struct tx_desc));
			}
			//TODO should free buffer that is not transferred by dma
		}
	}

	for (i = 0; i < dev->max_rx_chan_num; i++)
	{
		pCh = dev->rx_chan[i];
		chan_no = (int)(dev->rx_chan[i] - dma_chan);
		disable_danube_irq(pCh->irq);

		local_irq_save(flag);
		g_danube_dma_int_status &= ~(1 << chan_no);
		pCh->curr_desc = 0;
		pCh->prev_desc = 0;
		pCh->control = DANUBE_DMA_CH_OFF;

		writel(chan_no, DANUBE_DMA_CS);
		writel(0, DANUBE_DMA_CIE);	/* fixme: should all the interrupts be disabled here? */
		writel(readl(DANUBE_DMA_IRNEN) & ~(1 << chan_no), DANUBE_DMA_IRNEN);	/* disable interrupts */
		writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
		while (readl(DANUBE_DMA_CCTRL) & 1) {};

		local_irq_restore (flag);
		for (j = 0; j < pCh->desc_len; j++)
		{
			rx_desc_p = (struct rx_desc *) pCh->desc_base + j;
			if ((rx_desc_p->status.field.OWN == CPU_OWN && rx_desc_p->status.field.C)
			    || (rx_desc_p->status.field.OWN == DMA_OWN && rx_desc_p->status.field.data_length > 0))
			{
				dev->buffer_free ((u8 *) __va (rx_desc_p->Data_Pointer), (void *) pCh->opt[j]);
			}
		}
	}
}

int
dma_device_read (struct dma_device_info *dma_dev, u8 **dataptr, void **opt)
{
	u8 *buf;
	int len;
	int byte_offset = 0;
	void *p = NULL;
	_dma_channel_info *pCh = dma_dev->rx_chan[dma_dev->current_rx_chan];
	struct rx_desc *rx_desc_p;

	/* get the rx data first */
	rx_desc_p = (struct rx_desc *) pCh->desc_base + pCh->curr_desc;
	if (!(rx_desc_p->status.field.OWN == CPU_OWN && rx_desc_p->status.field.C))
	{
		return 0;
	}

	buf = (u8 *) __va (rx_desc_p->Data_Pointer);
	*(u32*)dataptr = (u32)buf;
	len = rx_desc_p->status.field.data_length;

	if (opt)
	{
		*(int*)opt = (int)pCh->opt[pCh->curr_desc];
	}

	/* hand the descriptor back to the DMA with a freshly allocated buffer */
	buf = dma_dev->buffer_alloc(pCh->packet_size, &byte_offset, &p);

	if (buf)
	{
		dma_cache_inv ((unsigned long) buf, pCh->packet_size);
		pCh->opt[pCh->curr_desc] = p;
		wmb ();

		rx_desc_p->Data_Pointer = (u32) CPHYSADDR ((u32) buf);
		rx_desc_p->status.word = (DMA_OWN << 31) | ((byte_offset) << 23) | pCh->packet_size;
		wmb ();
	} else {
		*(u32 *) dataptr = 0;
		if (opt)
			*(int *) opt = 0;
		len = 0;
	}

	/* advance the curr_desc pointer */
	pCh->curr_desc++;
	if (pCh->curr_desc == pCh->desc_len)
		pCh->curr_desc = 0;

	return len;
}

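/*
 * Sketch of a client callback draining a channel with dma_device_read()
 * (illustrative only; my_dma_intr_handler/handle_packet are hypothetical
 * client code, and the callback signature and int return type are assumed
 * from how intr_handler is invoked above):
 *
 *	static int my_dma_intr_handler(struct dma_device_info *dma_dev, int status)
 *	{
 *		u8 *data;
 *		void *opt;
 *		int len;
 *
 *		switch (status) {
 *		case RCV_INT:
 *			len = dma_device_read(dma_dev, &data, &opt);
 *			if (len)
 *				handle_packet(data, len, opt);	// consumer now owns the buffer
 *			break;
 *		case TRANSMIT_CPT_INT:
 *			// tx descriptors are reclaimed inside dma_device_write()
 *			break;
 *		}
 *		return 0;
 *	}
 */
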
int
dma_device_write (struct dma_device_info *dma_dev, u8 *dataptr, int len, void *opt)
{
	unsigned long flag;
	u32 tmp, byte_offset;
	_dma_channel_info *pCh;
	int chan_no;
	struct tx_desc *tx_desc_p;

	local_irq_save (flag);

	pCh = dma_dev->tx_chan[dma_dev->current_tx_chan];
	chan_no = (int)(pCh - (_dma_channel_info *) dma_chan);

	/* reclaim descriptors the DMA has already completed */
	tx_desc_p = (struct tx_desc*)pCh->desc_base + pCh->prev_desc;
	while (tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C)
	{
		dma_dev->buffer_free((u8 *) __va (tx_desc_p->Data_Pointer), pCh->opt[pCh->prev_desc]);
		memset(tx_desc_p, 0, sizeof (struct tx_desc));
		pCh->prev_desc = (pCh->prev_desc + 1) % (pCh->desc_len);
		tx_desc_p = (struct tx_desc*)pCh->desc_base + pCh->prev_desc;
	}
	tx_desc_p = (struct tx_desc*)pCh->desc_base + pCh->curr_desc;
	/* check whether this descriptor is available */
	if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C)
	{
		/* if not, tell the upper layer device */
		dma_dev->intr_handler (dma_dev, TX_BUF_FULL_INT);
		local_irq_restore(flag);
		printk (KERN_INFO "%s %d: failed to write!\n", __func__, __LINE__);

		return 0;
	}
	pCh->opt[pCh->curr_desc] = opt;
	/* byte offset: adjusts the starting address of the data buffer, must be a multiple of the burst length */
	byte_offset = ((u32) CPHYSADDR ((u32) dataptr)) % ((dma_dev->tx_burst_len) * 4);
	dma_cache_wback ((unsigned long) dataptr, len);
	wmb ();
	tx_desc_p->Data_Pointer = (u32) CPHYSADDR ((u32) dataptr) - byte_offset;
	wmb ();
	tx_desc_p->status.word = (DMA_OWN << 31) | DMA_DESC_SOP_SET | DMA_DESC_EOP_SET | ((byte_offset) << 23) | len;
	wmb ();

	pCh->curr_desc++;
	if (pCh->curr_desc == pCh->desc_len)
		pCh->curr_desc = 0;

	/* check whether the next descriptor is available */
	tx_desc_p = (struct tx_desc *) pCh->desc_base + pCh->curr_desc;
	if (tx_desc_p->status.field.OWN == DMA_OWN)
	{
		/* if not, tell the upper layer device */
		dma_dev->intr_handler (dma_dev, TX_BUF_FULL_INT);
	}

	writel(chan_no, DANUBE_DMA_CS);
	tmp = readl(DANUBE_DMA_CCTRL);

	if (!(tmp & 1))
		pCh->open (pCh);

	local_irq_restore (flag);

	return len;
}

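/*
 * Sending a frame (illustrative sketch only; tx_buf/tx_len/tx_opt are
 * placeholders the caller provides, and the buffer must remain allocated
 * until buffer_free() is invoked for it from a later dma_device_write()):
 *
 *	dma_dev->current_tx_chan = 0;
 *	if (dma_device_write(dma_dev, tx_buf, tx_len, tx_opt) != tx_len)
 *		;	// descriptor ring full, TX_BUF_FULL_INT was reported
 */
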
int
map_dma_chan(_dma_chan_map *map)
{
	int i, j;
	int result;

	for (i = 0; i < MAX_DMA_DEVICE_NUM; i++)
	{
		strcpy(dma_devs[i].device_name, global_device_name[i]);
	}

	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
	{
		dma_chan[i].irq = map[i].irq;
		result = request_irq(dma_chan[i].irq, dma_interrupt, SA_INTERRUPT, "dma-core", (void*)&dma_chan[i]);
		if (result)
		{
			printk(KERN_ERR "error, cannot get dma_irq!\n");
			/* release the irqs that were already requested */
			while (--i >= 0)
				free_irq(dma_chan[i].irq, (void*)&dma_chan[i]);

			return -EFAULT;
		}
	}

	for (i = 0; i < MAX_DMA_DEVICE_NUM; i++)
	{
		dma_devs[i].num_tx_chan = 0;	/* default number of tx channels */
		dma_devs[i].num_rx_chan = 0;	/* default number of rx channels */
		dma_devs[i].max_rx_chan_num = 0;
		dma_devs[i].max_tx_chan_num = 0;
		dma_devs[i].buffer_alloc = &common_buffer_alloc;
		dma_devs[i].buffer_free = &common_buffer_free;
		dma_devs[i].intr_handler = NULL;
		dma_devs[i].tx_burst_len = 4;
		dma_devs[i].rx_burst_len = 4;
		if (i == 0)
		{
			writel(0, DANUBE_DMA_PS);
			writel(readl(DANUBE_DMA_PCTRL) | ((0xf << 8) | (1 << 6)), DANUBE_DMA_PCTRL);	/* enable dma drop */
		}

		if (i == 1)
		{
			writel(1, DANUBE_DMA_PS);
			writel(0x14, DANUBE_DMA_PCTRL);	/* deu port setting */
		}

		for (j = 0; j < MAX_DMA_CHANNEL_NUM; j++)
		{
			dma_chan[j].byte_offset = 0;
			dma_chan[j].open = &open_chan;
			dma_chan[j].close = &close_chan;
			dma_chan[j].reset = &reset_chan;
			dma_chan[j].enable_irq = &enable_ch_irq;
			dma_chan[j].disable_irq = &disable_ch_irq;
			dma_chan[j].rel_chan_no = map[j].rel_chan_no;
			dma_chan[j].control = DANUBE_DMA_CH_OFF;
			dma_chan[j].default_weight = DANUBE_DMA_CH_DEFAULT_WEIGHT;
			dma_chan[j].weight = dma_chan[j].default_weight;
			dma_chan[j].curr_desc = 0;
			dma_chan[j].prev_desc = 0;
		}

		for (j = 0; j < MAX_DMA_CHANNEL_NUM; j++)
		{
			if (strcmp(dma_devs[i].device_name, map[j].dev_name) == 0)
			{
				if (map[j].dir == DANUBE_DMA_RX)
				{
					dma_chan[j].dir = DANUBE_DMA_RX;
					dma_devs[i].max_rx_chan_num++;
					dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1] = &dma_chan[j];
					dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1]->pri = map[j].pri;
					dma_chan[j].dma_dev = (void*)&dma_devs[i];
				} else if(map[j].dir == DANUBE_DMA_TX)
				{	/* TX direction */
					dma_chan[j].dir = DANUBE_DMA_TX;
					dma_devs[i].max_tx_chan_num++;
					dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1] = &dma_chan[j];
					dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1]->pri = map[j].pri;
					dma_chan[j].dma_dev = (void*)&dma_devs[i];
				} else {
					printk (KERN_ERR "WRONG DMA MAP!\n");
				}
			}
		}
	}

	return 0;
}

void
dma_chip_init(void)
{
	int i;

	// enable DMA from PMU
	writel(readl(DANUBE_PMU_PWDCR) & ~DANUBE_PMU_PWDCR_DMA, DANUBE_PMU_PWDCR);

	// reset DMA
	writel(readl(DANUBE_DMA_CTRL) | 1, DANUBE_DMA_CTRL);

	// disable all interrupts
	writel(0, DANUBE_DMA_IRNEN);

	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
	{
		writel(i, DANUBE_DMA_CS);
		writel(0x2, DANUBE_DMA_CCTRL);	// reset the channel
		writel(0x80000040, DANUBE_DMA_CPOLL);
		writel(readl(DANUBE_DMA_CCTRL) & ~0x1, DANUBE_DMA_CCTRL);
	}
}

int
danube_dma_init (void)
{
	int i;
	unsigned long page;

	dma_chip_init();
	if (map_dma_chan(default_dma_map))
		BUG();

	page = __get_free_page(GFP_DMA);
	if (!page)
	{
		printk(KERN_ERR "no memory for descriptor\n");
		return -ENOMEM;
	}
	g_desc_list = (u64*)KSEG1ADDR(page);

	memset(g_desc_list, 0, PAGE_SIZE);

	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
	{
		dma_chan[i].desc_base = (u32)g_desc_list + i * DANUBE_DMA_DESCRIPTOR_OFFSET * 8;
		dma_chan[i].curr_desc = 0;
		dma_chan[i].desc_len = DANUBE_DMA_DESCRIPTOR_OFFSET;

		writel(i, DANUBE_DMA_CS);
		writel((u32)CPHYSADDR(dma_chan[i].desc_base), DANUBE_DMA_CDBA);
		writel(dma_chan[i].desc_len, DANUBE_DMA_CDLEN);
	}

	return 0;
}

arch_initcall(danube_dma_init);

void
dma_cleanup(void)
{
	int i;

	free_page(KSEG0ADDR((unsigned long) g_desc_list));
	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
		free_irq(dma_chan[i].irq, (void*)&dma_chan[i]);
}

EXPORT_SYMBOL (dma_device_reserve);
EXPORT_SYMBOL (dma_device_release);
EXPORT_SYMBOL (dma_device_register);
EXPORT_SYMBOL (dma_device_unregister);
EXPORT_SYMBOL (dma_device_read);
EXPORT_SYMBOL (dma_device_write);

MODULE_LICENSE ("GPL");