/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
//-----------------------------------------------------------------------
/*
 * Driver for Infineon Amazon DMA
 */
//-----------------------------------------------------------------------
/* Author: Wu Qi Ming [Qi-Ming.Wu@infineon.com]
 * Created: 7-April-2004
 */
//-----------------------------------------------------------------------
/* Last changed on: 4-May-2004
 * Last changed by: <peng.liu@infineon.com>
 */
//-----------------------------------------------------------------------
/* Last changed on: 03-Dec-2004
 * Last changed by: peng.liu@infineon.com
 * Reason: recover from TPE bug
 */
//000004:fchang 2005/6/2 Modified by Linpeng as described below
//-----------------------------------------------------------------------
/* Last changed on: 28-Jan-2004
 * Last changed by: peng.liu@infineon.com
 * Reason:
 * - handle "out of memory" bug
 */
//000003:tc.chen 2005/06/16 fix memory leak when Tx buffer full (heavy traffic).
//507261:tc.chen 2005/07/26 re-organize code address map to improve performance.
#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
#define MODVERSIONS
#endif

#if defined(MODVERSIONS) && !defined(__GENKSYMS__)
#include <linux/modversions.h>
#endif

#define EXPORT_SYMTAB        /* need this one 'cause we export symbols */
/* no TX interrupt handling */
#define NO_TX_INT
/* need for DMA workaround */
#undef AMAZON_DMA_TPE_AAL5_RECOVERY

#ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
#define MAX_SYNC_FAILS 1000000    // 000004:fchang
unsigned int dma_sync_fails = 0;
unsigned int total_dma_tpe_reset = 0;
int (*tpe_reset) (void);
int (*tpe_start) (void);
int (*tpe_inject) (void);
#endif                // AMAZON_DMA_TPE_AAL5_RECOVERY
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/selection.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/uaccess.h>

#include <asm/amazon/amazon.h>
#include <asm/amazon/irq.h>
#include <asm/amazon/amazon_dma.h>
#include "dma-core.h"
#define AMAZON_DMA_EMSG(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#define AMAZON_DMA_DMSG(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, ## args)
static irqreturn_t dma_interrupt(int irq, void *dev_id);
extern void mask_and_ack_amazon_irq(unsigned int irq_nr);
/***************************************** global data *******************************************/
u64 *g_desc_list;
dev_list *g_current_dev = NULL;
dev_list *g_head_dev = NULL;
dev_list *g_tail_dev = NULL;
channel_info g_log_chan[CHAN_TOTAL_NUM + 1];
struct proc_dir_entry *g_amazon_dma_dir;
static u8 rx_chan_list_len = 0;
static u8 tx_chan_list_len = 0;
static int rx_chan_list[RX_CHAN_NUM + 1];
static int tx_chan_list[TX_CHAN_NUM + 1];
static u32 comb_isr_mask[CHAN_TOTAL_NUM];
static inline int is_rx_chan(int chan_no)
/*judge if this is an rx channel*/
{
    int result = 0;
    if (chan_no < RX_CHAN_NUM)
        result = 1;
    return result;
}
/* Ugly, Channel ON register is badly mapped to channel no. */
static u8 ch_on_mapping[CHAN_TOTAL_NUM] =
    { 0, 1, 2, 3, 6, 7, 10, 4, 5, 8, 9, 11 };
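/* Example: opening logical channel 7 toggles bit ch_on_mapping[7] == 4 in the
 * Channel ON register, i.e. (1 << 4) rather than (1 << 7). */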
/* Brief: check whether the chan_no is legal
 * Parameter: chan_no: logical channel number
 * Return: 0 if not valid
 */
static inline int is_valid_dma_ch(int chan_no)
{
    return ((chan_no >= 0) && (chan_no < CHAN_TOTAL_NUM));
}
/* Brief: check whether a channel is open through Channel ON register
 * Parameter: chan_no: logical channel number
 * Return: 1 channel is open
 *         0 channel is closed
 *         EINVAL: invalid parameter
 */
static inline int is_channel_open(int chan_no)
{
    return (AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &
            (1 << ch_on_mapping[chan_no]));
}
/* Brief: add a list entry
 * Description:
 * always add to the tail and no redundancy allowed. (i.e. entries are unique)
 * Return: 0 if added
 *        <0 : not added (due to not unique)
 */
static inline int _add_list_entry(int *list, int size_of_list, int entry)
{
    int i;
    for (i = 0; i < size_of_list; i++) {
        if (list[i] == entry)
            return -1;
        if (list[i] < 0) {
            list[i] = entry;
            return 0;
        }
    }
    return -1;
}
/* Brief: delete a list entry
 * Description:
 * find the entry and remove it. shift all entries behind it one step forward if necessary
 * Return: 0 if deleted
 *        <0 : not deleted (due to not found?)
 */
static inline int _delete_list_entry(int *list, int size_of_list, int entry)
{
    int i, j;
    for (i = 0; i < size_of_list; i++) {
        if (list[i] == entry) {
            for (j = i; j < size_of_list; j++) {
                list[j] = list[j + 1];
                if (list[j + 1] < 0) {
                    break;
                }
            }
            return 0;
        }
    }
    return -1;
}
/* Brief: enable a channel through Channel ON register
 * Parameter: chan_no: logical channel number
 * Description:
 * Please don't open a channel without a valid descriptor (hardware pitfall)
 */
static inline void open_channel(int chan_no)
{
    AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) |= (1 << ch_on_mapping[chan_no]);
    if (is_rx_chan(chan_no)) {
        if (_add_list_entry(rx_chan_list, RX_CHAN_NUM, chan_no) == 0) {
            rx_chan_list_len++;
        } else {
            AMAZON_DMA_DMSG("cannot add chan %d to open list\n", chan_no);
        }
    } else {
        if (_add_list_entry(tx_chan_list, TX_CHAN_NUM, chan_no) == 0) {
            tx_chan_list_len++;
        } else {
            AMAZON_DMA_DMSG("cannot add chan %d to open list\n", chan_no);
        }
    }
}
/* Brief: disable a channel through Channel ON register
 * Parameter: chan_no: logical channel number
 */
static inline void close_channel(int chan_no)
{
    AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &= ~(1 << ch_on_mapping[chan_no]);
    if (is_rx_chan(chan_no)) {
        if (_delete_list_entry(rx_chan_list, RX_CHAN_NUM, chan_no) == 0) {
            rx_chan_list_len--;
        } else {
            AMAZON_DMA_DMSG("cannot remove chan %d from open list \n", chan_no);
        }
    } else {
        if (_delete_list_entry(tx_chan_list, TX_CHAN_NUM, chan_no) == 0) {
            tx_chan_list_len--;
        } else {
            AMAZON_DMA_DMSG("cannot remove chan %d from open list \n", chan_no);
        }
    }
}
/* Brief: clear RX interrupt
 */
inline void rx_chan_clear_isr(int chan_no)
{
#ifdef DMA_NO_POLLING
    AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR + chan_no * AMAZON_DMA_CH_STEP) =
        (AMAZON_DMA_REG32
         (AMAZON_DMA_CH0_ISR +
          chan_no *
          AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP | DMA_ISR_CMDCPT
                                 | DMA_ISR_DURR));
#else
    AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR + chan_no * AMAZON_DMA_CH_STEP) =
        (AMAZON_DMA_REG32
         (AMAZON_DMA_CH0_ISR +
          chan_no *
          AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP |
                                 DMA_ISR_CMDCPT));
#endif
}
/* Brief: hacking function, this will reset all descriptors back to DMA
 */
static void dma_reset_all_descriptors(int chan_no)
{
    volatile struct rx_desc *rx_desc_p = NULL;
    int i;
    rx_desc_p =
        (struct rx_desc *) g_desc_list +
        g_log_chan[chan_no].offset_from_base;
    for (i = 0; i < g_log_chan[chan_no].desc_len; i++) {
        rx_desc_p->status.word &=
            (~(DMA_DESC_SOP_SET | DMA_DESC_EOP_SET | DMA_DESC_CPT_SET));
        rx_desc_p->status.word |=
            (DMA_DESC_OWN_DMA | g_log_chan[chan_no].packet_size);
        rx_desc_p++;
    }
}
#ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
/* Brief: Reset DMA descriptors
 */
static void amazon_dma_reset_tpe_rx(int chan_no)
{
    struct tx_desc *tx_desc_p = NULL;
    int i, j;

    // wait until all TX channels stop transmitting
    for (j = 9; j <= 10; j++) {
        tx_desc_p =
            (struct tx_desc *) g_desc_list +
            g_log_chan[j].offset_from_base;
        for (i = 0; i < g_log_chan[j].desc_len; i++) {
            while ((tx_desc_p->status.field.OWN != CPU_OWN)) {
                AMAZON_DMA_DMSG("DMA TX in progress\n");    // 000004:fchang
                udelay(100);
            }
            tx_desc_p++;
        }
    }

    if (tpe_reset) {
        total_dma_tpe_reset++;
        AMAZON_DMA_DMSG
            ("\n===============resetting TPE========================== \n");
        if ((*tpe_reset) ()) {
            panic("cannot reset TPE engine\n");    // 000004:fchang
        }
    } else {
        panic("no tpe_reset function\n");    // 000004:fchang
        return;
    }
    dma_reset_all_descriptors(chan_no);
    rx_chan_clear_isr(chan_no);

    // inject one cell to get the RX channel going again
    if (tpe_inject) {
        if ((*tpe_inject) ()) {
            panic("cannot inject a cell\n");    // 000004:fchang
        }
    } else {
        AMAZON_DMA_EMSG("no tpe_inject function\n");
        return;
    }

    // wait until the injected cell arrives
    while (!
           (AMAZON_DMA_REG32
            (AMAZON_DMA_CH0_ISR +
             chan_no * AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT))) {
        ;
    }
    rx_chan_clear_isr(chan_no);

    dma_reset_all_descriptors(chan_no);
    if (g_log_chan[chan_no].current_desc ==
        (g_log_chan[chan_no].desc_len - 1)) {
        g_log_chan[chan_no].current_desc = 0;
    } else {
        g_log_chan[chan_no].current_desc++;
    }

    // close the channel and wait until it is really off
    AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &= ~(1 << ch_on_mapping[chan_no]);
    while (AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) &
           (1 << ch_on_mapping[chan_no])) {
        printk("TPE channel still on\n");
        udelay(100);
    }

    // AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = (1<<chan_no);

    AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + chan_no * AMAZON_DMA_CH_STEP) =
        0x32;
    rx_chan_clear_isr(chan_no);
    dma_reset_all_descriptors(chan_no);

    AMAZON_DMA_REG32(AMAZON_DMA_CH_ON) |= (1 << ch_on_mapping[chan_no]);
    // g_log_chan[chan_no].current_desc=0;

    if (tpe_start) {
        if ((*tpe_start) ()) {
            AMAZON_DMA_EMSG("cannot restart TPE engine\n");
        }
    }
}
#endif                // AMAZON_DMA_TPE_AAL5_RECOVERY
/* Brief: RX channel interrupt handler
 * Parameter: RX channel no
 * Description: the interrupt handler for each RX channel
 * 1. check descriptor, clear ISR if no incoming packet
 * 2. inform upper layer to receive packet (and update descriptors)
 */
inline void rx_chan_intr_handler(int chan_no)
{
    volatile struct rx_desc *rx_desc_p = NULL;

    /* fetch the current descriptor */
    rx_desc_p =
        (struct rx_desc *) g_desc_list +
        g_log_chan[chan_no].offset_from_base +
        g_log_chan[chan_no].current_desc;

    g_log_chan[chan_no].dma_dev->current_rx_chan =
        chan_no - g_log_chan[chan_no].dma_dev->logic_rx_chan_base;

    // workaround for DMA pitfall: complete bit set happens before the
    // other two bits (own, eop) are ready
    if ((rx_desc_p->status.field.EoP != 1)
        || (rx_desc_p->status.field.OWN != CPU_OWN)
        || (rx_desc_p->status.field.data_length ==
            g_log_chan[chan_no].packet_size)) {
#ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
        if (chan_no == 4 || chan_no == 5) {
            dma_sync_fails++;
            if (dma_sync_fails > MAX_SYNC_FAILS) {
                volatile struct rx_desc *rx_desc_p0, *rx_desc_p1;
                rx_desc_p0 =
                    (struct rx_desc *) g_desc_list +
                    g_log_chan[chan_no].offset_from_base;
                rx_desc_p1 =
                    (struct rx_desc *) g_desc_list +
                    g_log_chan[chan_no].offset_from_base + 1;
                if ((rx_desc_p0->status.field.OWN == CPU_OWN
                     && rx_desc_p0->status.field.EoP != 1)
                    && (rx_desc_p1->status.field.OWN == CPU_OWN
                        && rx_desc_p1->status.field.EoP != 1)) {
                    amazon_dma_reset_tpe_rx(chan_no);
                }
                AMAZON_DMA_DMSG("too many times ch:%d\n", chan_no);    // 000004:fchang
                dma_sync_fails = 0;
                return;
            }
            udelay(10);    // 000004:fchang
        }
#endif                // AMAZON_DMA_TPE_AAL5_RECOVERY
        return;
    }

    /* inform the upper layer to receive the packet */
    g_log_chan[chan_no].intr_handler(g_log_chan[chan_no].dma_dev, RCV_INT);
    /* check the next descriptor, if still contains the incoming packet,
       then do not clear the interrupt status */
    rx_desc_p =
        (struct rx_desc *) g_desc_list +
        g_log_chan[chan_no].offset_from_base +
        g_log_chan[chan_no].current_desc;
    if (!
        ((rx_desc_p->status.field.OWN == CPU_OWN)
         && (rx_desc_p->status.field.C == 1))) {
        rx_chan_clear_isr(chan_no);
    }
}
/* Brief: TX channel interrupt handler
 * Parameter: TX channel no
 * Description: the interrupt handler for each TX channel
 * 1. check all the descriptors; if any of them has transmitted a packet, free the buffer.
 *    Because we cannot guarantee which one has already been transmitted, we have to go through all the descriptors here.
 * 2. clear the interrupt status bit
 */
inline void tx_chan_intr_handler(int chan_no)
{
    struct tx_desc *tx_desc_p = NULL;
    int i;

    tx_desc_p =
        (struct tx_desc *) g_desc_list +
        g_log_chan[chan_no].offset_from_base;

    for (i = 0; i < g_log_chan[chan_no].desc_len; i++) {
        if ((tx_desc_p->status.field.OWN == CPU_OWN)
            && (tx_desc_p->status.field.C == 1)) {
            /* if already transmitted, then free the buffer */
            g_log_chan[chan_no].
                buffer_free((u8 *) __va(tx_desc_p->Data_Pointer),
                            g_log_chan[chan_no].opt[i]);
            tx_desc_p->status.field.C = 0;
            /* inform the upper layer about the completion of the
               transmitted packet, the upper layer may want to free the
               buffer */
            g_log_chan[chan_no].intr_handler(g_log_chan[chan_no].dma_dev,
                                             TRANSMIT_CPT_INT);
        }
        tx_desc_p++;
    }

    /* after all these operations, clear the interrupt status bit */
    AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR + chan_no * AMAZON_DMA_CH_STEP) =
        (AMAZON_DMA_REG32
         (AMAZON_DMA_CH0_ISR +
          chan_no *
          AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP |
                                 DMA_ISR_CMDCPT));
}
/* Brief: DMA interrupt handler
 */
static irqreturn_t dma_interrupt(int irq, void *dev_id)
{
    int i;
    int chan_no = 0;
    u32 isr = 0;
#ifdef NO_TX_INT        // 000004:fchang
    static int cnt = 0;    // 000004:fchang
#endif                // 000004:fchang

    isr = (AMAZON_DMA_REG32(AMAZON_DMA_COMB_ISR)) & (COMB_ISR_RX_MASK |
                                                     COMB_ISR_TX_MASK);
    if (isr & COMB_ISR_RX_MASK) {
        // RX Channels: start WFQ algorithm
        chan_no = CHAN_TOTAL_NUM;
        for (i = 0; i < RX_CHAN_NUM; i++) {
            if ((isr & (comb_isr_mask[i]))
                && (g_log_chan[i].weight > 0)) {
                if (g_log_chan[chan_no].weight < g_log_chan[i].weight) {
                    chan_no = i;
                }
            }
        }
        if (chan_no < CHAN_TOTAL_NUM) {
            rx_chan_intr_handler(chan_no);
        } else {
            // no pending channel has weight left: reload the defaults
            for (i = 0; i < RX_CHAN_NUM; i++) {
                g_log_chan[i].weight = g_log_chan[i].default_weight;
            }
        }
    }
#ifdef NO_TX_INT
    for (i = 0; i < tx_chan_list_len; i++) {
        if (AMAZON_DMA_REG32
            (AMAZON_DMA_CH0_ISR +
             tx_chan_list[i] *
             AMAZON_DMA_CH_STEP) & (DMA_ISR_CPT | DMA_ISR_EOP)) {
            tx_chan_intr_handler(tx_chan_list[i]);
        }
    }
#else
    if (isr & COMB_ISR_TX_MASK) {
        // TX channels
        for (i = 0; i < tx_chan_list_len; i++) {
            if (isr & (comb_isr_mask[tx_chan_list[i]])) {
                tx_chan_intr_handler(tx_chan_list[i]);
            }
        }
    }
#endif
    return IRQ_HANDLED;
}
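/* WFQ note (summary of the handler above): among the RX channels whose
 * combined ISR bit is pending and whose remaining weight is positive, the one
 * with the largest weight is served first; dma_device_read() subtracts the
 * packet length from that channel's weight, and once no pending channel has
 * weight left the default weights are reloaded for the next round. */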
/* Brief: read a packet from DMA RX channel
 * Parameter: dma_dev, data pointer, opt
 * Return: packet length
 * Description:
 * This is called back in a context of DMA interrupt
 * 1. prepare new descriptor
 * 2. read data
 * 3. update WFQ weight
 */
//507261:tc.chen int dma_device_read(struct dma_device_info* dma_dev, u8** dataptr, void** opt)
int asmlinkage dma_device_read(struct dma_device_info *dma_dev,
                               u8 ** dataptr, void **opt)
{
    u8 *buf;
    int len;
    int byte_offset = 0;
    void *p = NULL;
    int chan_no = 0;
    int current_desc;
    struct rx_desc *rx_desc_p;

    chan_no = dma_dev->logic_rx_chan_base + dma_dev->current_rx_chan;
    current_desc = g_log_chan[chan_no].current_desc;
    rx_desc_p =
        (struct rx_desc *) (g_desc_list +
                            g_log_chan[chan_no].offset_from_base +
                            current_desc);
    buf = (u8 *) __va(rx_desc_p->Data_Pointer);    /* extract the virtual
                                                      address of the data buffer */
    len = rx_desc_p->status.field.data_length;    /* extract the data length */
#ifndef CONFIG_MIPS_UNCACHED
    dma_cache_inv((unsigned long) buf, len);
#endif                // CONFIG_MIPS_UNCACHED
    *(u32 *) dataptr = (u32) buf;
    *(int *) opt = (int) g_log_chan[chan_no].opt[current_desc];    /* read
                                                                      out the opt information */

    /* replace with a newly allocated buffer */
    buf =
        (u8 *) g_log_chan[chan_no].buffer_alloc(g_log_chan[chan_no].
                                                packet_size, &byte_offset,
                                                &p);
    // should check null!!!!
    if (buf == NULL || p == NULL) {
        /* allocation failed: keep the old buffer in the descriptor and
           tell the caller there is no packet */
        *(u32 *) dataptr = 0;
        *(int *) opt = 0;
        len = 0;
    } else {
        g_log_chan[chan_no].opt[current_desc] = p;
        /* reduce the weight for WFQ algorithm */
        g_log_chan[chan_no].weight -= len;
        rx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) buf);
    }

    if (current_desc == g_log_chan[chan_no].desc_len - 1) {
        current_desc = 0;
    } else {
        current_desc++;
    }
    g_log_chan[chan_no].current_desc = current_desc;

    /* hand the descriptor back to the DMA */
    rx_desc_p->status.word = DMA_DESC_OWN_DMA
        | (byte_offset << DMA_DESC_BYTEOFF_SHIFT)
        | g_log_chan[chan_no].packet_size;

    return len;
}
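/* Usage sketch (illustrative, not part of this driver): a peripheral driver
 * normally calls dma_device_read() from the RCV_INT case of the intr_handler
 * callback it registered; my_dma_dev and my_push_rx are hypothetical names.
 *
 *    int my_intr_handler(struct dma_device_info *my_dma_dev, int status)
 *    {
 *        u8 *data;
 *        void *opt;
 *        int len;
 *
 *        if (status == RCV_INT) {
 *            len = dma_device_read(my_dma_dev, &data, &opt);
 *            if (len > 0 && data != NULL)
 *                my_push_rx(data, len, opt);    // buffer now belongs to the caller
 *        }
 *        return 0;
 *    }
 */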
/* Brief: write a packet through DMA TX channel to the peripheral
 * Parameter: dma_dev, data pointer, length, opt
 * Return: packet length
 */
//507261:tc.chen int dma_device_write(struct dma_device_info* dma_dev, u8* dataptr, int len,void* opt)
int asmlinkage dma_device_write(struct dma_device_info *dma_dev,
                                u8 * dataptr, int len, void *opt)
{
    int chan_no = 0;
    int byte_offset = 0;
    int current_desc;
    unsigned long flag;
    struct tx_desc *tx_desc_p;
#ifdef NO_TX_INT        // 000004:fchang
    static int cnt = 0;    // 000004:fchang
#endif

    local_irq_save(flag);

    chan_no = dma_dev->logic_tx_chan_base + dma_dev->current_tx_chan;
    current_desc = g_log_chan[chan_no].current_desc;
    tx_desc_p =
        (struct tx_desc *) (g_desc_list +
                            g_log_chan[chan_no].offset_from_base +
                            current_desc);
    // 000003:tc.chen if(tx_desc_p->status.field.OWN==DMA_OWN){
    if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C == 1) {    // 000003:tc.chen
        AMAZON_DMA_DMSG("no TX desc for CPU, drop packet\n");
        g_log_chan[chan_no].intr_handler(dma_dev, TX_BUF_FULL_INT);
        local_irq_restore(flag);
        return 0;
    }
    g_log_chan[chan_no].opt[current_desc] = opt;

    /* byte offset----to adjust the starting address of the data buffer,
       should be multiple of the burst length. */
    byte_offset =
        ((u32) CPHYSADDR((u32) dataptr)) % (g_log_chan[chan_no].burst_len *
                                            4);
#ifndef CONFIG_MIPS_UNCACHED
    dma_cache_wback((unsigned long) dataptr, len);
#endif                // CONFIG_MIPS_UNCACHED

    tx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) dataptr) - byte_offset;
    tx_desc_p->status.word = DMA_DESC_OWN_DMA
        | DMA_DESC_SOP_SET
        | DMA_DESC_EOP_SET | (byte_offset << DMA_DESC_BYTEOFF_SHIFT)
        | len;

    if (is_channel_open(chan_no) == 0) {
        // turn on if necessary
        open_channel(chan_no);
    }
#ifdef DMA_NO_POLLING
    if ((AMAZON_DMA_REG32
         (AMAZON_DMA_CH0_ISR +
          chan_no * AMAZON_DMA_CH_STEP) & (DMA_ISR_DURR | DMA_ISR_CPT)) ==
        (DMA_ISR_DURR)) {
        // clear DURR if DURR is set and CPT is not set
        AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR +
                         chan_no * AMAZON_DMA_CH_STEP) = DMA_ISR_DURR;
    }
#endif

    if (current_desc == (g_log_chan[chan_no].desc_len - 1)) {
        current_desc = 0;
    } else {
        current_desc++;
    }
    g_log_chan[chan_no].current_desc = current_desc;
    tx_desc_p =
        (struct tx_desc *) (g_desc_list +
                            g_log_chan[chan_no].offset_from_base +
                            current_desc);
    // 000003:tc.chen if(tx_desc_p->status.field.OWN==DMA_OWN){
    if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C == 1) {    // 000003:tc.chen
        g_log_chan[chan_no].intr_handler(dma_dev, TX_BUF_FULL_INT);
    }
    //000004:fchang Start
#ifdef NO_TX_INT
    if (cnt++ > 10) {
        cnt = 0;
        tx_chan_intr_handler(chan_no);
    }
#endif
    //000004:fchang End
    local_irq_restore(flag);    // 000004:fchang
    return len;
}
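/* Usage sketch (illustrative): my_dma_dev and the skb handling are
 * hypothetical. On success the buffer is handed to the DMA and later released
 * through the registered buffer_free callback (see tx_chan_intr_handler); a
 * return of 0 means the descriptor ring was full and the packet was dropped.
 *
 *    my_dma_dev->current_tx_chan = 0;    // choose a logical TX channel
 *    if (dma_device_write(my_dma_dev, skb->data, skb->len, skb) == 0) {
 *        dev_kfree_skb_any(skb);         // dropped: still ours to free
 *    }
 */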
int desc_list_proc_read(char *buf, char **start, off_t offset,
                        int count, int *eof, void *data)
{
    int len = 0;
    int i;
    u32 *p = (u32 *) g_desc_list;

    len += sprintf(buf + len, "descriptor list:\n");
    for (i = 0; i < 120; i++) {
        len += sprintf(buf + len, "%d\n", i);
        len += sprintf(buf + len, "%08x\n", *(p + i * 2 + 1));
        len += sprintf(buf + len, "%08x\n", *(p + i * 2));
    }
    return len;
}
int channel_weight_proc_read(char *buf, char **start, off_t offset,
                             int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(buf + len, "QoS dma channel weight list\n");
    len += sprintf(buf + len,
                   "channel_num default_weight current_weight device Tx/Rx\n");
    len += sprintf(buf + len,
                   "     0      %08x       %08x      Switch   Rx0\n",
                   g_log_chan[0].default_weight, g_log_chan[0].weight);
    len += sprintf(buf + len,
                   "     1      %08x       %08x      Switch   Rx1\n",
                   g_log_chan[1].default_weight, g_log_chan[1].weight);
    len += sprintf(buf + len,
                   "     2      %08x       %08x      Switch   Rx2\n",
                   g_log_chan[2].default_weight, g_log_chan[2].weight);
    len += sprintf(buf + len,
                   "     3      %08x       %08x      Switch   Rx3\n",
                   g_log_chan[3].default_weight, g_log_chan[3].weight);
    len += sprintf(buf + len,
                   "     4      %08x       %08x      Switch   Tx0\n",
                   g_log_chan[4].default_weight, g_log_chan[4].weight);
    len += sprintf(buf + len,
                   "     5      %08x       %08x      Switch   Tx1\n",
                   g_log_chan[5].default_weight, g_log_chan[5].weight);
    /* len+=sprintf(buf+len,"     6      %08x       %08x      TPE      Rx0\n", g_log_chan[6].default_weight, g_log_chan[6].weight);
       len+=sprintf(buf+len,"     7      %08x       %08x      TPE      Rx0\n", g_log_chan[7].default_weight, g_log_chan[7].weight);
       len+=sprintf(buf+len,"     8      %08x       %08x      TPE      Tx0\n", g_log_chan[8].default_weight, g_log_chan[8].weight);
       len+=sprintf(buf+len,"     9      %08x       %08x      TPE      Rx0\n", g_log_chan[9].default_weight, g_log_chan[9].weight);
       len+=sprintf(buf+len,"     10     %08x       %08x      DPLUS    Rx0\n", g_log_chan[10].default_weight, g_log_chan[10].weight);
       len+=sprintf(buf+len,"     11     %08x       %08x      DPLUS    Rx0\n", g_log_chan[11].default_weight, g_log_chan[11].weight); */
    return len;
}
int dma_register_proc_read(char *buf, char **start, off_t offset,
                           int count, int *eof, void *data)
{
    int len = 0;
    dev_list *temp_dev;

    len += sprintf(buf + len, "amazon dma driver\n");
    len += sprintf(buf + len, "version 1.0\n");
    len += sprintf(buf + len, "devices registered:\n");
    for (temp_dev = g_head_dev; temp_dev; temp_dev = temp_dev->next) {
        len += sprintf(buf + len, "%s ", temp_dev->dev->device_name);
    }
    len += sprintf(buf + len, "\n");
    len += sprintf(buf + len, "CH_ON=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH_ON));
    len += sprintf(buf + len, "CH_RST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH_RST));
    len += sprintf(buf + len, "CH0_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH0_ISR));
    len += sprintf(buf + len, "CH1_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH1_ISR));
    len += sprintf(buf + len, "CH2_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH2_ISR));
    len += sprintf(buf + len, "CH3_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH3_ISR));
    len += sprintf(buf + len, "CH4_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH4_ISR));
    len += sprintf(buf + len, "CH5_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH5_ISR));
    len += sprintf(buf + len, "CH6_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH6_ISR));
    len += sprintf(buf + len, "CH7_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH7_ISR));
    len += sprintf(buf + len, "CH8_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH8_ISR));
    len += sprintf(buf + len, "CH9_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH9_ISR));
    len += sprintf(buf + len, "CH10_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH10_ISR));
    len += sprintf(buf + len, "CH11_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH11_ISR));
    len += sprintf(buf + len, "LCH0_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK));
    len += sprintf(buf + len, "LCH1_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH1_MSK));
    len += sprintf(buf + len, "LCH2_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH2_MSK));
    len += sprintf(buf + len, "LCH3_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH3_MSK));
    len += sprintf(buf + len, "LCH4_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH4_MSK));
    len += sprintf(buf + len, "LCH5_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH5_MSK));
    len += sprintf(buf + len, "LCH6_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH6_MSK));
    len += sprintf(buf + len, "LCH7_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH7_MSK));
    len += sprintf(buf + len, "LCH8_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH8_MSK));
    len += sprintf(buf + len, "LCH9_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH9_MSK));
    len += sprintf(buf + len, "LCH10_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH10_MSK));
    len += sprintf(buf + len, "LCH11_MSK=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH11_MSK));
    len += sprintf(buf + len, "Desc_BA=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_Desc_BA));
    len += sprintf(buf + len, "LCH0_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH0_DES_LEN));
    len += sprintf(buf + len, "LCH1_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH1_DES_LEN));
    len += sprintf(buf + len, "LCH2_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH2_DES_LEN));
    len += sprintf(buf + len, "LCH3_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH3_DES_LEN));
    len += sprintf(buf + len, "LCH4_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH4_DES_LEN));
    len += sprintf(buf + len, "LCH5_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH5_DES_LEN));
    len += sprintf(buf + len, "LCH6_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH6_DES_LEN));
    len += sprintf(buf + len, "LCH7_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH7_DES_LEN));
    len += sprintf(buf + len, "LCH8_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH8_DES_LEN));
    len += sprintf(buf + len, "LCH9_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH9_DES_LEN));
    len += sprintf(buf + len, "LCH10_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH10_DES_LEN));
    len += sprintf(buf + len, "LCH11_DES_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH11_DES_LEN));
    len += sprintf(buf + len, "LCH1_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH1_DES_OFST));
    len += sprintf(buf + len, "LCH2_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH2_DES_OFST));
    len += sprintf(buf + len, "LCH3_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH3_DES_OFST));
    len += sprintf(buf + len, "LCH4_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH4_DES_OFST));
    len += sprintf(buf + len, "LCH5_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH5_DES_OFST));
    len += sprintf(buf + len, "LCH6_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH6_DES_OFST));
    len += sprintf(buf + len, "LCH7_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH7_DES_OFST));
    len += sprintf(buf + len, "LCH8_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH8_DES_OFST));
    len += sprintf(buf + len, "LCH9_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH9_DES_OFST));
    len += sprintf(buf + len, "LCH10_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH10_DES_OFST));
    len += sprintf(buf + len, "LCH11_DES_OFST=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH11_DES_OFST));
    len += sprintf(buf + len, "AMAZON_DMA_SW_BL=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_SW_BL));
    len += sprintf(buf + len, "AMAZON_DMA_TPE_BL=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_TPE_BL));
    len += sprintf(buf + len, "DPlus2FPI_BL=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_DPlus2FPI_BL));
    len += sprintf(buf + len, "GRX_BUF_LEN=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_GRX_BUF_LEN));
    len += sprintf(buf + len, "DMA_ECON_REG=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_DMA_ECON_REG));
    len += sprintf(buf + len, "POLLING_REG=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_POLLING_REG));
    len += sprintf(buf + len, "CH_WGT=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_CH_WGT));
    len += sprintf(buf + len, "TX_WGT=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_TX_WGT));
    len += sprintf(buf + len, "DPlus2FPI_CLASS=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_DPLus2FPI_CLASS));
    len += sprintf(buf + len, "COMB_ISR=%08x\n", AMAZON_DMA_REG32(AMAZON_DMA_COMB_ISR));
#ifdef AMAZON_DMA_TPE_AAL5_RECOVERY
    len += sprintf(buf + len, "TPE fails:%u\n", total_dma_tpe_reset);    // 000004:fchang
#endif
    return len;
}
/* Brief: initialize DMA registers
 */
static void dma_chip_init(void)
{
    int i;
    for (i = 0; i < CHAN_TOTAL_NUM; i++) {
        AMAZON_DMA_REG32(AMAZON_DMA_CH1_DES_OFST +
                         i * AMAZON_DMA_CH_STEP) = DEFAULT_OFFSET;
    }
#ifdef DMA_NO_POLLING
    AMAZON_DMA_REG32(AMAZON_DMA_POLLING_REG) = 0;
#else
    // enable poll mode and set polling counter
    AMAZON_DMA_REG32(AMAZON_DMA_POLLING_REG) = DMA_POLLING_CNT | DMA_POLLING_ENABLE;
#endif
    // to enable DMA drop
    AMAZON_DMA_REG32(AMAZON_DMA_GRX_BUF_LEN) = 0x10000;
}
int insert_dev_list(dev_list * dev)
{
    dev_list *temp_dev;
    if (g_head_dev == NULL) {
        g_head_dev = dev;
        g_tail_dev = dev;
        dev->prev = NULL;
        dev->next = NULL;
    } else {
        /* keep the list sorted by descending weight */
        for (temp_dev = g_head_dev; temp_dev; temp_dev = temp_dev->next) {
            if (temp_dev->weight < dev->weight) {
                if (temp_dev->prev)
                    temp_dev->prev->next = dev;
                dev->prev = temp_dev->prev;
                dev->next = temp_dev;
                temp_dev->prev = dev;
                if (temp_dev == g_head_dev)
                    g_head_dev = dev;
                break;
            }
        }
        if (temp_dev == NULL) {
            /* smallest weight: append at the tail */
            g_tail_dev->next = dev;
            dev->prev = g_tail_dev;
            dev->next = NULL;
            g_tail_dev = dev;
        }
    }
    return 0;
}
u8 *common_buffer_alloc(int len, int *byte_offset, void **opt)
{
    u8 *buffer = (u8 *) kmalloc(len * sizeof(u8), GFP_KERNEL);

    *byte_offset = 0;
    return buffer;
}

int common_buffer_free(u8 * dataptr, void *opt)
{
    if (dataptr)
        kfree(dataptr);
    return 0;
}
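/* Sketch of a device-specific replacement (hypothetical): a network driver
 * would typically back RX buffers with skbs instead of plain kmalloc memory,
 * returning the skb through *opt so it comes back via dma_device_read():
 *
 *    u8 *my_buffer_alloc(int len, int *byte_offset, void **opt)
 *    {
 *        struct sk_buff *skb = dev_alloc_skb(len);
 *        if (skb == NULL)
 *            return NULL;
 *        *byte_offset = 0;
 *        *opt = skb;
 *        return skb->data;
 *    }
 */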
int register_dev(struct dma_device_info *dma_dev)
{
    int i, j, temp;
    int burst_reg = 0;
    int byte_offset = 0;
    u8 *buffer;
    void *p = NULL;
    struct rx_desc *rx_desc_p;
    struct tx_desc *tx_desc_p;

    if (strcmp(dma_dev->device_name, "switch1") == 0) {
        AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = SWITCH1_RST_MASK;    // reset
        AMAZON_DMA_REG32(AMAZON_DMA_DMA_ECON_REG) |= 0x3;    // endian conversion
        burst_reg = AMAZON_DMA_SW_BL;
        dma_dev->logic_rx_chan_base = switch_rx_chan_base;
        dma_dev->logic_tx_chan_base = switch_tx_chan_base;
    } else if (strcmp(dma_dev->device_name, "switch2") == 0) {
        AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = SWITCH2_RST_MASK;    // reset
        AMAZON_DMA_REG32(AMAZON_DMA_DMA_ECON_REG) |= 0x3;    // endian conversion
        burst_reg = AMAZON_DMA_SW_BL;
        dma_dev->logic_rx_chan_base = switch2_rx_chan_base;
        dma_dev->logic_tx_chan_base = switch2_tx_chan_base;
    } else if (strcmp(dma_dev->device_name, "TPE") == 0) {
        AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = TPE_RST_MASK;    // reset
        burst_reg = AMAZON_DMA_TPE_BL;
        dma_dev->logic_rx_chan_base = TPE_rx_chan_base;
        dma_dev->logic_tx_chan_base = TPE_tx_chan_base;
    } else if (strcmp(dma_dev->device_name, "DPlus") == 0) {
        AMAZON_DMA_REG32(AMAZON_DMA_CH_RST) = DPlus2FPI_RST_MASK;    // reset
        dma_dev->logic_rx_chan_base = DPLus2FPI_rx_chan_base;
        dma_dev->logic_tx_chan_base = DPLus2FPI_tx_chan_base;
    }

    /* encode the TX and RX burst lengths into the burst register */
    i = 0;
    for (temp = dma_dev->tx_burst_len; temp > 2; temp /= 2) {
        i += 1;
    }
    AMAZON_DMA_REG32(burst_reg) = i << 1;
    i = 0;
    for (temp = dma_dev->rx_burst_len; temp > 2; temp /= 2) {
        i += 1;
    }
    AMAZON_DMA_REG32(burst_reg) += i;

    for (i = 0; i < dma_dev->num_rx_chan; i++) {
        temp = dma_dev->logic_rx_chan_base + i;
        g_log_chan[temp].dma_dev = dma_dev;
        g_log_chan[temp].weight = dma_dev->rx_chan[i].weight;
        g_log_chan[temp].default_weight = dma_dev->rx_chan[i].weight;
        g_log_chan[temp].current_desc = 0;
        g_log_chan[temp].desc_ofst = DEFAULT_OFFSET;
        g_log_chan[temp].desc_len = dma_dev->rx_chan[i].desc_num;
        g_log_chan[temp].offset_from_base = temp * DEFAULT_OFFSET;
        g_log_chan[temp].packet_size = dma_dev->rx_chan[i].packet_size;

        AMAZON_DMA_REG32(AMAZON_DMA_CH0_DES_LEN + temp * AMAZON_DMA_CH_STEP) = dma_dev->rx_chan[i].desc_num;
        // enable interrupt mask
        if (temp == 4 || temp == 5) {
            AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x32;
        } else {
            AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x36;
        }
        strcpy(g_log_chan[temp].device_name, dma_dev->device_name);
        g_log_chan[temp].burst_len = dma_dev->rx_burst_len;
        g_log_chan[temp].control = dma_dev->rx_chan[i].control;

        /* specify the buffer allocation and free method */
        if (dma_dev->buffer_alloc)
            g_log_chan[temp].buffer_alloc = dma_dev->buffer_alloc;
        else
            g_log_chan[temp].buffer_alloc = common_buffer_alloc;

        if (dma_dev->buffer_free)
            g_log_chan[temp].buffer_free = dma_dev->buffer_free;
        else
            g_log_chan[temp].buffer_free = common_buffer_free;

        if (dma_dev->intr_handler)
            g_log_chan[temp].intr_handler = dma_dev->intr_handler;
        else
            g_log_chan[temp].intr_handler = NULL;

        for (j = 0; j < g_log_chan[temp].desc_len; j++) {
            rx_desc_p = (struct rx_desc *) (g_desc_list + g_log_chan[temp].offset_from_base + j);
            rx_desc_p->status.word = 0;
            rx_desc_p->status.field.data_length = g_log_chan[temp].packet_size;
            buffer = (u8 *) g_log_chan[temp].buffer_alloc(g_log_chan[temp].packet_size, &byte_offset, &p);
            rx_desc_p->Data_Pointer = (u32) CPHYSADDR((u32) buffer);
            rx_desc_p->status.field.byte_offset = byte_offset;
            /* fix me, should check if the address complies with the burst
               length requirement */
            g_log_chan[temp].opt[j] = p;
            rx_desc_p->status.field.OWN = DMA_OWN;
        }
        /* open or close the channel */
        if (g_log_chan[temp].control)
            open_channel(temp);
        else
            close_channel(temp);
    }

    for (i = 0; i < dma_dev->num_tx_chan; i++) {
        temp = dma_dev->logic_tx_chan_base + i;
        g_log_chan[temp].dma_dev = dma_dev;
        g_log_chan[temp].weight = dma_dev->tx_chan[i].weight;
        g_log_chan[temp].default_weight = dma_dev->tx_chan[i].weight;
        g_log_chan[temp].current_desc = 0;
        g_log_chan[temp].desc_ofst = DEFAULT_OFFSET;
        g_log_chan[temp].desc_len = dma_dev->tx_chan[i].desc_num;
        g_log_chan[temp].offset_from_base = temp * DEFAULT_OFFSET;
        g_log_chan[temp].packet_size = dma_dev->tx_chan[i].packet_size;

        AMAZON_DMA_REG32(AMAZON_DMA_CH0_DES_LEN + temp * AMAZON_DMA_CH_STEP) = dma_dev->tx_chan[i].desc_num;
        // enable interrupt mask
#ifdef NO_TX_INT
        AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x3e;
#else
        AMAZON_DMA_REG32(AMAZON_DMA_CH0_MSK + temp * AMAZON_DMA_CH_STEP) = 0x36;
#endif
        strcpy(g_log_chan[temp].device_name, dma_dev->device_name);
        g_log_chan[temp].burst_len = dma_dev->tx_burst_len;
        g_log_chan[temp].control = dma_dev->tx_chan[i].control;

        if (dma_dev->buffer_alloc)
            g_log_chan[temp].buffer_alloc = dma_dev->buffer_alloc;
        else
            g_log_chan[temp].buffer_alloc = common_buffer_alloc;

        if (dma_dev->buffer_free)
            g_log_chan[temp].buffer_free = dma_dev->buffer_free;
        else
            g_log_chan[temp].buffer_free = common_buffer_free;

        if (dma_dev->intr_handler)
            g_log_chan[temp].intr_handler = dma_dev->intr_handler;
        else
            g_log_chan[temp].intr_handler = NULL;

        for (j = 0; j < g_log_chan[temp].desc_len; j++) {
            tx_desc_p =
                (struct tx_desc *) (g_desc_list +
                                    g_log_chan[temp].offset_from_base + j);
            tx_desc_p->status.word = 0;
            tx_desc_p->status.field.data_length =
                g_log_chan[temp].packet_size;
            tx_desc_p->status.field.OWN = CPU_OWN;
        }
        /* workaround DMA pitfall, we never turn on channel if we don't
           have proper descriptors */
        if (!g_log_chan[temp].control) {
            close_channel(temp);
        }
    }

    return 0;
}
int dma_device_register(struct dma_device_info *dma_dev)
{
    dev_list *temp_dev;
    temp_dev = (dev_list *) kmalloc(sizeof(dev_list), GFP_KERNEL);
    if (temp_dev == NULL)
        return -ENOMEM;
    temp_dev->dev = dma_dev;
    temp_dev->weight = dma_dev->weight;
    insert_dev_list(temp_dev);
    /* check whether this is a known device */
    if ((strcmp(dma_dev->device_name, "switch1") == 0)
        || (strcmp(dma_dev->device_name, "TPE") == 0)
        || (strcmp(dma_dev->device_name, "switch2") == 0)
        || (strcmp(dma_dev->device_name, "DPlus") == 0)) {
        register_dev(dma_dev);
    }
    return 0;
}
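/* Registration sketch (illustrative): a peripheral driver fills in a
 * dma_device_info before registering; all values below are hypothetical, and
 * only the four device names checked above are wired to real channels.
 *
 *    static struct dma_device_info my_dev;
 *
 *    strcpy(my_dev.device_name, "switch1");
 *    my_dev.weight = 1;
 *    my_dev.num_rx_chan = 1;
 *    my_dev.num_tx_chan = 1;
 *    my_dev.rx_burst_len = 4;
 *    my_dev.tx_burst_len = 4;
 *    my_dev.rx_chan[0].packet_size = 1536;
 *    my_dev.rx_chan[0].desc_num = 10;
 *    my_dev.rx_chan[0].weight = 1;
 *    my_dev.rx_chan[0].control = 1;            // open the channel on register
 *    my_dev.tx_chan[0].desc_num = 10;
 *    my_dev.tx_chan[0].control = 1;
 *    my_dev.intr_handler = my_intr_handler;    // gets RCV_INT / TRANSMIT_CPT_INT / TX_BUF_FULL_INT
 *    my_dev.buffer_alloc = NULL;               // NULL falls back to common_buffer_alloc
 *    my_dev.buffer_free = NULL;
 *    dma_device_register(&my_dev);
 */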
int unregister_dev(struct dma_device_info *dma_dev)
{
    int i, j, temp;
    u8 *buffer;
    struct rx_desc *rx_desc_p;

    for (i = 0; i < dma_dev->num_rx_chan; i++) {
        temp = dma_dev->logic_rx_chan_base + i;
        close_channel(temp);
        for (j = 0; j < g_log_chan[temp].desc_len; j++) {
            rx_desc_p =
                (struct rx_desc *) (g_desc_list +
                                    g_log_chan[temp].offset_from_base + j);
            buffer = (u8 *) __va(rx_desc_p->Data_Pointer);
            g_log_chan[temp].buffer_free(buffer, g_log_chan[temp].opt[j]);
        }
    }
    for (i = 0; i < dma_dev->num_tx_chan; i++) {
        temp = dma_dev->logic_tx_chan_base + i;
        close_channel(temp);
    }
    return 0;
}
int dma_device_unregister(struct dma_device_info *dev)
{
    dev_list *temp_dev;
    for (temp_dev = g_head_dev; temp_dev; temp_dev = temp_dev->next) {
        if (strcmp(dev->device_name, temp_dev->dev->device_name) == 0) {
            if ((strcmp(dev->device_name, "switch1") == 0)
                || (strcmp(dev->device_name, "TPE") == 0)
                || (strcmp(dev->device_name, "switch2") == 0)
                || (strcmp(dev->device_name, "DPlus") == 0))
                unregister_dev(dev);
            if (temp_dev == g_head_dev) {
                g_head_dev = temp_dev->next;
            }
            if (temp_dev == g_tail_dev)
                g_tail_dev = temp_dev->prev;
            if (temp_dev->prev)
                temp_dev->prev->next = temp_dev->next;
            if (temp_dev->next)
                temp_dev->next->prev = temp_dev->prev;
            kfree(temp_dev);
            break;
        }
    }
    return 0;
}
void dma_device_update_rx(struct dma_device_info *dma_dev)
{
    int i, temp;
    for (i = 0; i < dma_dev->num_rx_chan; i++) {
        temp = dma_dev->logic_rx_chan_base + i;
        g_log_chan[temp].control = dma_dev->rx_chan[i].control;

        if (g_log_chan[temp].control)
            open_channel(temp);
        else
            close_channel(temp);
    }
}
void dma_device_update_tx(struct dma_device_info *dma_dev)
{
    int i, temp;
    for (i = 0; i < dma_dev->num_tx_chan; i++) {
        temp = dma_dev->logic_tx_chan_base + i;
        g_log_chan[temp].control = dma_dev->tx_chan[i].control;
        if (g_log_chan[temp].control) {
            /* we turn on the channel when we send out the very first packet */
            // open_channel(temp);
        } else {
            close_channel(temp);
        }
    }
}
int dma_device_update(struct dma_device_info *dma_dev)
{
    dma_device_update_rx(dma_dev);
    dma_device_update_tx(dma_dev);

    return 0;
}
static int dma_open(struct inode *inode, struct file *file)
{
    return 0;
}

static int dma_release(struct inode *inode, struct file *file)
{
    /* release the resources */
    return 0;
}

static int dma_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
    int chan_no = 0;
    int value = 0;

    switch (cmd) {
    case 0:        /* get register value */
        break;
    case 1:        /* return channel weight */
        chan_no = *((int *) arg);
        *((int *) arg + 1) = g_log_chan[chan_no].default_weight;
        break;
    case 2:        /* set channel weight */
        chan_no = *((int *) arg);
        value = *((int *) arg + 1);
        printk("new weight=%08x\n", value);
        g_log_chan[chan_no].default_weight = value;
        break;
    default:
        break;
    }
    return 0;
}
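/* Userspace sketch (illustrative): the weight ioctls exchange an int pair
 * through arg. The device node path below is hypothetical; it must be a
 * character node created for DMA_MAJOR.
 *
 *    int pair[2] = { 4, 0 };       // pair[0]: logical channel number
 *    int fd = open("/dev/dma-core", O_RDWR);
 *    ioctl(fd, 1, pair);           // cmd 1: pair[1] receives default_weight
 *    pair[1] = 0x100;
 *    ioctl(fd, 2, pair);           // cmd 2: set a new default_weight
 *    close(fd);
 */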
static struct file_operations dma_fops = {
    open:dma_open,
    release:dma_release,
    ioctl:dma_ioctl,
};
static int dma_init(void)
{
    int result = 0;
    int i;

    printk("initialising dma core\n");
    result = register_chrdev(DMA_MAJOR, "dma-core", &dma_fops);
    if (result) {
        AMAZON_DMA_EMSG("cannot register device dma-core!\n");
        return result;
    }
    result = request_irq(AMAZON_DMA_INT, dma_interrupt, IRQF_DISABLED, "dma-core", (void *) &dma_interrupt);
    if (result) {
        AMAZON_DMA_EMSG("error, cannot get dma_irq!\n");
        unregister_chrdev(DMA_MAJOR, "dma-core");
        return -EFAULT;
    }

    g_desc_list = (u64 *) KSEG1ADDR(__get_free_page(GFP_DMA));
    if (g_desc_list == NULL) {
        AMAZON_DMA_EMSG("no memory for descriptor\n");
        free_irq(AMAZON_DMA_INT, (void *) &dma_interrupt);
        unregister_chrdev(DMA_MAJOR, "dma-core");
        return -ENOMEM;
    }
    memset(g_desc_list, 0, PAGE_SIZE);
    AMAZON_DMA_REG32(AMAZON_DMA_Desc_BA) = (u32) CPHYSADDR((u32) g_desc_list);
    g_amazon_dma_dir = proc_mkdir("amazon_dma", NULL);
    create_proc_read_entry("dma_register", 0, g_amazon_dma_dir, dma_register_proc_read, NULL);
    create_proc_read_entry("g_desc_list", 0, g_amazon_dma_dir, desc_list_proc_read, NULL);
    create_proc_read_entry("channel_weight", 0, g_amazon_dma_dir, channel_weight_proc_read, NULL);

    dma_chip_init();
    for (i = 0; i < (RX_CHAN_NUM + 1); i++) {
        rx_chan_list[i] = -1;
    }
    for (i = 0; i < (TX_CHAN_NUM + 1); i++) {
        tx_chan_list[i] = -1;
    }
    for (i = 0; i < CHAN_TOTAL_NUM; i++) {
        comb_isr_mask[i] = 0x80000000 >> (i);
    }

    /* sentinel entry so the WFQ comparison always has a baseline */
    g_log_chan[CHAN_TOTAL_NUM].weight = 0;
    printk("initialising dma core ... done\n");

    return 0;
}

arch_initcall(dma_init);
void dma_cleanup(void)
{
    dev_list *temp_dev, *next_dev;

    unregister_chrdev(DMA_MAJOR, "dma-core");
    for (temp_dev = g_head_dev; temp_dev; temp_dev = next_dev) {
        next_dev = temp_dev->next;
        kfree(temp_dev);
    }
    free_page(KSEG0ADDR((unsigned long) g_desc_list));
    remove_proc_entry("channel_weight", g_amazon_dma_dir);
    remove_proc_entry("g_desc_list", g_amazon_dma_dir);
    remove_proc_entry("dma_register", g_amazon_dma_dir);
    remove_proc_entry("amazon_dma", NULL);
    /* release the resources */
    free_irq(AMAZON_DMA_INT, (void *) &dma_interrupt);
}

EXPORT_SYMBOL(dma_device_register);
EXPORT_SYMBOL(dma_device_unregister);
EXPORT_SYMBOL(dma_device_read);
EXPORT_SYMBOL(dma_device_write);
EXPORT_SYMBOL(dma_device_update);
EXPORT_SYMBOL(dma_device_update_rx);
EXPORT_SYMBOL(dma_device_update_tx);

MODULE_LICENSE("GPL");