/******************************************************************************
** FILE NAME    : ifxmips_atm_core.c
** DESCRIPTION  : ATM driver common source file (core functions)
** COPYRIGHT    : Copyright (c) 2006
**                Infineon Technologies AG
**                Am Campeon 1-12, 85579 Neubiberg, Germany
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** $Date        $Author      $Comment
** 07 JUL 2009  Xu Liang     Init Version
*******************************************************************************/
/*
 * ####################################
 *              Version No.
 * ####################################
 */

#define IFX_ATM_VER_MAJOR               1
#define IFX_ATM_VER_MID                 0
#define IFX_ATM_VER_MINOR               8
/*
 * ####################################
 *              Head File
 * ####################################
 */

/*
 *  Common Head File
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/clk.h>

/*
 *  Chip Specific Head File
 */
#include <lantiq_regs.h>
#include "ifxmips_atm_core.h"
/*
 * ####################################
 *   Kernel Version Adaption
 * ####################################
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
  #define MODULE_PARM_ARRAY(a, b)   module_param_array(a, int, NULL, 0)
  #define MODULE_PARM(a, b)         module_param(a, int, 0)
#else
  #define MODULE_PARM_ARRAY(a, b)   MODULE_PARM(a, b)
#endif
/*!
  \addtogroup IFXMIPS_ATM_MODULE_PARAMS
 */

/*
 * ####################################
 *   Parameters to Configure PPE
 * ####################################
 */

/*!
  \brief QSB cell delay variation due to concurrency
 */
static int qsb_tau = 1;                         /*  QSB cell delay variation due to concurrency */
/*!
  \brief QSB scheduler burst length
 */
static int qsb_srvm = 0x0F;                     /*  QSB scheduler burst length                  */
/*!
  \brief QSB time step, all legal values are 1, 2, 4
 */
static int qsb_tstep = 4;                       /*  QSB time step, all legal values are 1, 2, 4 */

/*!
  \brief Write descriptor delay
 */
static int write_descriptor_delay = 0x20;       /*  Write descriptor delay                      */

/*!
  \brief AAL5 padding byte ('~')
 */
static int aal5_fill_pattern = 0x007E;          /*  AAL5 padding byte ('~')                     */
/*!
  \brief Max frame size for RX
 */
static int aal5r_max_packet_size = 0x0700;      /*  Max frame size for RX                       */
/*!
  \brief Min frame size for RX
 */
static int aal5r_min_packet_size = 0x0000;      /*  Min frame size for RX                       */
/*!
  \brief Max frame size for TX
 */
static int aal5s_max_packet_size = 0x0700;      /*  Max frame size for TX                       */
/*!
  \brief Min frame size for TX
 */
static int aal5s_min_packet_size = 0x0000;      /*  Min frame size for TX                       */
/*!
  \brief Drop error packet in RX path
 */
static int aal5r_drop_error_packet = 1;         /*  Drop error packet in RX path                */

/*!
  \brief Number of descriptors per DMA RX channel
 */
static int dma_rx_descriptor_length = 128;      /*  Number of descriptors per DMA RX channel    */
/*!
  \brief Number of descriptors per DMA TX channel
 */
static int dma_tx_descriptor_length = 64;       /*  Number of descriptors per DMA TX channel    */
/*!
  \brief PPE core clock cycles between descriptor write and effectiveness in external RAM
 */
static int dma_rx_clp1_descriptor_threshold = 38;
MODULE_PARM(qsb_tau, "i");
MODULE_PARM_DESC(qsb_tau, "Cell delay variation. Value must be > 0");
MODULE_PARM(qsb_srvm, "i");
MODULE_PARM_DESC(qsb_srvm, "Maximum burst size");
MODULE_PARM(qsb_tstep, "i");
MODULE_PARM_DESC(qsb_tstep, "n*32 cycles per SBS cycle, n=1,2,4");

MODULE_PARM(write_descriptor_delay, "i");
MODULE_PARM_DESC(write_descriptor_delay, "PPE core clock cycles between descriptor write and effectiveness in external RAM");

MODULE_PARM(aal5_fill_pattern, "i");
MODULE_PARM_DESC(aal5_fill_pattern, "Filling pattern (PAD) for AAL5 frames");
MODULE_PARM(aal5r_max_packet_size, "i");
MODULE_PARM_DESC(aal5r_max_packet_size, "Max packet size in bytes for downstream AAL5 frames");
MODULE_PARM(aal5r_min_packet_size, "i");
MODULE_PARM_DESC(aal5r_min_packet_size, "Min packet size in bytes for downstream AAL5 frames");
MODULE_PARM(aal5s_max_packet_size, "i");
MODULE_PARM_DESC(aal5s_max_packet_size, "Max packet size in bytes for upstream AAL5 frames");
MODULE_PARM(aal5s_min_packet_size, "i");
MODULE_PARM_DESC(aal5s_min_packet_size, "Min packet size in bytes for upstream AAL5 frames");
MODULE_PARM(aal5r_drop_error_packet, "i");
MODULE_PARM_DESC(aal5r_drop_error_packet, "Non-zero value to drop error packets in downstream");

MODULE_PARM(dma_rx_descriptor_length, "i");
MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptors assigned to DMA RX channel (>16)");
MODULE_PARM(dma_tx_descriptor_length, "i");
MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptors assigned to DMA TX channel (>16)");
MODULE_PARM(dma_rx_clp1_descriptor_threshold, "i");
MODULE_PARM_DESC(dma_rx_clp1_descriptor_threshold, "Descriptor threshold for cells with cell loss priority 1");
/*
 * ####################################
 *              Definition
 * ####################################
 */

#define DUMP_SKB_LEN                    ~0
/*
 * ####################################
 *             Declaration
 * ####################################
 */

/*
 *  ATM Operations
 */
static int ppe_ioctl(struct atm_dev *, unsigned int, void *);
static int ppe_open(struct atm_vcc *);
static void ppe_close(struct atm_vcc *);
static int ppe_send(struct atm_vcc *, struct sk_buff *);
static int ppe_send_oam(struct atm_vcc *, void *, int);
static int ppe_change_qos(struct atm_vcc *, struct atm_qos *, int);

/*
 *  ADSL LED
 */
static INLINE int adsl_led_flash(void);

/*
 *  64-bit operation used by MIB calculation
 */
static INLINE void u64_add_u32(ppe_u64_t, unsigned int, ppe_u64_t *);

/*
 *  buffer manage functions
 */
static INLINE struct sk_buff *alloc_skb_rx(void);
static INLINE struct sk_buff *alloc_skb_tx(unsigned int);
struct sk_buff *atm_alloc_tx(struct atm_vcc *, unsigned int);
static INLINE void atm_free_tx_skb_vcc(struct sk_buff *, struct atm_vcc *);
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int);
static INLINE int get_tx_desc(unsigned int);

/*
 *  mailbox handler and signal function
 */
static INLINE void mailbox_oam_rx_handler(void);
static INLINE void mailbox_aal_rx_handler(void);
#if defined(ENABLE_TASKLET) && ENABLE_TASKLET
static void do_ppe_tasklet(unsigned long);
#endif
static irqreturn_t mailbox_irq_handler(int, void *);
static INLINE void mailbox_signal(unsigned int, int);

/*
 *  QSB & HTU setting functions
 */
static void set_qsb(struct atm_vcc *, struct atm_qos *, unsigned int);
static void qsb_global_set(void);
static INLINE void set_htu_entry(unsigned int, unsigned int, unsigned int, int, int);
static INLINE void clear_htu_entry(unsigned int);
static void validate_oam_htu_entry(void);
static void invalidate_oam_htu_entry(void);

/*
 *  look up for connection ID
 */
static INLINE int find_vpi(unsigned int);
static INLINE int find_vpivci(unsigned int, unsigned int);
static INLINE int find_vcc(struct atm_vcc *);

/*
 *  Debug Functions
 */
#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
static void dump_skb(struct sk_buff *, u32, char *, int, int, int);
#else
#define dump_skb(skb, len, title, port, ch, is_tx)    do {} while (0)
#endif

/*
 *  Proc File Functions
 */
static INLINE void proc_file_create(void);
static INLINE void proc_file_delete(void);
static int proc_read_version(char *, char **, off_t, int, int *, void *);
static int proc_read_mib(char *, char **, off_t, int, int *, void *);
static int proc_write_mib(struct file *, const char *, unsigned long, void *);
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static int proc_read_dbg(char *, char **, off_t, int, int *, void *);
static int proc_write_dbg(struct file *, const char *, unsigned long, void *);
#endif
#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
static int proc_read_htu(char *, char **, off_t, int, int *, void *);
static int proc_read_txq(char *, char **, off_t, int, int *, void *);
#endif

/*
 *  Proc Help Functions
 */
static int stricmp(const char *, const char *);
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static int strincmp(const char *, const char *, int);
#endif
static INLINE int ifx_atm_version(char *);
//static INLINE int print_reset_domain(char *, int);
//static INLINE int print_reset_handler(char *, int, ifx_rcu_handler_t *);

/*
 *  Init & clean-up functions
 */
static INLINE void reset_ppe(void);
static INLINE void check_parameters(void);
static INLINE int init_priv_data(void);
static INLINE void clear_priv_data(void);
static INLINE void init_rx_tables(void);
static INLINE void init_tx_tables(void);
/*
 *  External Functions
 */
#if defined(CONFIG_IFX_OAM) || defined(CONFIG_IFX_OAM_MODULE)
extern void ifx_push_oam(unsigned char *);
#else
static inline void ifx_push_oam(unsigned char *dummy) {}
#endif

#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
extern int ifx_mei_atm_led_blink(void);
extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
#else
static inline int ifx_mei_atm_led_blink(void) { return IFX_SUCCESS; }
static inline int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr)
{
    if ( is_showtime != NULL )
        *is_showtime = 0;
    return IFX_SUCCESS;
}
#endif

/*
 *  External variable
 */
extern struct sk_buff* (*ifx_atm_alloc_tx)(struct atm_vcc *, unsigned int);
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
extern int (*ifx_mei_atm_showtime_exit)(void);
#else
int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL;
EXPORT_SYMBOL(ifx_mei_atm_showtime_enter);
int (*ifx_mei_atm_showtime_exit)(void) = NULL;
EXPORT_SYMBOL(ifx_mei_atm_showtime_exit);
#endif
/*
 * ####################################
 *            Local Variable
 * ####################################
 */

static struct atm_priv_data g_atm_priv_data;

static struct atmdev_ops g_ifx_atm_ops = {
    .open       = ppe_open,
    .close      = ppe_close,
    .ioctl      = ppe_ioctl,
    .send       = ppe_send,
    .send_oam   = ppe_send_oam,
    .change_qos = ppe_change_qos,
    .owner      = THIS_MODULE,
};

#if defined(ENABLE_TASKLET) && ENABLE_TASKLET
DECLARE_TASKLET(g_dma_tasklet, do_ppe_tasklet, 0);
#endif

static int g_showtime = 0;
static void *g_xdata_addr = NULL;

unsigned int ifx_atm_dbg_enable = 0;

static struct proc_dir_entry *g_atm_dir = NULL;
/*
 * ####################################
 *            Local Function
 * ####################################
 */

static int ppe_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
{
    int ret = 0;
    atm_cell_ifEntry_t mib_cell;
    atm_aal5_ifEntry_t mib_aal5;
    atm_aal5_vcc_x_t mib_vcc;
    unsigned int value;
    int conn;

    if ( _IOC_TYPE(cmd) != PPE_ATM_IOC_MAGIC
        || _IOC_NR(cmd) >= PPE_ATM_IOC_MAXNR )
        return -ENOTTY;

    if ( _IOC_DIR(cmd) & _IOC_READ )
        ret = !access_ok(VERIFY_WRITE, arg, _IOC_SIZE(cmd));
    else if ( _IOC_DIR(cmd) & _IOC_WRITE )
        ret = !access_ok(VERIFY_READ, arg, _IOC_SIZE(cmd));
    if ( ret )
        return -EFAULT;

    switch ( cmd ) {
    case PPE_ATM_MIB_CELL:  /*  cell level MIB  */
        /*  These MIB should be read at ARC side, now put zero only.    */
        mib_cell.ifHCInOctets_h    = 0;
        mib_cell.ifHCInOctets_l    = 0;
        mib_cell.ifHCOutOctets_h   = 0;
        mib_cell.ifHCOutOctets_l   = 0;
        mib_cell.ifInErrors        = 0;
        mib_cell.ifInUnknownProtos = WAN_MIB_TABLE->wrx_drophtu_cell;
        mib_cell.ifOutErrors       = 0;

        ret = sizeof(mib_cell) - copy_to_user(arg, &mib_cell, sizeof(mib_cell));
        break;

    case PPE_ATM_MIB_AAL5:  /*  AAL5 MIB    */
        value = WAN_MIB_TABLE->wrx_total_byte;
        u64_add_u32(g_atm_priv_data.wrx_total_byte, value - g_atm_priv_data.prev_wrx_total_byte, &g_atm_priv_data.wrx_total_byte);
        g_atm_priv_data.prev_wrx_total_byte = value;
        mib_aal5.ifHCInOctets_h = g_atm_priv_data.wrx_total_byte.h;
        mib_aal5.ifHCInOctets_l = g_atm_priv_data.wrx_total_byte.l;

        value = WAN_MIB_TABLE->wtx_total_byte;
        u64_add_u32(g_atm_priv_data.wtx_total_byte, value - g_atm_priv_data.prev_wtx_total_byte, &g_atm_priv_data.wtx_total_byte);
        g_atm_priv_data.prev_wtx_total_byte = value;
        mib_aal5.ifHCOutOctets_h = g_atm_priv_data.wtx_total_byte.h;
        mib_aal5.ifHCOutOctets_l = g_atm_priv_data.wtx_total_byte.l;

        mib_aal5.ifInUcastPkts  = g_atm_priv_data.wrx_pdu;
        mib_aal5.ifOutUcastPkts = WAN_MIB_TABLE->wtx_total_pdu;
        mib_aal5.ifInErrors     = WAN_MIB_TABLE->wrx_err_pdu;
        mib_aal5.ifInDiscards   = WAN_MIB_TABLE->wrx_dropdes_pdu + g_atm_priv_data.wrx_drop_pdu;
        mib_aal5.ifOutErros     = g_atm_priv_data.wtx_err_pdu;  /*  field name carries a typo in the MIB header */
        mib_aal5.ifOutDiscards  = g_atm_priv_data.wtx_drop_pdu;

        ret = sizeof(mib_aal5) - copy_to_user(arg, &mib_aal5, sizeof(mib_aal5));
        break;

    case PPE_ATM_MIB_VCC:   /*  VCC related MIB */
        copy_from_user(&mib_vcc, arg, sizeof(mib_vcc));
        conn = find_vpivci(mib_vcc.vpi, mib_vcc.vci);
        if ( conn >= 0 ) {
            mib_vcc.mib_vcc.aal5VccCrcErrors     = g_atm_priv_data.conn[conn].aal5_vcc_crc_err;
            mib_vcc.mib_vcc.aal5VccOverSizedSDUs = g_atm_priv_data.conn[conn].aal5_vcc_oversize_sdu;
            mib_vcc.mib_vcc.aal5VccSarTimeOuts   = 0;   /*  no timer support    */
            ret = sizeof(mib_vcc) - copy_to_user(arg, &mib_vcc, sizeof(mib_vcc));
        }
        else
            ret = -EINVAL;
        break;

    default:
        ret = -ENOIOCTLCMD;
    }

    return ret;
}
static int ppe_open(struct atm_vcc *vcc)
{
    int ret;
    short vpi = vcc->vpi;
    int vci = vcc->vci;
    struct port *port = &g_atm_priv_data.port[(int)vcc->dev->dev_data];
    int conn;
    int f_enable_irq = 0;
#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX
    unsigned long sys_flag;
#endif

    if ( vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0 )
        return -EPROTONOSUPPORT;

    /*  check bandwidth */
    if ( (vcc->qos.txtp.traffic_class == ATM_CBR && vcc->qos.txtp.max_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
        || (vcc->qos.txtp.traffic_class == ATM_VBR_RT && vcc->qos.txtp.max_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
        || (vcc->qos.txtp.traffic_class == ATM_VBR_NRT && vcc->qos.txtp.scr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
        || (vcc->qos.txtp.traffic_class == ATM_UBR_PLUS && vcc->qos.txtp.min_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate)) ) {
        ret = -EINVAL;
        goto PPE_OPEN_EXIT;
    }

    /*  check existing vpi,vci  */
    conn = find_vpivci(vpi, vci);
    if ( conn >= 0 ) {
        ret = -EADDRINUSE;
        goto PPE_OPEN_EXIT;
    }

    /*  check whether it is necessary to enable irq */
    if ( g_atm_priv_data.conn_table == 0 )
        f_enable_irq = 1;

    /*  allocate connection */
    for ( conn = 0; conn < MAX_PVC_NUMBER; conn++ ) {
        if ( test_and_set_bit(conn, &g_atm_priv_data.conn_table) == 0 ) {
            g_atm_priv_data.conn[conn].vcc = vcc;
            break;
        }
    }
    if ( conn == MAX_PVC_NUMBER ) {
        ret = -EINVAL;
        goto PPE_OPEN_EXIT;
    }

    /*  reserve bandwidth   */
    switch ( vcc->qos.txtp.traffic_class ) {
    case ATM_CBR:
    case ATM_VBR_RT:
        port->tx_current_cell_rate += vcc->qos.txtp.max_pcr;
        break;
    case ATM_VBR_NRT:
        port->tx_current_cell_rate += vcc->qos.txtp.scr;
        break;
    case ATM_UBR_PLUS:
        port->tx_current_cell_rate += vcc->qos.txtp.min_pcr;
        break;
    }

    /*  setup QSB queue */
    set_qsb(vcc, &vcc->qos, conn);

    /*  update atm_vcc structure    */
    vcc->itf = (int)vcc->dev->dev_data;
    set_bit(ATM_VF_READY, &vcc->flags);

    /*  enable irq  */
    if ( f_enable_irq ) {
        ifx_atm_alloc_tx = atm_alloc_tx;

        *MBOX_IGU1_ISRC = (1 << RX_DMA_CH_AAL) | (1 << RX_DMA_CH_OAM);
        *MBOX_IGU1_IER  = (1 << RX_DMA_CH_AAL) | (1 << RX_DMA_CH_OAM);

        enable_irq(PPE_MAILBOX_IGU1_INT);
    }

    /*  set port    */
    WTX_QUEUE_CONFIG(conn)->sbid = (int)vcc->dev->dev_data;

    /*  set htu entry   */
    set_htu_entry(vpi, vci, conn, vcc->qos.aal == ATM_AAL5 ? 1 : 0, 0);

#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX
    //  ReTX: occupy second QID
    local_irq_save(sys_flag);
    if ( g_retx_htu && vcc->qos.aal == ATM_AAL5 )
    {
        int retx_conn = (conn + 8) % 16;    //  ReTX queue

        if ( retx_conn < MAX_PVC_NUMBER && test_and_set_bit(retx_conn, &g_atm_priv_data.conn_table) == 0 ) {
            g_atm_priv_data.conn[retx_conn].vcc = vcc;
            set_htu_entry(vpi, vci, retx_conn, vcc->qos.aal == ATM_AAL5 ? 1 : 0, 1);
        }
    }
    local_irq_restore(sys_flag);
#endif

    ret = 0;

PPE_OPEN_EXIT:
    return ret;
}
static void ppe_close(struct atm_vcc *vcc)
{
    int conn;
    struct port *port;
    struct connection *connection;

    if ( vcc == NULL )
        return;

    /*  get connection id   */
    conn = find_vcc(vcc);
    if ( conn < 0 ) {
        err("can't find vcc");
        goto PPE_CLOSE_EXIT;
    }
    connection = &g_atm_priv_data.conn[conn];
    port = &g_atm_priv_data.port[connection->port];

    /*  clear htu   */
    clear_htu_entry(conn);

    /*  release connection  */
    clear_bit(conn, &g_atm_priv_data.conn_table);
    connection->vcc = NULL;
    connection->aal5_vcc_crc_err = 0;
    connection->aal5_vcc_oversize_sdu = 0;

    /*  disable irq */
    if ( g_atm_priv_data.conn_table == 0 ) {
        disable_irq(PPE_MAILBOX_IGU1_INT);
        ifx_atm_alloc_tx = NULL;
    }

    /*  release bandwidth   */
    switch ( vcc->qos.txtp.traffic_class )
    {
    case ATM_CBR:
    case ATM_VBR_RT:
        port->tx_current_cell_rate -= vcc->qos.txtp.max_pcr;
        break;
    case ATM_VBR_NRT:
        port->tx_current_cell_rate -= vcc->qos.txtp.scr;
        break;
    case ATM_UBR_PLUS:
        port->tx_current_cell_rate -= vcc->qos.txtp.min_pcr;
        break;
    }

PPE_CLOSE_EXIT:
    return;
}
static int ppe_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
    int ret;
    int conn;
    int desc_base;
    struct tx_descriptor reg_desc = {0};

    if ( vcc == NULL || skb == NULL )
        return -EINVAL;

    skb_get(skb);
    atm_free_tx_skb_vcc(skb, vcc);

    conn = find_vcc(vcc);
    if ( conn < 0 ) {
        ret = -EINVAL;
        goto FIND_VCC_FAIL;
    }

    if ( !g_showtime ) {
        err("not in showtime");
        ret = -EIO;
        goto PPE_SEND_FAIL;
    }

    if ( vcc->qos.aal == ATM_AAL5 ) {
        int byteoff;
        int datalen;
        struct tx_inband_header *header;

        datalen = skb->len;
        byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);

        if ( skb_headroom(skb) < byteoff + TX_INBAND_HEADER_LENGTH ) {
            struct sk_buff *new_skb;

            new_skb = alloc_skb_tx(datalen);
            if ( new_skb == NULL ) {
                err("ALLOC_SKB_TX_FAIL");
                ret = -ENOMEM;
                goto PPE_SEND_FAIL;
            }
            skb_put(new_skb, datalen);
            memcpy(new_skb->data, skb->data, datalen);
            dev_kfree_skb_any(skb);
            skb = new_skb;
            byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
        }

        skb_push(skb, byteoff + TX_INBAND_HEADER_LENGTH);

        header = (struct tx_inband_header *)skb->data;

        /*  setup inband trailer    */
        header->uu   = 0;
        header->cpi  = 0;
        header->pad  = aal5_fill_pattern;
        header->res1 = 0;

        /*  setup cell header   */
        header->clp = (vcc->atm_options & ATM_ATMOPT_CLP) ? 1 : 0;
        header->pti = ATM_PTI_US0;
        header->vci = vcc->vci;
        header->vpi = vcc->vpi;
        header->gfc = 0;

        /*  setup descriptor    */
        reg_desc.dataptr = (unsigned int)skb->data >> 2;
        reg_desc.datalen = datalen;
        reg_desc.byteoff = byteoff;
        reg_desc.iscell  = 0;
    }
    else {
        /*  if data pointer is not aligned, allocate a new sk_buff */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 ) {
            struct sk_buff *new_skb;

            err("skb->data not aligned");

            new_skb = alloc_skb_tx(skb->len);
            if ( new_skb == NULL ) {
                err("ALLOC_SKB_TX_FAIL");
                ret = -ENOMEM;
                goto PPE_SEND_FAIL;
            }
            skb_put(new_skb, skb->len);
            memcpy(new_skb->data, skb->data, skb->len);
            dev_kfree_skb_any(skb);
            skb = new_skb;
        }

        /*  setup descriptor    */
        reg_desc.dataptr = (unsigned int)skb->data >> 2;
        reg_desc.datalen = skb->len;
        reg_desc.byteoff = 0;
        reg_desc.iscell  = 1;
    }

    reg_desc.own = 1;
    reg_desc.sop = reg_desc.eop = 1;

    desc_base = get_tx_desc(conn);
    if ( desc_base < 0 ) {
        err("ALLOC_TX_CONNECTION_FAIL");
        ret = -EIO;
        goto PPE_SEND_FAIL;
    }

    atomic_inc(&vcc->stats->tx);
    if ( vcc->qos.aal == ATM_AAL5 )
        g_atm_priv_data.wtx_pdu++;

    /*  update descriptor send pointer  */
    if ( g_atm_priv_data.conn[conn].tx_skb[desc_base] != NULL )
        dev_kfree_skb_any(g_atm_priv_data.conn[conn].tx_skb[desc_base]);
    g_atm_priv_data.conn[conn].tx_skb[desc_base] = skb;

    /*  write descriptor to memory and write back cache    */
    g_atm_priv_data.conn[conn].tx_desc[desc_base] = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);

    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, 0, conn, 1);

    mailbox_signal(conn, 1);

    adsl_led_flash();

    return 0;

FIND_VCC_FAIL:
    err("FIND_VCC_FAIL");
    g_atm_priv_data.wtx_err_pdu++;
    dev_kfree_skb_any(skb);
    return ret;

PPE_SEND_FAIL:
    if ( vcc->qos.aal == ATM_AAL5 )
        g_atm_priv_data.wtx_drop_pdu++;
    atomic_inc(&vcc->stats->tx_err);
    dev_kfree_skb_any(skb);
    return ret;
}
static int ppe_send_oam(struct atm_vcc *vcc, void *cell, int flags)
{
    int conn;
    struct uni_cell_header *uni_cell_header = (struct uni_cell_header *)cell;
    int desc_base;
    struct sk_buff *skb;
    struct tx_descriptor reg_desc = {0};

    if ( ((uni_cell_header->pti == ATM_PTI_SEGF5 || uni_cell_header->pti == ATM_PTI_E2EF5)
        && find_vpivci(uni_cell_header->vpi, uni_cell_header->vci) < 0)
        || ((uni_cell_header->vci == 0x03 || uni_cell_header->vci == 0x04)
        && find_vpi(uni_cell_header->vpi) < 0) )
        return -EINVAL;

    if ( !g_showtime ) {
        err("not in showtime");
        return -EIO;
    }

    conn = find_vcc(vcc);
    if ( conn < 0 ) {
        err("FIND_VCC_FAIL");
        return -EINVAL;
    }

    skb = alloc_skb_tx(CELL_SIZE);
    if ( skb == NULL ) {
        err("ALLOC_SKB_TX_FAIL");
        return -ENOMEM;
    }
    memcpy(skb->data, cell, CELL_SIZE);

    /*  setup descriptor    */
    reg_desc.dataptr = (unsigned int)skb->data >> 2;
    reg_desc.datalen = CELL_SIZE;
    reg_desc.byteoff = 0;
    reg_desc.iscell  = 1;

    reg_desc.own = 1;
    reg_desc.sop = reg_desc.eop = 1;

    desc_base = get_tx_desc(conn);
    if ( desc_base < 0 ) {
        dev_kfree_skb_any(skb);
        err("ALLOC_TX_CONNECTION_FAIL");
        return -EIO;
    }

    atomic_inc(&vcc->stats->tx);

    /*  update descriptor send pointer  */
    if ( g_atm_priv_data.conn[conn].tx_skb[desc_base] != NULL )
        dev_kfree_skb_any(g_atm_priv_data.conn[conn].tx_skb[desc_base]);
    g_atm_priv_data.conn[conn].tx_skb[desc_base] = skb;

    /*  write descriptor to memory and write back cache    */
    g_atm_priv_data.conn[conn].tx_desc[desc_base] = reg_desc;
    dma_cache_wback((unsigned long)skb->data, CELL_SIZE);

    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, 0, conn, 1);

    mailbox_signal(conn, 1);

    adsl_led_flash();

    return 0;
}
static int ppe_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
    int conn;

    if ( vcc == NULL || qos == NULL )
        return -EINVAL;

    conn = find_vcc(vcc);
    if ( conn < 0 )
        return -EINVAL;

    set_qsb(vcc, qos, conn);

    return 0;
}
static INLINE int adsl_led_flash(void)
{
    return ifx_mei_atm_led_blink();
}
/*
 *  Description:
 *    Add a 32-bit value to a 64-bit value, and put the result in a 64-bit variable.
 *  Input:
 *    opt1 --- ppe_u64_t, first operand, a 64-bit unsigned integer value
 *    opt2 --- unsigned int, second operand, a 32-bit unsigned integer value
 *    ret  --- ppe_u64_t, pointer to a variable to hold the result
 *  Output:
 *    none
 */
static INLINE void u64_add_u32(ppe_u64_t opt1, unsigned int opt2, ppe_u64_t *ret)
{
    ret->l = opt1.l + opt2;
    if ( ret->l < opt1.l || ret->l < opt2 )
        ret->h++;
}
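/*
 *  Worked example of the carry test above (illustration only): with
 *  opt1.l = 0xFFFFFFF0 and opt2 = 0x20, ret->l wraps to 0x10, which is
 *  smaller than both operands, so the high word is incremented. Unsigned
 *  overflow always yields a sum below each addend, so either comparison
 *  alone would already detect the carry.
 */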
static INLINE struct sk_buff *alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /*  allocate memory including padding   */
    skb = dev_alloc_skb(RX_DMA_CH_AAL_BUF_SIZE + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /*  must be burst length alignment  */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
        /*  put skb in reserved area "skb->data - 4"   */
        *((struct sk_buff **)skb->data - 1) = skb;
        /*  write back and invalidate cache */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /*  invalidate cache    */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }

    return skb;
}
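/*
 *  The reserve size above relies on the identity (illustration only)
 *      ~(p + (A - 1)) & (A - 1) == bytes from p up to the next A-byte
 *  boundary, for A a power of two. For example, with A = 32 and the low
 *  bits of p equal to 0x04: ~(0x04 + 0x1F) & 0x1F = ~0x23 & 0x1F = 0x1C,
 *  and 0x04 + 0x1C = 0x20, the next 32-byte boundary.
 */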
static INLINE struct sk_buff *alloc_skb_tx(unsigned int size)
{
    struct sk_buff *skb;

    /*  allocate memory including header and padding    */
    size += TX_INBAND_HEADER_LENGTH + MAX_TX_PACKET_ALIGN_BYTES + MAX_TX_PACKET_PADDING_BYTES;
    size &= ~(DATA_BUFFER_ALIGNMENT - 1);
    skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
    /*  must be burst length alignment  */
    if ( skb != NULL )
        skb_reserve(skb, (~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1)) + TX_INBAND_HEADER_LENGTH);

    return skb;
}
struct sk_buff *atm_alloc_tx(struct atm_vcc *vcc, unsigned int size)
{
    int conn;
    struct sk_buff *skb;

    /*  oversize packet */
    if ( size > aal5s_max_packet_size ) {
        err("atm_alloc_tx: oversize packet");
        return NULL;
    }
    /*  send buffer overflow    */
    if ( atomic_read(&sk_atm(vcc)->sk_wmem_alloc) && !atm_may_send(vcc, size) ) {
        err("atm_alloc_tx: send buffer overflow");
        return NULL;
    }
    conn = find_vcc(vcc);
    if ( conn < 0 ) {
        err("atm_alloc_tx: unknown VCC");
        return NULL;
    }

    skb = dev_alloc_skb(size);
    if ( skb == NULL ) {
        err("atm_alloc_tx: sk buffer is used up");
        return NULL;
    }

    atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);

    return skb;
}
static INLINE void atm_free_tx_skb_vcc(struct sk_buff *skb, struct atm_vcc *vcc)
{
    if ( vcc->pop != NULL )
        vcc->pop(vcc, skb);
    else
        dev_kfree_skb_any(skb);
}
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int dataptr)
{
    unsigned int skb_dataptr;
    struct sk_buff *skb;

    skb_dataptr = ((dataptr - 1) << 2) | KSEG1;
    skb = *(struct sk_buff **)skb_dataptr;

    ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
    ASSERT(((unsigned int)skb->data | KSEG1) == ((dataptr << 2) | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);

    return skb;
}
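/*
 *  Round-trip sketch: alloc_skb_rx() stores the sk_buff pointer in the four
 *  bytes just below skb->data, and the DMA descriptor carries skb->data >> 2.
 *  For a descriptor value d, ((d - 1) << 2) is therefore the word right
 *  before the data buffer, i.e. the stashed pointer; OR-ing KSEG1 makes the
 *  read uncached so it cannot hit a stale cache line.
 */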
static INLINE int get_tx_desc(unsigned int conn)
{
    int desc_base = -1;
    struct connection *p_conn = &g_atm_priv_data.conn[conn];

    if ( p_conn->tx_desc[p_conn->tx_desc_pos].own == 0 ) {
        desc_base = p_conn->tx_desc_pos;
        if ( ++(p_conn->tx_desc_pos) == dma_tx_descriptor_length )
            p_conn->tx_desc_pos = 0;
    }

    return desc_base;
}
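/*
 *  The OWN bit is the handshake with the PPE firmware: the driver may only
 *  reuse a TX descriptor once the firmware has cleared OWN after sending.
 *  tx_desc_pos walks the ring strictly in order; if the next slot is still
 *  owned by hardware, -1 is returned and the caller drops the packet rather
 *  than overwrite an in-flight descriptor.
 */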
static INLINE void mailbox_oam_rx_handler(void)
{
    unsigned int vlddes = WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_OAM)->vlddes;
    struct rx_descriptor reg_desc;
    struct uni_cell_header *header;
    int conn;
    struct atm_vcc *vcc;
    unsigned int i;

    for ( i = 0; i < vlddes; i++ ) {
        do {
            reg_desc = g_atm_priv_data.oam_desc[g_atm_priv_data.oam_desc_pos];
        } while ( reg_desc.own || !reg_desc.c );    //  keep testing OWN and C bits until data is ready

        header = (struct uni_cell_header *)&g_atm_priv_data.oam_buf[g_atm_priv_data.oam_desc_pos * RX_DMA_CH_OAM_BUF_SIZE];

        if ( header->pti == ATM_PTI_SEGF5 || header->pti == ATM_PTI_E2EF5 )
            conn = find_vpivci(header->vpi, header->vci);
        else if ( header->vci == 0x03 || header->vci == 0x04 )
            conn = find_vpi(header->vpi);
        else
            conn = -1;

        if ( conn >= 0 && g_atm_priv_data.conn[conn].vcc != NULL ) {
            vcc = g_atm_priv_data.conn[conn].vcc;

            if ( vcc->push_oam != NULL )
                vcc->push_oam(vcc, header);
            else
                ifx_push_oam((unsigned char *)header);
        }

        reg_desc.byteoff = 0;
        reg_desc.datalen = RX_DMA_CH_OAM_BUF_SIZE;
        reg_desc.own = 1;
        reg_desc.c   = 0;

        g_atm_priv_data.oam_desc[g_atm_priv_data.oam_desc_pos] = reg_desc;
        if ( ++g_atm_priv_data.oam_desc_pos == RX_DMA_CH_OAM_DESC_LEN )
            g_atm_priv_data.oam_desc_pos = 0;

        mailbox_signal(RX_DMA_CH_OAM, 0);
    }
}
static INLINE void mailbox_aal_rx_handler(void)
{
    unsigned int vlddes = WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_AAL)->vlddes;
    struct rx_descriptor reg_desc;
    int conn;
    struct atm_vcc *vcc;
    struct sk_buff *skb, *new_skb;
    struct rx_inband_trailer *trailer;
    unsigned int i;

    for ( i = 0; i < vlddes; i++ ) {
        do {
            reg_desc = g_atm_priv_data.aal_desc[g_atm_priv_data.aal_desc_pos];
        } while ( reg_desc.own || !reg_desc.c );    //  keep testing OWN and C bits until data is ready

        conn = reg_desc.id;

        if ( g_atm_priv_data.conn[conn].vcc != NULL ) {
            vcc = g_atm_priv_data.conn[conn].vcc;

            skb = get_skb_rx_pointer(reg_desc.dataptr);

            if ( reg_desc.err ) {
                if ( vcc->qos.aal == ATM_AAL5 ) {
                    trailer = (struct rx_inband_trailer *)((unsigned int)skb->data + ((reg_desc.byteoff + reg_desc.datalen + MAX_RX_PACKET_PADDING_BYTES) & ~MAX_RX_PACKET_PADDING_BYTES));
                    if ( trailer->stw_crc )
                        g_atm_priv_data.conn[conn].aal5_vcc_crc_err++;
                    if ( trailer->stw_ovz )
                        g_atm_priv_data.conn[conn].aal5_vcc_oversize_sdu++;
                    g_atm_priv_data.wrx_drop_pdu++;
                }
                atomic_inc(&vcc->stats->rx_drop);
                atomic_inc(&vcc->stats->rx_err);
            }
            else if ( atm_charge(vcc, skb->truesize) ) {
                new_skb = alloc_skb_rx();
                if ( new_skb != NULL ) {
                    skb_reserve(skb, reg_desc.byteoff);
                    skb_put(skb, reg_desc.datalen);
                    ATM_SKB(skb)->vcc = vcc;

                    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, 0, conn, 0);

                    vcc->push(vcc, skb);

                    if ( vcc->qos.aal == ATM_AAL5 )
                        g_atm_priv_data.wrx_pdu++;
                    atomic_inc(&vcc->stats->rx);
                    adsl_led_flash();

                    reg_desc.dataptr = (unsigned int)new_skb->data >> 2;
                }
                else {
                    atm_return(vcc, skb->truesize);
                    if ( vcc->qos.aal == ATM_AAL5 )
                        g_atm_priv_data.wrx_drop_pdu++;
                    atomic_inc(&vcc->stats->rx_drop);
                }
            }
            else {
                if ( vcc->qos.aal == ATM_AAL5 )
                    g_atm_priv_data.wrx_drop_pdu++;
                atomic_inc(&vcc->stats->rx_drop);
            }
        }
        else
            g_atm_priv_data.wrx_drop_pdu++;

        reg_desc.byteoff = 0;
        reg_desc.datalen = RX_DMA_CH_AAL_BUF_SIZE;
        reg_desc.own = 1;
        reg_desc.c   = 0;

        g_atm_priv_data.aal_desc[g_atm_priv_data.aal_desc_pos] = reg_desc;
        if ( ++g_atm_priv_data.aal_desc_pos == dma_rx_descriptor_length )
            g_atm_priv_data.aal_desc_pos = 0;

        mailbox_signal(RX_DMA_CH_AAL, 0);
    }
}
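/*
 *  RX descriptor recycling in both handlers follows the same protocol:
 *  after the payload is consumed (or dropped), the descriptor is rewritten
 *  with own = 1 and c = 0, handing the buffer back to the PPE, and
 *  mailbox_signal(channel, 0) tells the firmware a slot is free again.
 */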
#if defined(ENABLE_TASKLET) && ENABLE_TASKLET
static void do_ppe_tasklet(unsigned long arg)
{
    *MBOX_IGU1_ISRC = *MBOX_IGU1_ISR;
    mailbox_oam_rx_handler();
    mailbox_aal_rx_handler();
    if ( (*MBOX_IGU1_ISR & ((1 << RX_DMA_CH_AAL) | (1 << RX_DMA_CH_OAM))) != 0 )
        tasklet_schedule(&g_dma_tasklet);
    else
        enable_irq(PPE_MAILBOX_IGU1_INT);
}
#endif
static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
{
    if ( !*MBOX_IGU1_ISR )
        return IRQ_HANDLED;

#if defined(ENABLE_TASKLET) && ENABLE_TASKLET
    disable_irq(PPE_MAILBOX_IGU1_INT);
    tasklet_schedule(&g_dma_tasklet);
#else
    *MBOX_IGU1_ISRC = *MBOX_IGU1_ISR;
    mailbox_oam_rx_handler();
    mailbox_aal_rx_handler();
#endif

    return IRQ_HANDLED;
}
static INLINE void mailbox_signal(unsigned int queue, int is_tx)
{
    if ( is_tx ) {
        while ( MBOX_IGU3_ISR_ISR(queue + FIRST_QSB_QID + 16) );
        *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(queue + FIRST_QSB_QID + 16);
    }
    else {
        while ( MBOX_IGU3_ISR_ISR(queue) );
        *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(queue);
    }
}
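/*
 *  Mapping sketch (values are illustrative assumptions): MBOX_IGU3 carries
 *  one event bit per queue. RX channels signal on bit "queue", while TX
 *  queues are offset by FIRST_QSB_QID + 16; e.g. if FIRST_QSB_QID were 1,
 *  TX queue 0 would raise bit 17. The busy-wait ensures the previous event
 *  was consumed before the bit is set again.
 */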
static void set_qsb(struct atm_vcc *vcc, struct atm_qos *qos, unsigned int queue)
{
    struct clk *clk = clk_get(0, "fpi");
    unsigned int qsb_clk = clk_get_rate(clk);
    unsigned int qsb_qid = queue + FIRST_QSB_QID;
    union qsb_queue_parameter_table qsb_queue_parameter_table = {{0}};
    union qsb_queue_vbr_parameter_table qsb_queue_vbr_parameter_table = {{0}};
    unsigned int tmp;

#if defined(DEBUG_QOS) && DEBUG_QOS
    if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) {
        static char *str_traffic_class[9] = {
            "ATM_NONE",
            "ATM_UBR",
            "ATM_CBR",
            "ATM_VBR",
            "ATM_ABR",
            "ATM_ANYCLASS",
            "ATM_VBR_RT",
            "ATM_UBR_PLUS",
            "ATM_MAX_PCR"
        };
        printk(KERN_INFO "QoS Parameters:\n");
        printk(KERN_INFO "\tAAL          : %d\n", qos->aal);
        printk(KERN_INFO "\tTX Traffic Class: %s\n", str_traffic_class[qos->txtp.traffic_class]);
        printk(KERN_INFO "\tTX Max PCR   : %d\n", qos->txtp.max_pcr);
        printk(KERN_INFO "\tTX Min PCR   : %d\n", qos->txtp.min_pcr);
        printk(KERN_INFO "\tTX PCR       : %d\n", qos->txtp.pcr);
        printk(KERN_INFO "\tTX Max CDV   : %d\n", qos->txtp.max_cdv);
        printk(KERN_INFO "\tTX Max SDU   : %d\n", qos->txtp.max_sdu);
        printk(KERN_INFO "\tTX SCR       : %d\n", qos->txtp.scr);
        printk(KERN_INFO "\tTX MBS       : %d\n", qos->txtp.mbs);
        printk(KERN_INFO "\tTX CDV       : %d\n", qos->txtp.cdv);
        printk(KERN_INFO "\tRX Traffic Class: %s\n", str_traffic_class[qos->rxtp.traffic_class]);
        printk(KERN_INFO "\tRX Max PCR   : %d\n", qos->rxtp.max_pcr);
        printk(KERN_INFO "\tRX Min PCR   : %d\n", qos->rxtp.min_pcr);
        printk(KERN_INFO "\tRX PCR       : %d\n", qos->rxtp.pcr);
        printk(KERN_INFO "\tRX Max CDV   : %d\n", qos->rxtp.max_cdv);
        printk(KERN_INFO "\tRX Max SDU   : %d\n", qos->rxtp.max_sdu);
        printk(KERN_INFO "\tRX SCR       : %d\n", qos->rxtp.scr);
        printk(KERN_INFO "\tRX MBS       : %d\n", qos->rxtp.mbs);
        printk(KERN_INFO "\tRX CDV       : %d\n", qos->rxtp.cdv);
    }
#endif  //  defined(DEBUG_QOS) && DEBUG_QOS

    /*
     *  Peak Cell Rate (PCR) Limiter
     */
    if ( qos->txtp.max_pcr == 0 )
        qsb_queue_parameter_table.bit.tp = 0;   /*  disable PCR limiter */
    else {
        /*  peak cell rate would be slightly lower than requested [maximum_rate / pcr = (qsb_clock / 8) * (time_step / 4) / pcr] */
        tmp = ((qsb_clk * qsb_tstep) >> 5) / qos->txtp.max_pcr + 1;
        /*  check if overflow takes place   */
        qsb_queue_parameter_table.bit.tp = tmp > QSB_TP_TS_MAX ? QSB_TP_TS_MAX : tmp;
    }

    //  A funny issue. Create two PVCs, one UBR and one UBR with max_pcr.
    //  Send packets to these two PVCs at the same time and strange behavior is triggered.
    //  On A1, RAM from 0x80000000 to 0x8007FFFF is corrupted with the fixed pattern 0x00000000 0x40000000.
    //  On A4, the PPE firmware keeps emitting unknown cells and does not respond to the driver.
    //  To work around it, always create UBR with max_pcr.
    //  If the user wants UBR without max_pcr, a default rate larger than line rate is used.
    if ( qos->txtp.traffic_class == ATM_UBR && qsb_queue_parameter_table.bit.tp == 0 ) {
        int port = g_atm_priv_data.conn[queue].port;
        unsigned int max_pcr = g_atm_priv_data.port[port].tx_max_cell_rate + 1000;

        tmp = ((qsb_clk * qsb_tstep) >> 5) / max_pcr + 1;
        if ( tmp > QSB_TP_TS_MAX )
            tmp = QSB_TP_TS_MAX;
        else if ( tmp < 1 )
            tmp = 1;
        qsb_queue_parameter_table.bit.tp = tmp;
    }

    /*
     *  Weighted Fair Queueing Factor (WFQF)
     */
    switch ( qos->txtp.traffic_class ) {
    case ATM_CBR:
    case ATM_VBR_RT:
        /*  real time queue gets weighted fair queueing bypass */
        qsb_queue_parameter_table.bit.wfqf = 0;
        break;
    case ATM_VBR_NRT:
    case ATM_UBR_PLUS:
        /*  WFQF calculation here is based on virtual cell rates, to reduce granularity for high rates */
        /*  WFQF is maximum cell rate / guaranteed cell rate    */
        /*  wfqf = qsb_minimum_cell_rate * QSB_WFQ_NONUBR_MAX / requested_minimum_peak_cell_rate   */
        if ( qos->txtp.min_pcr == 0 )
            qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_NONUBR_MAX;
        else {
            tmp = QSB_GCR_MIN * QSB_WFQ_NONUBR_MAX / qos->txtp.min_pcr;
            if ( tmp == 0 )
                qsb_queue_parameter_table.bit.wfqf = 1;
            else if ( tmp > QSB_WFQ_NONUBR_MAX )
                qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_NONUBR_MAX;
            else
                qsb_queue_parameter_table.bit.wfqf = tmp;
        }
        break;
    default:
    case ATM_UBR:
        qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_UBR_BYPASS;
    }

    /*
     *  Sustained Cell Rate (SCR) Leaky Bucket Shaper VBR.0/VBR.1
     */
    if ( qos->txtp.traffic_class == ATM_VBR_RT || qos->txtp.traffic_class == ATM_VBR_NRT ) {
        if ( qos->txtp.scr == 0 ) {
            /*  disable shaper  */
            qsb_queue_vbr_parameter_table.bit.taus = 0;
            qsb_queue_vbr_parameter_table.bit.ts = 0;
        }
        else {
            /*  Cell Loss Priority  (CLP)   */
            if ( (vcc->atm_options & ATM_ATMOPT_CLP) )
                /*  CLP1    */
                qsb_queue_parameter_table.bit.vbr = 1;
            else
                /*  CLP0    */
                qsb_queue_parameter_table.bit.vbr = 0;
            /*  Rate Shaper Parameter (TS) and Burst Tolerance Parameter for SCR (tauS)    */
            tmp = ((qsb_clk * qsb_tstep) >> 5) / qos->txtp.scr + 1;
            qsb_queue_vbr_parameter_table.bit.ts = tmp > QSB_TP_TS_MAX ? QSB_TP_TS_MAX : tmp;
            tmp = (qos->txtp.mbs - 1) * (qsb_queue_vbr_parameter_table.bit.ts - qsb_queue_parameter_table.bit.tp) / 64;
            if ( tmp == 0 )
                qsb_queue_vbr_parameter_table.bit.taus = 1;
            else if ( tmp > QSB_TAUS_MAX )
                qsb_queue_vbr_parameter_table.bit.taus = QSB_TAUS_MAX;
            else
                qsb_queue_vbr_parameter_table.bit.taus = tmp;
        }
    }
    else {
        qsb_queue_vbr_parameter_table.bit.taus = 0;
        qsb_queue_vbr_parameter_table.bit.ts = 0;
    }

    /*  Queue Parameter Table (QPT) */
    *QSB_RTM   = QSB_RTM_DM_SET(QSB_QPT_SET_MASK);
    *QSB_RTD   = QSB_RTD_TTV_SET(qsb_queue_parameter_table.dword);
    *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_QPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(qsb_qid);
#if defined(DEBUG_QOS) && DEBUG_QOS
    if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) )
        printk("QPT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC);
#endif
    /*  Queue VBR Parameter Table (QVPT)    */
    *QSB_RTM   = QSB_RTM_DM_SET(QSB_QVPT_SET_MASK);
    *QSB_RTD   = QSB_RTD_TTV_SET(qsb_queue_vbr_parameter_table.dword);
    *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_VBR) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(qsb_qid);
#if defined(DEBUG_QOS) && DEBUG_QOS
    if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) )
        printk("QVPT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC);
#endif

#if defined(DEBUG_QOS) && DEBUG_QOS
    if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) {
        printk("set_qsb\n");
        printk("  qsb_clk = %lu\n", (unsigned long)qsb_clk);
        printk("  qsb_queue_parameter_table.bit.tp       = %d\n", (int)qsb_queue_parameter_table.bit.tp);
        printk("  qsb_queue_parameter_table.bit.wfqf     = %d (0x%08X)\n", (int)qsb_queue_parameter_table.bit.wfqf, (int)qsb_queue_parameter_table.bit.wfqf);
        printk("  qsb_queue_parameter_table.bit.vbr      = %d\n", (int)qsb_queue_parameter_table.bit.vbr);
        printk("  qsb_queue_parameter_table.dword        = 0x%08X\n", (int)qsb_queue_parameter_table.dword);
        printk("  qsb_queue_vbr_parameter_table.bit.ts   = %d\n", (int)qsb_queue_vbr_parameter_table.bit.ts);
        printk("  qsb_queue_vbr_parameter_table.bit.taus = %d\n", (int)qsb_queue_vbr_parameter_table.bit.taus);
        printk("  qsb_queue_vbr_parameter_table.dword    = 0x%08X\n", (int)qsb_queue_vbr_parameter_table.dword);
    }
#endif
}
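/*
 *  Worked example for the PCR limiter (illustrative numbers only): with a
 *  300 MHz FPI/QSB clock, qsb_tstep = 4 and max_pcr = 150000 cells/s,
 *      tp = ((300000000 * 4) >> 5) / 150000 + 1 = 37500000 / 150000 + 1 = 251,
 *  well below QSB_TP_TS_MAX, so no clamping takes place.
 */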
static void qsb_global_set(void)
{
    struct clk *clk = clk_get(0, "fpi");
    unsigned int qsb_clk = clk_get_rate(clk);
    int i;
    unsigned int tmp1, tmp2, tmp3;

    *QSB_ICDV = QSB_ICDV_TAU_SET(qsb_tau);
    *QSB_SBL  = QSB_SBL_SBL_SET(qsb_srvm);
    *QSB_CFG  = QSB_CFG_TSTEPC_SET(qsb_tstep >> 1);
#if defined(DEBUG_QOS) && DEBUG_QOS
    if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) {
        printk("qsb_clk = %u\n", qsb_clk);
        printk("QSB_ICDV (%08X) = %d (%d), QSB_SBL (%08X) = %d (%d), QSB_CFG (%08X) = %d (%d)\n", (unsigned int)QSB_ICDV, *QSB_ICDV, QSB_ICDV_TAU_SET(qsb_tau), (unsigned int)QSB_SBL, *QSB_SBL, QSB_SBL_SBL_SET(qsb_srvm), (unsigned int)QSB_CFG, *QSB_CFG, QSB_CFG_TSTEPC_SET(qsb_tstep >> 1));
    }
#endif

    /*
     *  set SCT and SPT per port
     */
    for ( i = 0; i < ATM_PORT_NUMBER; i++ ) {
        if ( g_atm_priv_data.port[i].tx_max_cell_rate != 0 ) {
            tmp1 = ((qsb_clk * qsb_tstep) >> 1) / g_atm_priv_data.port[i].tx_max_cell_rate;
            tmp2 = tmp1 >> 6;                   /*  integer value of Tsb    */
            tmp3 = (tmp1 & ((1 << 6) - 1)) + 1; /*  fractional part of Tsb  */
            /*  carry over to integer part  */
            if ( tmp3 == (1 << 6) ) {
                tmp3 = 0;
                tmp2++;
            }
            /*  1. set mask                                 */
            /*  2. write value to data transfer register    */
            /*  3. start the transfer                       */
            /*  SCT (FracRate)  */
            *QSB_RTM   = QSB_RTM_DM_SET(QSB_SET_SCT_MASK);
            *QSB_RTD   = QSB_RTD_TTV_SET(tmp3);
            *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_SCT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(i & 0x01);
#if defined(DEBUG_QOS) && DEBUG_QOS
            if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) )
                printk("SCT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC);
#endif
            /*  SPT (SBV + PN + IntRate)    */
            *QSB_RTM   = QSB_RTM_DM_SET(QSB_SET_SPT_MASK);
            *QSB_RTD   = QSB_RTD_TTV_SET(QSB_SPT_SBV_VALID | QSB_SPT_PN_SET(i & 0x01) | QSB_SPT_INTRATE_SET(tmp2));
            *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_SPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(i & 0x01);
#if defined(DEBUG_QOS) && DEBUG_QOS
            if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) )
                printk("SPT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC);
#endif
        }
    }
}
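/*
 *  Tsb example (illustrative numbers only): with qsb_clk = 300 MHz,
 *  qsb_tstep = 4 and tx_max_cell_rate = 600000 cells/s:
 *      tmp1 = ((300000000 * 4) >> 1) / 600000 = 1000
 *      tmp2 = 1000 >> 6       = 15   (integer part of Tsb)
 *      tmp3 = (1000 & 63) + 1 = 41   (fractional part of Tsb)
 */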
static INLINE void set_htu_entry(unsigned int vpi, unsigned int vci, unsigned int queue, int aal5, int is_retx)
{
    struct htu_entry htu_entry = {  res1:       0x00,
                                    clp:        is_retx ? 0x01 : 0x00,
                                    pid:        g_atm_priv_data.conn[queue].port & 0x01,
                                    vpi:        vpi,
                                    vci:        vci,
                                    pti:        0x00,
                                    vld:        0x01};

    struct htu_mask htu_mask = {    set:        0x01,
#if !defined(ENABLE_ATM_RETX) || !ENABLE_ATM_RETX
                                    clp:        0x01,
                                    pid_mask:   0x02,
#else
                                    clp:        g_retx_htu ? 0x00 : 0x01,
                                    pid_mask:   RETX_MODE_CFG->retx_en ? 0x03 : 0x02,
#endif
                                    vpi_mask:   0x00,
#if !defined(ENABLE_ATM_RETX) || !ENABLE_ATM_RETX
                                    vci_mask:   0x0000,
#else
                                    vci_mask:   RETX_MODE_CFG->retx_en ? 0xFF00 : 0x0000,
#endif
                                    pti_mask:   0x03,   //  0xx, user data
                                    clear:      0x00};

    struct htu_result htu_result = {res1:       0x00,
                                    pid:        g_atm_priv_data.conn[queue].port & 0x01,
                                    qid:        queue,
                                    type:       aal5 ? 0x00 : 0x01,
                                    ven:        0x01,
                                    res2:       0x00};

    *HTU_RESULT(queue + OAM_HTU_ENTRY_NUMBER) = htu_result;
    *HTU_MASK(queue + OAM_HTU_ENTRY_NUMBER)   = htu_mask;
    *HTU_ENTRY(queue + OAM_HTU_ENTRY_NUMBER)  = htu_entry;
}
static INLINE void clear_htu_entry(unsigned int queue)
{
    HTU_ENTRY(queue + OAM_HTU_ENTRY_NUMBER)->vld = 0;
}

static void validate_oam_htu_entry(void)
{
    HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY)->vld = 1;
    HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY)->vld = 1;
    HTU_ENTRY(OAM_F5_HTU_ENTRY)->vld = 1;
#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX
    HTU_ENTRY(OAM_ARQ_HTU_ENTRY)->vld = 1;
#endif
}

static void invalidate_oam_htu_entry(void)
{
    HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY)->vld = 0;
    HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY)->vld = 0;
    HTU_ENTRY(OAM_F5_HTU_ENTRY)->vld = 0;
#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX
    HTU_ENTRY(OAM_ARQ_HTU_ENTRY)->vld = 0;
#endif
}
static INLINE int find_vpi(unsigned int vpi)
{
    int i;
    unsigned int bit;

    for ( i = 0, bit = 1; i < MAX_PVC_NUMBER; i++, bit <<= 1 ) {
        if ( (g_atm_priv_data.conn_table & bit) != 0
            && g_atm_priv_data.conn[i].vcc != NULL
            && vpi == g_atm_priv_data.conn[i].vcc->vpi )
            return i;
    }

    return -1;
}

static INLINE int find_vpivci(unsigned int vpi, unsigned int vci)
{
    int i;
    unsigned int bit;

    for ( i = 0, bit = 1; i < MAX_PVC_NUMBER; i++, bit <<= 1 ) {
        if ( (g_atm_priv_data.conn_table & bit) != 0
            && g_atm_priv_data.conn[i].vcc != NULL
            && vpi == g_atm_priv_data.conn[i].vcc->vpi
            && vci == g_atm_priv_data.conn[i].vcc->vci )
            return i;
    }

    return -1;
}

static INLINE int find_vcc(struct atm_vcc *vcc)
{
    int i;
    unsigned int bit;

    for ( i = 0, bit = 1; i < MAX_PVC_NUMBER; i++, bit <<= 1 ) {
        if ( (g_atm_priv_data.conn_table & bit) != 0
            && g_atm_priv_data.conn[i].vcc == vcc )
            return i;
    }

    return -1;
}
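/*
 *  All three lookups scan the same structure: conn_table is a bitmap with
 *  one bit per PVC, so bit i set means g_atm_priv_data.conn[i] is in use.
 *  E.g. conn_table = 0x05 means connections 0 and 2 are active, and only
 *  those two entries are compared against the requested VPI/VCI or vcc.
 */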
#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
static void dump_skb(struct sk_buff *skb, u32 len, char *title, int port, int ch, int is_tx)
{
    int i;

    if ( !(ifx_atm_dbg_enable & (is_tx ? DBG_ENABLE_MASK_DUMP_SKB_TX : DBG_ENABLE_MASK_DUMP_SKB_RX)) )
        return;

    if ( skb->len < len )
        len = skb->len;

    if ( len > RX_DMA_CH_AAL_BUF_SIZE ) {
        printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32)skb, (u32)skb->data, skb->len);
        return;
    }

    if ( ch >= 0 )
        printk("%s (port %d, ch %d)\n", title, port, ch);
    else
        printk("%s\n", title);
    printk("  skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len);
    for ( i = 1; i <= len; i++ ) {
        if ( i % 16 == 1 )
            printk("  %4d:", i - 1);
        printk(" %02X", (int)(*((char*)skb->data + i - 1) & 0xFF));
        if ( i % 16 == 0 )
            printk("\n");
    }
    if ( (i - 1) % 16 != 0 )
        printk("\n");
}
#endif
static INLINE void proc_file_create(void)
{
    struct proc_dir_entry *res;

    g_atm_dir = proc_mkdir("driver/ifx_atm", NULL);

    create_proc_read_entry("version",
                            0,
                            g_atm_dir,
                            proc_read_version,
                            NULL);

    res = create_proc_entry("mib",
                            0,
                            g_atm_dir);
    if ( res != NULL ) {
        res->read_proc  = proc_read_mib;
        res->write_proc = proc_write_mib;
    }

#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    res = create_proc_entry("dbg",
                            0,
                            g_atm_dir);
    if ( res != NULL ) {
        res->read_proc  = proc_read_dbg;
        res->write_proc = proc_write_dbg;
    }
#endif

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
    create_proc_read_entry("htu",
                            0,
                            g_atm_dir,
                            proc_read_htu,
                            NULL);

    create_proc_read_entry("txq",
                            0,
                            g_atm_dir,
                            proc_read_txq,
                            NULL);
#endif
}

static INLINE void proc_file_delete(void)
{
#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
    remove_proc_entry("txq", g_atm_dir);

    remove_proc_entry("htu", g_atm_dir);
#endif

#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    remove_proc_entry("dbg", g_atm_dir);
#endif

    remove_proc_entry("mib", g_atm_dir);

    remove_proc_entry("version", g_atm_dir);

    remove_proc_entry("driver/ifx_atm", NULL);
}
static int proc_read_version(char *buf, char **start, off_t offset, int count, int *eof, void *data)
{
    int len = 0;

    len += ifx_atm_version(buf + len);

    if ( offset >= len ) {
        *start = buf;
        *eof = 1;
        return 0;
    }
    *start = buf + offset;
    if ( (len -= offset) > count )
        return count;
    *eof = 1;
    return len;
}

static int proc_read_mib(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(page + off + len, "Firmware\n");
    len += sprintf(page + off + len, "  wrx_drophtu_cell = %u\n", WAN_MIB_TABLE->wrx_drophtu_cell);
    len += sprintf(page + off + len, "  wrx_dropdes_pdu  = %u\n", WAN_MIB_TABLE->wrx_dropdes_pdu);
    len += sprintf(page + off + len, "  wrx_correct_pdu  = %u\n", WAN_MIB_TABLE->wrx_correct_pdu);
    len += sprintf(page + off + len, "  wrx_err_pdu      = %u\n", WAN_MIB_TABLE->wrx_err_pdu);
    len += sprintf(page + off + len, "  wrx_dropdes_cell = %u\n", WAN_MIB_TABLE->wrx_dropdes_cell);
    len += sprintf(page + off + len, "  wrx_correct_cell = %u\n", WAN_MIB_TABLE->wrx_correct_cell);
    len += sprintf(page + off + len, "  wrx_err_cell     = %u\n", WAN_MIB_TABLE->wrx_err_cell);
    len += sprintf(page + off + len, "  wrx_total_byte   = %u\n", WAN_MIB_TABLE->wrx_total_byte);
    len += sprintf(page + off + len, "  wtx_total_pdu    = %u\n", WAN_MIB_TABLE->wtx_total_pdu);
    len += sprintf(page + off + len, "  wtx_total_cell   = %u\n", WAN_MIB_TABLE->wtx_total_cell);
    len += sprintf(page + off + len, "  wtx_total_byte   = %u\n", WAN_MIB_TABLE->wtx_total_byte);
    len += sprintf(page + off + len, "Driver\n");
    len += sprintf(page + off + len, "  wrx_pdu      = %u\n", g_atm_priv_data.wrx_pdu);
    len += sprintf(page + off + len, "  wrx_drop_pdu = %u\n", g_atm_priv_data.wrx_drop_pdu);
    len += sprintf(page + off + len, "  wtx_pdu      = %u\n", g_atm_priv_data.wtx_pdu);
    len += sprintf(page + off + len, "  wtx_err_pdu  = %u\n", g_atm_priv_data.wtx_err_pdu);
    len += sprintf(page + off + len, "  wtx_drop_pdu = %u\n", g_atm_priv_data.wtx_drop_pdu);

    *eof = 1;

    return len;
}

static int proc_write_mib(struct file *file, const char *buf, unsigned long count, void *data)
{
    char str[2048];
    char *p;
    int len, rlen;

    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return 0;

    if ( stricmp(p, "clear") == 0 || stricmp(p, "clear all") == 0
        || stricmp(p, "clean") == 0 || stricmp(p, "clean all") == 0 ) {
        memset(WAN_MIB_TABLE, 0, sizeof(*WAN_MIB_TABLE));
        g_atm_priv_data.wrx_pdu      = 0;
        g_atm_priv_data.wrx_drop_pdu = 0;
        g_atm_priv_data.wtx_pdu      = 0;
        g_atm_priv_data.wtx_err_pdu  = 0;
        g_atm_priv_data.wtx_drop_pdu = 0;
    }

    return count;
}
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC

static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(page + off + len, "error print - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_ERR)         ? "enabled" : "disabled");
    len += sprintf(page + off + len, "debug print - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "assert      - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_ASSERT)      ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump rx skb - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_RX) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump tx skb - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_TX) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "qos         - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS)    ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump init   - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_INIT)   ? "enabled" : "disabled");

    *eof = 1;

    return len;
}

static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data)
{
    static const char *dbg_enable_mask_str[] = {
        " error print",
        " err",
        " debug print",
        " dbg",
        " assert",
        " assert",
        " dump rx skb",
        " rx",
        " dump tx skb",
        " tx",
        " dump qos",
        " qos",
        " dump init",
        " init",
        " all"
    };
    static const int dbg_enable_mask_str_len[] = {
        12, 4,
        12, 4,
        7,  7,
        12, 3,
        12, 3,
        9,  4,
        10, 5,
        4
    };
    u32 dbg_enable_mask[] = {
        DBG_ENABLE_MASK_ERR,
        DBG_ENABLE_MASK_DEBUG_PRINT,
        DBG_ENABLE_MASK_ASSERT,
        DBG_ENABLE_MASK_DUMP_SKB_RX,
        DBG_ENABLE_MASK_DUMP_SKB_TX,
        DBG_ENABLE_MASK_DUMP_QOS,
        DBG_ENABLE_MASK_DUMP_INIT,
        DBG_ENABLE_MASK_ALL
    };

    char str[2048];
    char *p;
    int len, rlen;
    int f_enable = 0;
    int i;

    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return 0;

    if ( strincmp(p, "enable", 6) == 0 ) {
        p += 6;
        f_enable = 1;
    }
    else if ( strincmp(p, "disable", 7) == 0 ) {
        p += 7;
        f_enable = -1;
    }
    else if ( strincmp(p, "help", 4) == 0 || *p == '?' ) {
        printk("echo <enable/disable> [err/dbg/assert/rx/tx/qos/init/all] > /proc/driver/ifx_atm/dbg\n");
    }

    if ( f_enable ) {
        if ( *p == 0 ) {
            if ( f_enable > 0 )
                ifx_atm_dbg_enable |= DBG_ENABLE_MASK_ALL;
            else
                ifx_atm_dbg_enable &= ~DBG_ENABLE_MASK_ALL;
        }
        else {
            do {
                for ( i = 0; i < NUM_ENTITY(dbg_enable_mask_str); i++ )
                    if ( strincmp(p, dbg_enable_mask_str[i], dbg_enable_mask_str_len[i]) == 0 ) {
                        if ( f_enable > 0 )
                            ifx_atm_dbg_enable |= dbg_enable_mask[i >> 1];
                        else
                            ifx_atm_dbg_enable &= ~dbg_enable_mask[i >> 1];
                        p += dbg_enable_mask_str_len[i];
                        break;
                    }
            } while ( i < NUM_ENTITY(dbg_enable_mask_str) );
        }
    }

    return count;
}

#endif
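/*
 *  Usage sketch (an assumption based on the proc entries created above):
 *      echo "enable rx tx" > /proc/driver/ifx_atm/dbg
 *      cat /proc/driver/ifx_atm/dbg
 *  toggles and then shows the ifx_atm_dbg_enable flag bits.
 */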
#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC

static INLINE int print_htu(char *buf, int i)
{
    int len = 0;

    if ( HTU_ENTRY(i)->vld ) {
        len += sprintf(buf + len, "%2d. valid\n", i);
        len += sprintf(buf + len, "    entry  0x%08x - pid %01x vpi %02x vci %04x pti %01x\n", *(u32*)HTU_ENTRY(i), HTU_ENTRY(i)->pid, HTU_ENTRY(i)->vpi, HTU_ENTRY(i)->vci, HTU_ENTRY(i)->pti);
        len += sprintf(buf + len, "    mask   0x%08x - pid %01x vpi %02x vci %04x pti %01x\n", *(u32*)HTU_MASK(i), HTU_MASK(i)->pid_mask, HTU_MASK(i)->vpi_mask, HTU_MASK(i)->vci_mask, HTU_MASK(i)->pti_mask);
        len += sprintf(buf + len, "    result 0x%08x - type: %s, qid: %d", *(u32*)HTU_RESULT(i), HTU_RESULT(i)->type ? "cell" : "AAL5", HTU_RESULT(i)->qid);
        if ( HTU_RESULT(i)->type )
            len += sprintf(buf + len, ", cell id: %d, verification: %s", HTU_RESULT(i)->cellid, HTU_RESULT(i)->ven ? "on" : "off");
        len += sprintf(buf + len, "\n");
    }
    else
        len += sprintf(buf + len, "%2d. invalid\n", i);

    return len;
}

static int proc_read_htu(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;
    int len_max = off + count;
    char *pstr;
    char str[1024];
    int llen;
    int i;
    int htuts = *CFG_WRX_HTUTS;

    pstr = *start = page;

    llen = sprintf(pstr, "HTU Table (Max %d):\n", htuts);
    pstr += llen;
    len += llen;

    for ( i = 0; i < htuts; i++ ) {
        llen = print_htu(str, i);
        if ( len <= off && len + llen > off ) {
            memcpy(pstr, str + off - len, len + llen - off);
            pstr += len + llen - off;
        }
        else if ( len > off ) {
            memcpy(pstr, str, llen);
            pstr += llen;
        }
        len += llen;
        if ( len >= len_max )
            goto PROC_READ_HTU_OVERRUN_END;
    }

    *eof = 1;

    return len - off;

PROC_READ_HTU_OVERRUN_END:

    return len - llen - off;
}

static INLINE int print_tx_queue(char *buf, int i)
{
    int len = 0;

    if ( (*WTX_DMACH_ON & (1 << i)) ) {
        len += sprintf(buf + len, "%2d. valid\n", i);
        len += sprintf(buf + len, "    queue 0x%08x - sbid %u, qsb %s\n", *(u32*)WTX_QUEUE_CONFIG(i), (unsigned int)WTX_QUEUE_CONFIG(i)->sbid, WTX_QUEUE_CONFIG(i)->qsben ? "enable" : "disable");
        len += sprintf(buf + len, "    dma   0x%08x - base %08x, len %u, vlddes %u\n", *(u32*)WTX_DMA_CHANNEL_CONFIG(i), WTX_DMA_CHANNEL_CONFIG(i)->desba, WTX_DMA_CHANNEL_CONFIG(i)->deslen, WTX_DMA_CHANNEL_CONFIG(i)->vlddes);
    }
    else
        len += sprintf(buf + len, "%2d. invalid\n", i);

    return len;
}

static int proc_read_txq(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;
    int len_max = off + count;
    char *pstr;
    char str[1024];
    int llen;
    int i;

    pstr = *start = page;

    llen = sprintf(pstr, "TX Queue Config (Max %d):\n", *CFG_WTX_DCHNUM);
    pstr += llen;
    len += llen;

    for ( i = 0; i < 16; i++ ) {
        llen = print_tx_queue(str, i);
        if ( len <= off && len + llen > off ) {
            memcpy(pstr, str + off - len, len + llen - off);
            pstr += len + llen - off;
        }
        else if ( len > off ) {
            memcpy(pstr, str, llen);
            pstr += llen;
        }
        len += llen;
        if ( len >= len_max )
            goto PROC_READ_HTU_OVERRUN_END;
    }

    *eof = 1;

    return len - off;

PROC_READ_HTU_OVERRUN_END:

    return len - llen - off;
}

#endif

static int stricmp(const char *p1, const char *p2)
{
    int c1, c2;

    while ( *p1 && *p2 )
    {
        c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
        c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
        if ( (c1 -= c2) )
            return c1;
        p1++;
        p2++;
    }

    return *p1 - *p2;
}

#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static int strincmp(const char *p1, const char *p2, int n)
{
    int c1 = 0, c2;

    while ( n && *p1 && *p2 )
    {
        c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
        c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
        if ( (c1 -= c2) )
            return c1;
        p1++;
        p2++;
        n--;
    }

    return n ? *p1 - *p2 : c1;
}
#endif
static INLINE int ifx_atm_version(char *buf)
{
    int len = 0;
    unsigned int major, minor;

    ifx_atm_get_fw_ver(&major, &minor);

    len += sprintf(buf + len, "Infineon Technologies ATM driver version %d.%d.%d\n", IFX_ATM_VER_MAJOR, IFX_ATM_VER_MID, IFX_ATM_VER_MINOR);
    len += sprintf(buf + len, "Infineon Technologies ATM (A1) firmware version %d.%d\n", major, minor);

    return len;
}
static INLINE void reset_ppe(void)
{
}
static INLINE void check_parameters(void)
{
    /*  Please refer to Amazon spec 15.4 for setting these values.   */
    if ( qsb_tau < 1 )
        qsb_tau = 1;
    if ( qsb_tstep < 1 )
        qsb_tstep = 1;
    else if ( qsb_tstep > 4 )
        qsb_tstep = 4;
    else if ( qsb_tstep == 3 )
        qsb_tstep = 2;

    /*  There is a delay between the PPE writing a descriptor and the      */
    /*  descriptor actually being stored in memory. The host has the same  */
    /*  delay when writing a descriptor. The PPE uses this value to decide */
    /*  whether a write operation has taken effect.                        */
    if ( write_descriptor_delay < 0 )
        write_descriptor_delay = 0;

    if ( aal5_fill_pattern < 0 )
        aal5_fill_pattern = 0;
    else
        aal5_fill_pattern &= 0xFF;

    /*  Because of the limitation of the length field in descriptors, the  */
    /*  packet size cannot be larger than 64K minus the overhead size.     */
    if ( aal5r_max_packet_size < 0 )
        aal5r_max_packet_size = 0;
    else if ( aal5r_max_packet_size >= 65535 - MAX_RX_FRAME_EXTRA_BYTES )
        aal5r_max_packet_size = 65535 - MAX_RX_FRAME_EXTRA_BYTES;
    if ( aal5r_min_packet_size < 0 )
        aal5r_min_packet_size = 0;
    else if ( aal5r_min_packet_size > aal5r_max_packet_size )
        aal5r_min_packet_size = aal5r_max_packet_size;
    if ( aal5s_max_packet_size < 0 )
        aal5s_max_packet_size = 0;
    else if ( aal5s_max_packet_size >= 65535 - MAX_TX_FRAME_EXTRA_BYTES )
        aal5s_max_packet_size = 65535 - MAX_TX_FRAME_EXTRA_BYTES;
    if ( aal5s_min_packet_size < 0 )
        aal5s_min_packet_size = 0;
    else if ( aal5s_min_packet_size > aal5s_max_packet_size )
        aal5s_min_packet_size = aal5s_max_packet_size;

    if ( dma_rx_descriptor_length < 2 )
        dma_rx_descriptor_length = 2;
    if ( dma_tx_descriptor_length < 2 )
        dma_tx_descriptor_length = 2;
    if ( dma_rx_clp1_descriptor_threshold < 0 )
        dma_rx_clp1_descriptor_threshold = 0;
    else if ( dma_rx_clp1_descriptor_threshold > dma_rx_descriptor_length )
        dma_rx_clp1_descriptor_threshold = dma_rx_descriptor_length;
}
2004 static INLINE
int init_priv_data(void)
2008 struct rx_descriptor rx_desc
= {0};
2009 struct sk_buff
*skb
;
2010 volatile struct tx_descriptor
*p_tx_desc
;
2011 struct sk_buff
**ppskb
;
2013 // clear atm private data structure
2014 memset(&g_atm_priv_data
, 0, sizeof(g_atm_priv_data
));
2016 // allocate memory for RX (AAL) descriptors
2017 p
= kzalloc(dma_rx_descriptor_length
* sizeof(struct rx_descriptor
) + DESC_ALIGNMENT
, GFP_KERNEL
);
2020 dma_cache_wback_inv((unsigned long)p
, dma_rx_descriptor_length
* sizeof(struct rx_descriptor
) + DESC_ALIGNMENT
);
2021 g_atm_priv_data
.aal_desc_base
= p
;
2022 p
= (void *)((((unsigned int)p
+ DESC_ALIGNMENT
- 1) & ~(DESC_ALIGNMENT
- 1)) | KSEG1
);
2023 g_atm_priv_data
.aal_desc
= (volatile struct rx_descriptor
*)p
;
    //  allocate memory for RX (OAM) descriptors
    p = kzalloc(RX_DMA_CH_OAM_DESC_LEN * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return IFX_ERROR;
    dma_cache_wback_inv((unsigned long)p, RX_DMA_CH_OAM_DESC_LEN * sizeof(struct rx_descriptor) + DESC_ALIGNMENT);
    g_atm_priv_data.oam_desc_base = p;
    p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    g_atm_priv_data.oam_desc = (volatile struct rx_descriptor *)p;

    //  allocate memory for RX (OAM) buffer
    p = kzalloc(RX_DMA_CH_OAM_DESC_LEN * RX_DMA_CH_OAM_BUF_SIZE + DATA_BUFFER_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return IFX_ERROR;
    dma_cache_wback_inv((unsigned long)p, RX_DMA_CH_OAM_DESC_LEN * RX_DMA_CH_OAM_BUF_SIZE + DATA_BUFFER_ALIGNMENT);
    g_atm_priv_data.oam_buf_base = p;
    p = (void *)(((unsigned int)p + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1));
    g_atm_priv_data.oam_buf = p;

    //  allocate memory for TX descriptors
    p = kzalloc(MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return IFX_ERROR;
    dma_cache_wback_inv((unsigned long)p, MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT);
    g_atm_priv_data.tx_desc_base = p;

    //  allocate memory for TX skb pointers
    p = kzalloc(MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4, GFP_KERNEL);
    if ( p == NULL )
        return IFX_ERROR;
    dma_cache_wback_inv((unsigned long)p, MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4);
    g_atm_priv_data.tx_skb_base = p;

    //  setup RX (AAL) descriptors
    rx_desc.own     = 1;    //  descriptor owned by the PPE until a frame arrives
    rx_desc.sop     = 1;
    rx_desc.eop     = 1;
    rx_desc.byteoff = 0;
    rx_desc.datalen = RX_DMA_CH_AAL_BUF_SIZE;
    for ( i = 0; i < dma_rx_descriptor_length; i++ ) {
        skb = alloc_skb_rx();
        if ( skb == NULL )
            return IFX_ERROR;
        rx_desc.dataptr = ((unsigned int)skb->data >> 2) & 0x0FFFFFFF;
        g_atm_priv_data.aal_desc[i] = rx_desc;
    }
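
    /*
     *  The dataptr field holds a 28-bit word address: the buffer address
     *  is shifted right by two (word aligned) and masked to 28 bits, which
     *  appears to be how the PPE firmware addresses payload memory.
     *  get_skb_rx_pointer() performs the reverse mapping when the skb has
     *  to be recovered from a descriptor (see clear_priv_data()).
     */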

    //  setup RX (OAM) descriptors
    p = (void *)((unsigned int)g_atm_priv_data.oam_buf | KSEG1);
    rx_desc.own     = 1;
    rx_desc.sop     = 1;
    rx_desc.eop     = 1;
    rx_desc.byteoff = 0;
    rx_desc.datalen = RX_DMA_CH_OAM_BUF_SIZE;
    for ( i = 0; i < RX_DMA_CH_OAM_DESC_LEN; i++ ) {
        rx_desc.dataptr = ((unsigned int)p >> 2) & 0x0FFFFFFF;
        g_atm_priv_data.oam_desc[i] = rx_desc;
        p = (void *)((unsigned int)p + RX_DMA_CH_OAM_BUF_SIZE);
    }

    //  setup TX descriptors and skb pointers
    p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_atm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    ppskb = (struct sk_buff **)(((unsigned int)g_atm_priv_data.tx_skb_base + 3) & ~3);
    for ( i = 0; i < MAX_PVC_NUMBER; i++ ) {
        g_atm_priv_data.conn[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length];
        g_atm_priv_data.conn[i].tx_skb  = &ppskb[i * dma_tx_descriptor_length];
    }

    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        g_atm_priv_data.port[i].tx_max_cell_rate = DEFAULT_TX_LINK_RATE;

    return IFX_SUCCESS;
}
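
/*
 *  Layout note on init_priv_data(): the TX descriptor and skb-pointer
 *  arrays are carved into MAX_PVC_NUMBER contiguous slices of
 *  dma_tx_descriptor_length entries each, so connection i owns entries
 *  [i * dma_tx_descriptor_length .. (i + 1) * dma_tx_descriptor_length - 1]
 *  of both arrays.
 */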

static INLINE void clear_priv_data(void)
{
    int i, j;
    struct sk_buff *skb;

    for ( i = 0; i < MAX_PVC_NUMBER; i++ ) {
        if ( g_atm_priv_data.conn[i].tx_skb != NULL ) {
            for ( j = 0; j < dma_tx_descriptor_length; j++ )
                if ( g_atm_priv_data.conn[i].tx_skb[j] != NULL )
                    dev_kfree_skb_any(g_atm_priv_data.conn[i].tx_skb[j]);
        }
    }

    if ( g_atm_priv_data.tx_skb_base != NULL )
        kfree(g_atm_priv_data.tx_skb_base);

    if ( g_atm_priv_data.tx_desc_base != NULL )
        kfree(g_atm_priv_data.tx_desc_base);

    if ( g_atm_priv_data.oam_buf_base != NULL )
        kfree(g_atm_priv_data.oam_buf_base);

    if ( g_atm_priv_data.oam_desc_base != NULL )
        kfree(g_atm_priv_data.oam_desc_base);

    if ( g_atm_priv_data.aal_desc_base != NULL ) {
        for ( i = 0; i < dma_rx_descriptor_length; i++ ) {
            if ( g_atm_priv_data.aal_desc[i].sop || g_atm_priv_data.aal_desc[i].eop ) {  //  descriptor initialized
                skb = get_skb_rx_pointer(g_atm_priv_data.aal_desc[i].dataptr);
                dev_kfree_skb_any(skb);
            }
        }
        kfree(g_atm_priv_data.aal_desc_base);
    }
}

static INLINE void init_rx_tables(void)
{
    int i;
    struct wrx_queue_config wrx_queue_config = {0};
    struct wrx_dma_channel_config wrx_dma_channel_config = {0};
    struct htu_entry htu_entry = {0};
    struct htu_result htu_result = {0};
    struct htu_mask htu_mask = { set:      0x01,
                                 clp:      0x01,
                                 pid_mask: 0x00,
                                 vpi_mask: 0x00,
                                 vci_mask: 0x00,
                                 pti_mask: 0x00,
                                 clear:    0x00 };

    /*
     *  General Registers
     */
    *CFG_WRX_HTUTS  = MAX_PVC_NUMBER + OAM_HTU_ENTRY_NUMBER;
    *CFG_WRX_QNUM   = MAX_QUEUE_NUMBER;
    *CFG_WRX_DCHNUM = RX_DMA_CH_TOTAL;
    *WRX_DMACH_ON   = (1 << RX_DMA_CH_TOTAL) - 1;
    *WRX_HUNT_BITTH = DEFAULT_RX_HUNT_BITTH;

    /*
     *  WRX Queue Configuration Table
     */
    wrx_queue_config.uumask    = 0;
    wrx_queue_config.cpimask   = 0;
    wrx_queue_config.uuexp     = 0;
    wrx_queue_config.cpiexp    = 0;
    wrx_queue_config.mfs       = aal5r_max_packet_size;
    wrx_queue_config.oversize  = aal5r_max_packet_size;
    wrx_queue_config.undersize = aal5r_min_packet_size;
    wrx_queue_config.errdp     = aal5r_drop_error_packet;
    wrx_queue_config.dmach     = RX_DMA_CH_AAL;
    for ( i = 0; i < MAX_QUEUE_NUMBER; i++ )
        *WRX_QUEUE_CONFIG(i) = wrx_queue_config;
    WRX_QUEUE_CONFIG(OAM_RX_QUEUE)->dmach = RX_DMA_CH_OAM;

    /*
     *  WRX DMA Channel Configuration Table
     */
    wrx_dma_channel_config.chrl   = 0;
    wrx_dma_channel_config.clp1th = dma_rx_clp1_descriptor_threshold;
    wrx_dma_channel_config.mode   = 0;
    wrx_dma_channel_config.rlcfg  = 0;

    wrx_dma_channel_config.deslen = RX_DMA_CH_OAM_DESC_LEN;
    wrx_dma_channel_config.desba  = ((unsigned int)g_atm_priv_data.oam_desc >> 2) & 0x0FFFFFFF;
    *WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_OAM) = wrx_dma_channel_config;

    wrx_dma_channel_config.deslen = dma_rx_descriptor_length;
    wrx_dma_channel_config.desba  = ((unsigned int)g_atm_priv_data.aal_desc >> 2) & 0x0FFFFFFF;
    *WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_AAL) = wrx_dma_channel_config;

    /*
     *  HTU Tables
     */
    for ( i = 0; i < MAX_PVC_NUMBER; i++ ) {
        htu_result.qid = (unsigned int)i;

        *HTU_ENTRY(i + OAM_HTU_ENTRY_NUMBER)  = htu_entry;
        *HTU_MASK(i + OAM_HTU_ENTRY_NUMBER)   = htu_mask;
        *HTU_RESULT(i + OAM_HTU_ENTRY_NUMBER) = htu_result;
    }

    /*  OAM HTU entry: F4 segment OAM (VCI 3)  */
    htu_entry.vci     = 0x03;
    htu_mask.pid_mask = 0x03;
    htu_mask.vpi_mask = 0xFF;
    htu_mask.vci_mask = 0x0000;
    htu_mask.pti_mask = 0x07;
    htu_result.cellid = OAM_RX_QUEUE;
    htu_result.type   = 1;
    htu_result.ven    = 1;
    htu_result.qid    = OAM_RX_QUEUE;
    *HTU_RESULT(OAM_F4_SEG_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_F4_SEG_HTU_ENTRY)   = htu_mask;
    *HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY)  = htu_entry;

    /*  OAM HTU entry: F4 end-to-end OAM (VCI 4)  */
    htu_entry.vci     = 0x04;
    htu_result.cellid = OAM_RX_QUEUE;
    htu_result.type   = 1;
    htu_result.ven    = 1;
    htu_result.qid    = OAM_RX_QUEUE;
    *HTU_RESULT(OAM_F4_TOT_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_F4_TOT_HTU_ENTRY)   = htu_mask;
    *HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY)  = htu_entry;

    /*  OAM HTU entry: F5 OAM (PTI 100b/101b)  */
    htu_entry.vci     = 0x00;
    htu_entry.pti     = 0x04;
    htu_mask.vci_mask = 0xFFFF;
    htu_mask.pti_mask = 0x01;
    htu_result.cellid = OAM_RX_QUEUE;
    htu_result.type   = 1;
    htu_result.ven    = 1;
    htu_result.qid    = OAM_RX_QUEUE;
    *HTU_RESULT(OAM_F5_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_F5_HTU_ENTRY)   = htu_mask;
    *HTU_ENTRY(OAM_F5_HTU_ENTRY)  = htu_entry;

#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX
    htu_entry.pid     = 0x0;
    htu_entry.vpi     = 0x01;
    htu_entry.vci     = 0x0001;
    htu_entry.pti     = 0x00;
    htu_mask.pid_mask = 0x0;
    htu_mask.vpi_mask = 0x00;
    htu_mask.vci_mask = 0x0000;
    htu_mask.pti_mask = 0x3;
    htu_result.cellid = OAM_RX_QUEUE;
    htu_result.type   = 1;
    htu_result.ven    = 1;
    htu_result.qid    = OAM_RX_QUEUE;
    *HTU_RESULT(OAM_ARQ_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_ARQ_HTU_ENTRY)   = htu_mask;
    *HTU_ENTRY(OAM_ARQ_HTU_ENTRY)  = htu_entry;
#endif
}
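
/*
 *  The OAM entries above follow ITU-T I.610 cell addressing: F4 segment
 *  OAM travels on VCI 3 and F4 end-to-end OAM on VCI 4 of any VPI, while
 *  F5 OAM is identified by PTI 100b/101b on any VPI/VCI.  Judging from
 *  these entries, mask bits set to 1 mark "don't care" positions, e.g.
 *  vpi_mask = 0xFF ignores the VPI entirely and pti_mask = 0x01 ignores
 *  only the lowest PTI bit so that PTI 4 and PTI 5 both match.
 */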

static INLINE void init_tx_tables(void)
{
    int i;
    struct wtx_queue_config wtx_queue_config = {0};
    struct wtx_dma_channel_config wtx_dma_channel_config = {0};
    struct wtx_port_config wtx_port_config = { res1:  0,
                                               qid:   0,
                                               qsben: 1 };

    /*
     *  General Registers
     */
    *CFG_WTX_DCHNUM  = MAX_TX_DMA_CHANNEL_NUMBER;
    *WTX_DMACH_ON    = ((1 << MAX_TX_DMA_CHANNEL_NUMBER) - 1) ^ ((1 << FIRST_QSB_QID) - 1);
    *CFG_WRDES_DELAY = write_descriptor_delay;

    /*
     *  WTX Port Configuration Table
     */
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        *WTX_PORT_CONFIG(i) = wtx_port_config;

    /*
     *  WTX Queue Configuration Table
     */
    wtx_queue_config.type  = 0x0;
    wtx_queue_config.qsben = 1;
    wtx_queue_config.sbid  = 0;
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        *WTX_QUEUE_CONFIG(i) = wtx_queue_config;

    /*
     *  WTX DMA Channel Configuration Table
     */
    wtx_dma_channel_config.mode   = 0;
    wtx_dma_channel_config.deslen = 0;
    wtx_dma_channel_config.desba  = 0;
    for ( i = 0; i < FIRST_QSB_QID; i++ )
        *WTX_DMA_CHANNEL_CONFIG(i) = wtx_dma_channel_config;
    /*  normal connection  */
    wtx_dma_channel_config.deslen = dma_tx_descriptor_length;
    for ( ; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ ) {
        wtx_dma_channel_config.desba = ((unsigned int)g_atm_priv_data.conn[i - FIRST_QSB_QID].tx_desc >> 2) & 0x0FFFFFFF;
        *WTX_DMA_CHANNEL_CONFIG(i) = wtx_dma_channel_config;
    }
}
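
/*
 *  *WTX_DMACH_ON computes a channel enable mask covering channels
 *  FIRST_QSB_QID .. MAX_TX_DMA_CHANNEL_NUMBER - 1.  For instance, with
 *  FIRST_QSB_QID = 1 and MAX_TX_DMA_CHANNEL_NUMBER = 16 (hypothetical
 *  values) the expression yields 0xFFFF ^ 0x0001 = 0xFFFE.  The low
 *  channels are deliberately left without descriptors (deslen = 0).
 */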

/*
 * ####################################
 *           Global Function
 * ####################################
 */

static int atm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
{
    int i, j;

    ASSERT(port_cell != NULL, "port_cell is NULL");
    ASSERT(xdata_addr != NULL, "xdata_addr is NULL");

    for ( j = 0; j < ATM_PORT_NUMBER && j < port_cell->port_num; j++ )
        if ( port_cell->tx_link_rate[j] > 0 )
            break;
    for ( i = 0; i < ATM_PORT_NUMBER && i < port_cell->port_num; i++ )
        g_atm_priv_data.port[i].tx_max_cell_rate = port_cell->tx_link_rate[i] > 0 ? port_cell->tx_link_rate[i] : port_cell->tx_link_rate[j];

    qsb_global_set();

    for ( i = 0; i < MAX_PVC_NUMBER; i++ )
        if ( g_atm_priv_data.conn[i].vcc != NULL )
            set_qsb(g_atm_priv_data.conn[i].vcc, &g_atm_priv_data.conn[i].vcc->qos, i);

    //  TODO: ReTX set xdata_addr
    g_xdata_addr = xdata_addr;

    g_showtime = 1;

#if defined(CONFIG_VR9)
    IFX_REG_W32(0x0F, UTP_CFG);
#endif

    pr_debug("enter showtime, cell rate: 0 - %d, 1 - %d, xdata addr: 0x%08x\n", g_atm_priv_data.port[0].tx_max_cell_rate, g_atm_priv_data.port[1].tx_max_cell_rate, (unsigned int)g_xdata_addr);

    return IFX_SUCCESS;
}
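
/*
 *  Rate fallback in atm_showtime_enter(): the first loop finds the first
 *  port j reporting a positive link rate, and the second loop uses that
 *  rate for any port whose own report is zero.  For example, if the DSL
 *  firmware reports {0, 5424} cells/s, both ports end up programmed with
 *  5424 cells/s before the QSB is reconfigured.
 */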

static int atm_showtime_exit(void)
{
#if defined(CONFIG_VR9)
    IFX_REG_W32(0x00, UTP_CFG);
#endif

    g_showtime = 0;

    //  TODO: ReTX clean state
    g_xdata_addr = NULL;

    pr_debug("leave showtime\n");

    return IFX_SUCCESS;
}

/*
 * ####################################
 *           Init/Cleanup API
 * ####################################
 */

/*
 *  Description:
 *    Initialize global variables, PP32, communication structures, register
 *    IRQ and register device.
 *  Input:
 *    none
 *  Output:
 *    0    --- successful
 *    else --- failure, usually it is negative value of error code
 */
static int __devinit ifx_atm_init(void)
{
    int ret;
    int port_num;
    struct port_cell_info port_cell = {0};
    int i, j;
    char ver_str[256];

    check_parameters();

    ret = init_priv_data();
    if ( ret != IFX_SUCCESS ) {
        err("INIT_PRIV_DATA_FAIL");
        goto INIT_PRIV_DATA_FAIL;
    }

    ifx_atm_init_chip();
    init_rx_tables();
    init_tx_tables();

    /* create devices */
    for ( port_num = 0; port_num < ATM_PORT_NUMBER; port_num++ ) {
        g_atm_priv_data.port[port_num].dev = atm_dev_register("ifxmips_atm", NULL, &g_ifx_atm_ops, -1, NULL);
        if ( !g_atm_priv_data.port[port_num].dev ) {
            err("failed to register atm device %d!", port_num);
            ret = -EIO;
            goto ATM_DEV_REGISTER_FAIL;
        }
        g_atm_priv_data.port[port_num].dev->ci_range.vpi_bits = 8;
        g_atm_priv_data.port[port_num].dev->ci_range.vci_bits = 16;
        g_atm_priv_data.port[port_num].dev->link_rate = g_atm_priv_data.port[port_num].tx_max_cell_rate;
        g_atm_priv_data.port[port_num].dev->dev_data = (void*)port_num;
    }
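
    /*
     *  Each ATM port is exposed as its own atm_dev; dev_data carries the
     *  port index (cast to void *) so callbacks can map a device back to
     *  g_atm_priv_data.port[].  link_rate is in ATM cells per second, as
     *  the Linux ATM layer expects.
     */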

    /* register interrupt handler */
    ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "atm_mailbox_isr", &g_atm_priv_data);
    if ( ret ) {
        if ( ret == -EBUSY )
            err("IRQ may be occupied by other driver, please reconfig to disable it.");
        else
            err("request_irq fail");
        goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
    }
    disable_irq(PPE_MAILBOX_IGU1_INT);

    ret = ifx_pp32_start(0);
    if ( ret ) {
        err("ifx_pp32_start fail!");
        goto PP32_START_FAIL;
    }

    port_cell.port_num = ATM_PORT_NUMBER;
    ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &g_xdata_addr);
    if ( g_showtime ) {
        for ( i = 0; i < ATM_PORT_NUMBER; i++ )
            if ( port_cell.tx_link_rate[i] != 0 )
                break;
        for ( j = 0; j < ATM_PORT_NUMBER; j++ )
            g_atm_priv_data.port[j].tx_max_cell_rate = port_cell.tx_link_rate[j] != 0 ? port_cell.tx_link_rate[j] : port_cell.tx_link_rate[i];
    }

    qsb_global_set();
    validate_oam_htu_entry();

    /* create proc file */

    ifx_mei_atm_showtime_enter = atm_showtime_enter;
    ifx_mei_atm_showtime_exit  = atm_showtime_exit;

    ifx_atm_version(ver_str);
    printk(KERN_INFO "%s", ver_str);
    printk("ifxmips_atm: ATM init succeed\n");

    return IFX_SUCCESS;

PP32_START_FAIL:
    free_irq(PPE_MAILBOX_IGU1_INT, &g_atm_priv_data);
REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
ATM_DEV_REGISTER_FAIL:
    while ( port_num-- > 0 )
        atm_dev_deregister(g_atm_priv_data.port[port_num].dev);
INIT_PRIV_DATA_FAIL:
    clear_priv_data();
    printk("ifxmips_atm: ATM init failed\n");
    return ret;
}
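
/*
 *  The error labels above unwind in reverse order of construction: a
 *  failure in ifx_pp32_start() frees the IRQ, any already-registered ATM
 *  devices are deregistered via the port_num countdown, and
 *  clear_priv_data() finally releases the descriptor and buffer memory.
 */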

/*
 *  Description:
 *    Release memory, free IRQ, and deregister device.
 *  Input:
 *    none
 *  Output:
 *    none
 */
static void __exit ifx_atm_exit(void)
{
    int port_num;

    ifx_mei_atm_showtime_enter = NULL;
    ifx_mei_atm_showtime_exit  = NULL;

    ifx_pp32_stop(0);

    invalidate_oam_htu_entry();

    free_irq(PPE_MAILBOX_IGU1_INT, &g_atm_priv_data);

    for ( port_num = 0; port_num < ATM_PORT_NUMBER; port_num++ )
        atm_dev_deregister(g_atm_priv_data.port[port_num].dev);

    ifx_atm_uninit_chip();

    clear_priv_data();
}

module_init(ifx_atm_init);
module_exit(ifx_atm_exit);

MODULE_LICENSE("Dual BSD/GPL");