#include <asm/mach-ifxmips/cgu.h>

#include "ifx_ppe_fw.h"
static void set_qsb(struct atm_vcc *vcc, struct atm_qos *qos, unsigned int connection)
{
    u32 qsb_clk = cgu_get_fpi_bus_clock(2);    /* FPI configuration 2 (slow FPI bus) */
    union qsb_queue_parameter_table qsb_queue_parameter_table = {{0}};
    union qsb_queue_vbr_parameter_table qsb_queue_vbr_parameter_table = {{0}};
    u32 tmp;
    /*
     *  Peak Cell Rate (PCR) Limiter
     */
    if ( qos->txtp.max_pcr == 0 )
        qsb_queue_parameter_table.bit.tp = 0;    /* disable PCR limiter */
    else
    {
        /* peak cell rate would be slightly lower than requested [maximum_rate / pcr = (qsb_clock / 8) * (time_step / 4) / pcr] */
        tmp = ((qsb_clk * ppe_dev.qsb.tstepc) >> 5) / qos->txtp.max_pcr + 1;
        /* check if overflow takes place */
        qsb_queue_parameter_table.bit.tp = tmp > QSB_TP_TS_MAX ? QSB_TP_TS_MAX : tmp;
    }
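    /*
     * Worked example of the limiter formula above (illustrative numbers only;
     * a 62.5 MHz slow FPI clock and tstepc = 4 are assumed, not taken from
     * this file): one second corresponds to (62500000 * 4) >> 5 = 7812500
     * rate ticks, so a requested max_pcr of 100000 cells/s yields
     * tp = 7812500 / 100000 + 1 = 79. A larger tp means a longer time step
     * per cell, i.e. a lower enforced peak cell rate.
     */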
    /*
     *  Weighted Fair Queueing Factor (WFQF)
     */
    switch ( qos->txtp.traffic_class )
    {
    case ATM_CBR:
    case ATM_VBR_RT:
        /* real time queue gets weighted fair queueing bypass */
        qsb_queue_parameter_table.bit.wfqf = 0;
        break;
    case ATM_VBR_NRT:
    case ATM_UBR_PLUS:
        /* WFQF calculation here is based on virtual cell rates, to reduce granularity for high rates */
        /* WFQF is maximum cell rate / guaranteed cell rate */
        /* wfqf = qsb_minimum_cell_rate * QSB_WFQ_NONUBR_MAX / requested_minimum_peak_cell_rate */
        if ( qos->txtp.min_pcr == 0 )
            qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_NONUBR_MAX;
        else
        {
            tmp = QSB_GCR_MIN * QSB_WFQ_NONUBR_MAX / qos->txtp.min_pcr;
            if ( tmp == 0 )
                qsb_queue_parameter_table.bit.wfqf = 1;
            else if ( tmp > QSB_WFQ_NONUBR_MAX )
                qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_NONUBR_MAX;
            else
                qsb_queue_parameter_table.bit.wfqf = tmp;
        }
        break;
    default:    /* ATM_UBR */
        qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_UBR_BYPASS;
    }
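    /*
     * Note on the mapping above: the factor is inversely proportional to
     * min_pcr (QSB_GCR_MIN * QSB_WFQ_NONUBR_MAX / min_pcr, clamped to
     * [1, QSB_WFQ_NONUBR_MAX]), so a connection with a higher guaranteed
     * cell rate gets a smaller weight, which presumably corresponds to a
     * larger share from the QSB scheduler.
     */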
    /*
     *  Sustained Cell Rate (SCR) Leaky Bucket Shaper VBR.0/VBR.1
     */
    if ( qos->txtp.traffic_class == ATM_VBR_RT || qos->txtp.traffic_class == ATM_VBR_NRT )
    {
        if ( qos->txtp.scr == 0 )
        {
            /* disable shaper */
            qsb_queue_vbr_parameter_table.bit.taus = 0;
            qsb_queue_vbr_parameter_table.bit.ts = 0;
        }
        else
        {
            /* Cell Loss Priority (CLP) */
            if ( (vcc->atm_options & ATM_ATMOPT_CLP) )
                qsb_queue_parameter_table.bit.vbr = 1;
            else
                qsb_queue_parameter_table.bit.vbr = 0;
            /* Rate Shaper Parameter (TS) and Burst Tolerance Parameter for SCR (tauS) */
            tmp = ((qsb_clk * ppe_dev.qsb.tstepc) >> 5) / qos->txtp.scr + 1;
            qsb_queue_vbr_parameter_table.bit.ts = tmp > QSB_TP_TS_MAX ? QSB_TP_TS_MAX : tmp;
            tmp = (qos->txtp.mbs - 1) * (qsb_queue_vbr_parameter_table.bit.ts - qsb_queue_parameter_table.bit.tp) / 64;
            if ( tmp == 0 )
                qsb_queue_vbr_parameter_table.bit.taus = 1;
            else if ( tmp > QSB_TAUS_MAX )
                qsb_queue_vbr_parameter_table.bit.taus = QSB_TAUS_MAX;
            else
                qsb_queue_vbr_parameter_table.bit.taus = tmp;
        }
    }
    else
    {
        qsb_queue_vbr_parameter_table.bit.taus = 0;
        qsb_queue_vbr_parameter_table.bit.ts = 0;
    }
    /* Queue Parameter Table (QPT) */
    *QSB_RTM   = QSB_RTM_DM_SET(QSB_QPT_SET_MASK);
    *QSB_RTD   = QSB_RTD_TTV_SET(qsb_queue_parameter_table.dword);
    *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_QPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(connection);
    /* Queue VBR Parameter Table (QVPT) */
    *QSB_RTM   = QSB_RTM_DM_SET(QSB_QVPT_SET_MASK);
    *QSB_RTD   = QSB_RTD_TTV_SET(qsb_queue_vbr_parameter_table.dword);
    *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_VBR) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(connection);
}
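/*
 * Judging by the macro names, the three writes per table above follow the
 * QSB's indirect RAM access pattern: the data mask goes into QSB_RTM, the
 * table value into QSB_RTD, and the final QSB_RAMAC write selects the table
 * (QPT or QVPT), the low/high half, and the table entry (the connection),
 * triggering the actual write.
 */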
static inline void u64_add_u32(ppe_u64_t opt1, u32 opt2, ppe_u64_t *ret)
{
    ret->l = opt1.l + opt2;
    if ( ret->l < opt1.l || ret->l < opt2 )
        ret->h++;
}
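/*
 * The carry test above relies on unsigned wrap-around: for 32-bit unsigned
 * a + b, the truncated sum is smaller than a (equivalently, smaller than b)
 * exactly when the addition overflowed, e.g. 0xFFFFFFFF + 0x00000002 =
 * 0x00000001, so the high word of the 64-bit counter is incremented.
 */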
int find_vcc(struct atm_vcc *vcc)
{
    int i;
    struct connection *connection = ppe_dev.connection;
    int max_connections = ppe_dev.port[(int)vcc->dev->dev_data].max_connections;
    u32 occupation_table = ppe_dev.port[(int)vcc->dev->dev_data].connection_table;
    int base = ppe_dev.port[(int)vcc->dev->dev_data].connection_base;

    for ( i = 0; i < max_connections; i++, base++ )
        if ( (occupation_table & (1 << i))
            && connection[base].vcc == vcc )
            return base;

    return -1;
}
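/*
 * Lookup convention used by find_vcc and the find_* helpers below: each
 * port owns a contiguous block of connection entries starting at
 * connection_base, and bit i of connection_table marks entry
 * connection_base + i as occupied, so a set bit plus a matching vcc
 * pointer identifies the connection.
 */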
int find_vpi(unsigned int vpi)
{
    int i, j;
    struct connection *connection = ppe_dev.connection;
    struct port *port = ppe_dev.port;
    int base;

    for ( i = 0; i < ATM_PORT_NUMBER; i++, port++ )
    {
        base = port->connection_base;
        for ( j = 0; j < port->max_connections; j++, base++ )
            if ( (port->connection_table & (1 << j))
                && connection[base].vcc != NULL
                && vpi == connection[base].vcc->vpi )
                return base;
    }

    return -1;
}
int find_vpivci(unsigned int vpi, unsigned int vci)
{
    int i, j;
    struct connection *connection = ppe_dev.connection;
    struct port *port = ppe_dev.port;
    int base;

    for ( i = 0; i < ATM_PORT_NUMBER; i++, port++ )
    {
        base = port->connection_base;
        for ( j = 0; j < port->max_connections; j++, base++ )
            if ( (port->connection_table & (1 << j))
                && connection[base].vcc != NULL
                && vpi == connection[base].vcc->vpi
                && vci == connection[base].vcc->vci )
                return base;
    }

    return -1;
}
static inline void clear_htu_entry(unsigned int connection)
{
    HTU_ENTRY(connection - QSB_QUEUE_NUMBER_BASE + OAM_HTU_ENTRY_NUMBER)->vld = 0;
}
static inline void set_htu_entry(unsigned int vpi, unsigned int vci, unsigned int connection, int aal5)
{
    struct htu_entry htu_entry   = { res1:     0x00,
                                     pid:      ppe_dev.connection[connection].port & 0x01,
                                     vpi:      vpi,
                                     vci:      vci,
                                     vld:      0x01 };

    struct htu_mask htu_mask     = { set:      0x03,
                                     pti_mask: 0x03 };    // 0xx, user data

    struct htu_result htu_result = { res1:     0x00,
                                     type:     aal5 ? 0x00 : 0x01,
                                     qid:      connection };

    *HTU_RESULT(connection - QSB_QUEUE_NUMBER_BASE + OAM_HTU_ENTRY_NUMBER) = htu_result;
    *HTU_MASK(connection - QSB_QUEUE_NUMBER_BASE + OAM_HTU_ENTRY_NUMBER)   = htu_mask;
    *HTU_ENTRY(connection - QSB_QUEUE_NUMBER_BASE + OAM_HTU_ENTRY_NUMBER)  = htu_entry;
}
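/*
 * Ordering note: the result and mask words are programmed before the entry
 * word itself, and clear_htu_entry() only drops the vld bit, so the
 * classifier hardware presumably never matches against a half-programmed
 * HTU rule.
 */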
int alloc_tx_connection(int connection)
{
    unsigned long sys_flag;
    int desc_base;

    if ( ppe_dev.dma.tx_desc_alloc_pos[connection] == ppe_dev.dma.tx_desc_release_pos[connection] && ppe_dev.dma.tx_desc_alloc_flag[connection] )
        return -1;

    /* amend descriptor pointer and allocation number */
    local_irq_save(sys_flag);
    desc_base = ppe_dev.dma.tx_descriptor_number * (connection - QSB_QUEUE_NUMBER_BASE) + ppe_dev.dma.tx_desc_alloc_pos[connection];
    if ( ++ppe_dev.dma.tx_desc_alloc_pos[connection] == ppe_dev.dma.tx_descriptor_number )
        ppe_dev.dma.tx_desc_alloc_pos[connection] = 0;
    ppe_dev.dma.tx_desc_alloc_flag[connection] = 1;
    local_irq_restore(sys_flag);

    return desc_base;
}
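/*
 * Ring-full convention used above: alloc_pos == release_pos is ambiguous on
 * its own (empty or full), so tx_desc_alloc_flag disambiguates; it is set
 * after every allocation and is expected to be cleared by the release path
 * (not shown here) once the positions meet again.
 */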
int ppe_open(struct atm_vcc *vcc)
{
    int ret;
    struct port *port = &ppe_dev.port[(int)vcc->dev->dev_data];
    int conn;
    int f_enable_irq = 0;
    int i;

    printk("%s:%s[%d] removed 2 args from signature\n", __FILE__, __func__, __LINE__);

    if ( vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0 )
        return -EPROTONOSUPPORT;
    /* check bandwidth */
    if ( (vcc->qos.txtp.traffic_class == ATM_CBR && vcc->qos.txtp.max_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
        || (vcc->qos.txtp.traffic_class == ATM_VBR_RT && vcc->qos.txtp.max_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
        || (vcc->qos.txtp.traffic_class == ATM_VBR_NRT && vcc->qos.txtp.pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
        || (vcc->qos.txtp.traffic_class == ATM_UBR_PLUS && vcc->qos.txtp.min_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate)) )
    {
        ret = -EINVAL;
        goto PPE_OPEN_EXIT;
    }

    printk("alloc vpi = %d, vci = %d\n", vcc->vpi, vcc->vci);
    /* check existing vpi,vci */
    conn = find_vpivci(vcc->vpi, vcc->vci);
    if ( conn >= 0 )
    {
        ret = -EINVAL;
        goto PPE_OPEN_EXIT;
    }
    /* check whether it needs to enable irq */
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( ppe_dev.port[i].max_connections != 0 && ppe_dev.port[i].connection_table != 0 )
            break;
    if ( i == ATM_PORT_NUMBER )
        f_enable_irq = 1;
    /* allocate connection */
    for ( i = 0, conn = port->connection_base; i < port->max_connections; i++, conn++ )
        if ( !(port->connection_table & (1 << i)) )
        {
            port->connection_table |= 1 << i;
            ppe_dev.connection[conn].vcc = vcc;
            break;
        }
    if ( i == port->max_connections )
    {
        ret = -EINVAL;
        goto PPE_OPEN_EXIT;
    }
#if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
    /* assign DMA channel and setup weight value for RX QoS */
    switch ( vcc->qos.rxtp.traffic_class )
    {
    case ATM_CBR:
        ppe_dev.connection[conn].rx_dma_channel = RX_DMA_CH_CBR;
        break;
    case ATM_VBR_RT:
        ppe_dev.connection[conn].rx_dma_channel = RX_DMA_CH_VBR_RT;
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_RT] += vcc->qos.rxtp.max_pcr;
        ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] += vcc->qos.rxtp.max_pcr;
        break;
    case ATM_VBR_NRT:
        ppe_dev.connection[conn].rx_dma_channel = RX_DMA_CH_VBR_NRT;
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_NRT] += vcc->qos.rxtp.pcr;
        ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] += vcc->qos.rxtp.pcr;
        break;
    case ATM_UBR_PLUS:
        ppe_dev.connection[conn].rx_dma_channel = RX_DMA_CH_AVR;
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_AVR] += vcc->qos.rxtp.min_pcr;
        ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] += vcc->qos.rxtp.min_pcr;
        break;
    default:
        ppe_dev.connection[conn].rx_dma_channel = RX_DMA_CH_UBR;
    }

    /* update RX queue configuration table */
    WRX_QUEUE_CONFIG(conn)->dmach = ppe_dev.connection[conn].rx_dma_channel;

    printk("ppe_open: QID %d, DMA %d\n", conn, WRX_QUEUE_CONFIG(conn)->dmach);
    printk("conn = %d, dmach = %d", conn, WRX_QUEUE_CONFIG(conn)->dmach);
#endif  // defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
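    /*
     * The weights accumulated above are simply the requested cell rates of
     * all open VCCs on a channel, so the RX dispatcher (not part of this
     * file) can presumably serve the CBR/VBR channels in proportion to the
     * bandwidth actually subscribed on them.
     */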
    /* reserve bandwidth */
    switch ( vcc->qos.txtp.traffic_class )
    {
    case ATM_CBR:
    case ATM_VBR_RT:
        port->tx_current_cell_rate += vcc->qos.txtp.max_pcr;
        break;
    case ATM_VBR_NRT:
        port->tx_current_cell_rate += vcc->qos.txtp.pcr;
        break;
    case ATM_UBR_PLUS:
        port->tx_current_cell_rate += vcc->qos.txtp.min_pcr;
        break;
    }

    /* setup QSB rate shaping for this queue */
    set_qsb(vcc, &vcc->qos, conn);
    /* update atm_vcc structure */
    vcc->itf = (int)vcc->dev->dev_data;
    set_bit(ATM_VF_READY, &vcc->flags);
341 printk("ppe_open: enable_irq\n");
343 enable_irq(IFXMIPS_PPE_MBOX_INT
);
346 *MBOX_IGU1_ISRC
= (1 << conn
) | (1 << (conn
+ 16));
347 *MBOX_IGU1_IER
|= (1 << conn
) | (1 << (conn
+ 16));
348 *MBOX_IGU3_ISRC
= (1 << conn
) | (1 << (conn
+ 16));
349 *MBOX_IGU3_IER
|= (1 << conn
) | (1 << (conn
+ 16));
    /* set htu entry */
    set_htu_entry(vcc->vpi, vcc->vci, conn, vcc->qos.aal == ATM_AAL5 ? 1 : 0);

    printk("ppe_open(%d.%d): conn = %d, ppe_dev.dma = %08X\n", vcc->vpi, vcc->vci, conn, (u32)&ppe_dev.dma.rx_descriptor_number);

    ret = 0;

PPE_OPEN_EXIT:
    printk("open ATM itf = %d, vpi = %d, vci = %d, ret = %d", (int)vcc->dev->dev_data, (int)vcc->vpi, vcc->vci, ret);
    return ret;
}
void ppe_close(struct atm_vcc *vcc)
{
    int conn;
    int i;
    struct port *port;
    struct connection *connection;

    /* get connection id */
    conn = find_vcc(vcc);
    if ( conn < 0 )
    {
        printk("can't find vcc\n");
        return;
    }
    if ( !((Atm_Priv *)vcc)->on )
        return;
    connection = &ppe_dev.connection[conn];
    port = &ppe_dev.port[connection->port];
    clear_htu_entry(conn);

    /* release connection */
    port->connection_table &= ~(1 << (conn - port->connection_base));
    connection->vcc = NULL;
    connection->access_time.tv_sec = 0;
    connection->access_time.tv_nsec = 0;
    connection->aal5_vcc_crc_err = 0;
    connection->aal5_vcc_oversize_sdu = 0;
    /* check whether any connection is still active; if not, disable irq */
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( ppe_dev.port[i].max_connections != 0 && ppe_dev.port[i].connection_table != 0 )
            break;
    if ( i == ATM_PORT_NUMBER )
        disable_irq(IFXMIPS_PPE_MBOX_INT);

    /* clear pending mailbox interrupt of this connection */
    *MBOX_IGU1_ISRC = (1 << conn) | (1 << (conn + 16));
#if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
    /* remove weight value from RX DMA channel */
    switch ( vcc->qos.rxtp.traffic_class )
    {
    case ATM_VBR_RT:
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_RT] -= vcc->qos.rxtp.max_pcr;
        if ( ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] > ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_RT] )
            ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_RT];
        break;
    case ATM_VBR_NRT:
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_NRT] -= vcc->qos.rxtp.pcr;
        if ( ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] > ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_NRT] )
            ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_NRT];
        break;
    case ATM_UBR_PLUS:
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_AVR] -= vcc->qos.rxtp.min_pcr;
        if ( ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] > ppe_dev.dma.rx_default_weight[RX_DMA_CH_AVR] )
            ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_AVR];
        break;
    }
#endif  // defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
    /* release bandwidth */
    switch ( vcc->qos.txtp.traffic_class )
    {
    case ATM_CBR:
    case ATM_VBR_RT:
        port->tx_current_cell_rate -= vcc->qos.txtp.max_pcr;
        break;
    case ATM_VBR_NRT:
        port->tx_current_cell_rate -= vcc->qos.txtp.pcr;
        break;
    case ATM_UBR_PLUS:
        port->tx_current_cell_rate -= vcc->qos.txtp.min_pcr;
        break;
    }
    /* idle for a while to let parallel operation finish */
    for ( i = 0; i < IDLE_CYCLE_NUMBER; i++ );
    ((Atm_Priv *)vcc)->on = 0;
}
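/*
 * Caveat on the delay loop above: with a non-volatile counter the compiler
 * is free to drop the empty for-loop entirely, so this "idle for a while"
 * only works reliably at low optimization levels; a safer sketch would be
 * udelay() or a loop over a volatile variable.
 */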
int ppe_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
{
    return -ENOTTY;    /* no device-specific ioctl support */
}
int ppe_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
    int ret;
    int conn;
    int desc_base;
    u32 datalen;
    u32 byteoff;
    register struct tx_descriptor reg_desc;
    struct tx_descriptor *desc;

    printk("ppe_send\n");
    printk("skb->users = %d\n", skb->users.counter);

    if ( vcc == NULL || skb == NULL )
        return -EINVAL;

//  down(&ppe_dev.sem);

    ATM_SKB(skb)->vcc = vcc;
    conn = find_vcc(vcc);

    printk("ppe_send: conn = %d\n", conn);
    if ( conn < 0 )
    {
        ret = -EINVAL;
        goto FIND_VCC_FAIL;
    }

    if ( vcc->qos.aal == ATM_AAL5 )
    {
        struct tx_inband_header *header;

        /* allocate descriptor */
        desc_base = alloc_tx_connection(conn);
        if ( desc_base < 0 )
        {
            ret = -EIO;
            //goto ALLOC_TX_CONNECTION_FAIL;
        }
        desc = &ppe_dev.dma.tx_descriptor_base[desc_base];

        /* load descriptor from memory */
        reg_desc = *desc;
        datalen = skb->len;

        byteoff = (u32)skb->data & (DMA_ALIGNMENT - 1);
        if ( skb_headroom(skb) < byteoff + TX_INBAND_HEADER_LENGTH )
        {
            struct sk_buff *new_skb;

            printk("skb_headroom(skb) < byteoff + TX_INBAND_HEADER_LENGTH");
            printk("skb_headroom(skb 0x%08X, skb->data 0x%08X) (%d) < byteoff (%d) + TX_INBAND_HEADER_LENGTH (%d)\n", (u32)skb, (u32)skb->data, skb_headroom(skb), byteoff, TX_INBAND_HEADER_LENGTH);
            new_skb = alloc_skb_tx(datalen);
            if ( new_skb == NULL )
            {
                printk("alloc_skb_tx: fail\n");
                ret = -ENOMEM;
                goto ALLOC_SKB_TX_FAIL;
            }
            ATM_SKB(new_skb)->vcc = NULL;
            skb_put(new_skb, datalen);
            memcpy(new_skb->data, skb->data, datalen);
            atm_free_tx_skb_vcc(skb);
            skb = new_skb;

            byteoff = (u32)skb->data & (DMA_ALIGNMENT - 1);
            skb_push(skb, byteoff + TX_INBAND_HEADER_LENGTH);
        }
        else
        {
537 printk("skb_headroom(skb) >= byteoff + TX_INBAND_HEADER_LENGTH");
539 printk("before skb_push, skb->data = 0x%08X", (u32
)skb
->data
);
540 skb_push(skb
, byteoff
+ TX_INBAND_HEADER_LENGTH
);
541 printk("after skb_push, skb->data = 0x%08X", (u32
)skb
->data
);
543 header
= (struct tx_inband_header
*)(u32
)skb
->data
;
544 printk("header = 0x%08X", (u32
)header
);
        /* setup inband trailer */
        header->pad = ppe_dev.aal5.padding_byte;

        /* setup cell header */
        header->clp = (vcc->atm_options & ATM_ATMOPT_CLP) ? 1 : 0;
        header->pti = ATM_PTI_US0;
        header->vci = vcc->vci;
        header->vpi = vcc->vpi;
        /* setup descriptor */
        reg_desc.dataptr = (u32)skb->data >> 2;
        reg_desc.datalen = datalen;
        reg_desc.byteoff = byteoff;

        printk("setup header, datalen = %d, byteoff = %d", reg_desc.datalen, reg_desc.byteoff);

        UPDATE_VCC_STAT(conn, tx_pdu, 1);

        atomic_inc(&vcc->stats->tx);
    }
    else
    {
        /* allocate descriptor */
        desc_base = alloc_tx_connection(conn);
        if ( desc_base < 0 )
        {
            ret = -EIO;
            goto ALLOC_TX_CONNECTION_FAIL;
        }
        desc = &ppe_dev.dma.tx_descriptor_base[desc_base];

        /* load descriptor from memory */
        reg_desc = *desc;
        /* if data pointer is not aligned, allocate new sk_buff */
        if ( ((u32)skb->data & (DMA_ALIGNMENT - 1)) )
        {
            struct sk_buff *new_skb;

            printk("skb->data not aligned\n");

            new_skb = alloc_skb_tx(skb->len);
            if ( new_skb == NULL )
            {
                ret = -ENOMEM;
                goto ALLOC_SKB_TX_FAIL;
            }
            ATM_SKB(new_skb)->vcc = NULL;
            skb_put(new_skb, skb->len);
            memcpy(new_skb->data, skb->data, skb->len);
            atm_free_tx_skb_vcc(skb);
            skb = new_skb;
        }
        reg_desc.dataptr = (u32)skb->data >> 2;
        reg_desc.datalen = skb->len;
        reg_desc.byteoff = 0;

        atomic_inc(&vcc->stats->tx);
    }
618 printk("update descriptor send pointer, desc = 0x%08X", (u32
)desc
);
620 ppe_dev
.dma
.tx_skb_pointers
[desc_base
] = skb
;
622 dma_cache_wback((unsigned long)skb
->data
, skb
->len
);
624 mailbox_signal(conn
, 1);
626 printk("ppe_send: success");
632 printk("FIND_VCC_FAIL\n");
635 ppe_dev
.mib
.wtx_err_pdu
++;
636 atm_free_tx_skb_vcc(skb
);
641 printk("ALLOC_SKB_TX_FAIL\n");
644 if ( vcc
->qos
.aal
== ATM_AAL5
)
646 UPDATE_VCC_STAT(conn
, tx_err_pdu
, 1);
647 ppe_dev
.mib
.wtx_err_pdu
++;
650 atomic_inc(&vcc
->stats
->tx_err
);
651 atm_free_tx_skb_vcc(skb
);
ALLOC_TX_CONNECTION_FAIL:
    printk("ALLOC_TX_CONNECTION_FAIL\n");

    if ( vcc->qos.aal == ATM_AAL5 )
    {
        UPDATE_VCC_STAT(conn, tx_sw_drop_pdu, 1);
        ppe_dev.mib.wtx_drop_pdu++;
    }
    atomic_inc(&vcc->stats->tx_err);
    atm_free_tx_skb_vcc(skb);
    return ret;
}
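/*
 * Shape of the TX path above, for orientation: allocate a descriptor slot
 * (alloc_tx_connection), make sure skb->data is DMA-aligned (copying into a
 * fresh buffer if not), prepend the in-band header for AAL5, write the
 * descriptor back, flush the payload from cache (dma_cache_wback), and
 * finally kick the PPE through mailbox_signal().
 */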
int ppe_send_oam(struct atm_vcc *vcc, void *cell, int flags)
{
    int conn;
    struct uni_cell_header *uni_cell_header = (struct uni_cell_header *)cell;
    int desc_base;
    struct sk_buff *skb;
    register struct tx_descriptor reg_desc;
    struct tx_descriptor *desc;

    printk("ppe_send_oam");

    if ( ((uni_cell_header->pti == ATM_PTI_SEGF5 || uni_cell_header->pti == ATM_PTI_E2EF5)
        && find_vpivci(uni_cell_header->vpi, uni_cell_header->vci) < 0)
        || ((uni_cell_header->vci == 0x03 || uni_cell_header->vci == 0x04)
        && find_vpi(uni_cell_header->vpi) < 0) )
        return -EINVAL;
#if OAM_TX_QUEUE_NUMBER_PER_PORT != 0
    /* get queue ID of OAM TX queue, and the TX DMA channel ID is the same as queue ID */
    conn = ppe_dev.port[(int)vcc->dev->dev_data].oam_tx_queue;
#else
    /* find queue ID */
    conn = find_vcc(vcc);
    if ( conn < 0 )
    {
        printk("OAM: cannot find queue\n");
        return -EINVAL;
    }
#endif  // OAM_TX_QUEUE_NUMBER_PER_PORT != 0
    /* allocate descriptor */
    desc_base = alloc_tx_connection(conn);
    if ( desc_base < 0 )
    {
        printk("OAM: cannot allocate TX connection\n");
        return -EIO;
    }

    desc = &ppe_dev.dma.tx_descriptor_base[desc_base];

    /* load descriptor from memory */
    reg_desc = *(struct tx_descriptor *)desc;
    /* allocate sk_buff */
    skb = alloc_skb_tx(CELL_SIZE);
    if ( skb == NULL )
        return -ENOMEM;
#if OAM_TX_QUEUE_NUMBER_PER_PORT != 0
    ATM_SKB(skb)->vcc = NULL;
#else
    ATM_SKB(skb)->vcc = vcc;
#endif  // OAM_TX_QUEUE_NUMBER_PER_PORT != 0

    /* copy cell */
    skb_put(skb, CELL_SIZE);
    memcpy(skb->data, cell, CELL_SIZE);
    /* setup descriptor */
    reg_desc.dataptr = (u32)skb->data >> 2;
    reg_desc.datalen = CELL_SIZE;
    reg_desc.byteoff = 0;

    /* update descriptor send pointer */
    ppe_dev.dma.tx_skb_pointers[desc_base] = skb;

    /* write descriptor to memory and write back cache */
    *(struct tx_descriptor *)desc = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);

    /* signal PPE */
    mailbox_signal(conn, 1);

    return 0;
}
int ppe_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
    int conn;

    printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__);

    if ( vcc == NULL || qos == NULL )
        return -EINVAL;

    conn = find_vcc(vcc);
    if ( conn < 0 )
        return -EINVAL;

    set_qsb(vcc, qos, conn);

    return 0;
}
static inline void init_chip(void)
{
    /* enable PPE module in PMU */
    *(unsigned long *)0xBF10201C &= ~((1 << 15) | (1 << 13) | (1 << 9));

    /* EMA settings */
    *EMA_CMDCFG  = (EMA_CMD_BUF_LEN << 16) | (EMA_CMD_BASE_ADDR >> 2);
    *EMA_DATACFG = (EMA_DATA_BUF_LEN << 16) | (EMA_DATA_BASE_ADDR >> 2);
    *EMA_IER     = 0x000000FF;
    *EMA_CFG     = EMA_READ_BURST | (EMA_WRITE_BURST << 2);

    /* mailbox settings: clear all pending interrupts, mask all sources */
    *MBOX_IGU1_ISRC = 0xFFFFFFFF;
    *MBOX_IGU1_IER  = 0x00000000;
    *MBOX_IGU3_ISRC = 0xFFFFFFFF;
    *MBOX_IGU3_IER  = 0x00000000;
}
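/*
 * Judging by the per-connection writes in ppe_open()/ppe_close(), the ISRC
 * registers are write-1-to-clear, so the 0xFFFFFFFF stores above discard
 * any stale interrupt state while IER = 0 keeps every source masked until
 * a connection is opened.
 */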
int pp32_download_code(u32 *code_src, unsigned int code_dword_len, u32 *data_src, unsigned int data_dword_len)
{
    u32 reg_old_value;
    volatile u32 *dest;

    if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
        || data_src == 0 || ((unsigned long)data_src & 0x03) != 0 )
        return -EINVAL;

    /* save the old value of CDM_CFG and set PPE code memory to FPI bus access mode */
    reg_old_value = *CDM_CFG;
    if ( code_dword_len <= 4096 )
        *CDM_CFG = CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00);
    else
        *CDM_CFG = CDM_CFG_RAM1_SET(0x01) | CDM_CFG_RAM0_SET(0x00);

    /* copy code */
    dest = CDM_CODE_MEMORY_RAM0_ADDR(0);
    while ( code_dword_len-- > 0 )
        *dest++ = *code_src++;

    /* copy data */
    dest = PP32_DATA_MEMORY_RAM1_ADDR(0);
    while ( data_dword_len-- > 0 )
        *dest++ = *data_src++;

    return 0;
}
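/*
 * On the size check above: 4096 dwords is 16 KB, so a firmware image that
 * fits is served from code RAM0 alone, while a larger image presumably maps
 * RAM1 into the code space as well (CDM_CFG_RAM1_SET(0x01)) before the copy
 * loop streams the image in dword by dword.
 */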
int pp32_start(void)
{
    int ret;
    int i;

    /* download firmware */
    ret = pp32_download_code(firmware_binary_code, sizeof(firmware_binary_code) / sizeof(*firmware_binary_code), firmware_binary_data, sizeof(firmware_binary_data) / sizeof(*firmware_binary_data));
    if ( ret )
        return ret;

    /* run PP32 */
    *PP32_DBG_CTRL = DBG_CTRL_START_SET(1);

    /* idle for a while to let PP32 init itself */
    for ( i = 0; i < IDLE_CYCLE_NUMBER; i++ );

    return 0;
}

void pp32_stop(void)
{
    /* halt PP32 */
    *PP32_DBG_CTRL = DBG_CTRL_STOP_SET(1);
}