#include <asm/mach-ifxmips/cgu.h>
#include <linux/module.h>
#include <linux/atmdev.h>

struct ppe_dev ppe_dev;
static int port_max_connection[2] = {7, 7};        /* Maximum number of connections for ports (0-14) */
static int port_cell_rate_up[2] = {3200, 3200};    /* Maximum TX cell rate for ports */
static int qsb_tau = 1;
static int qsb_srvm = 0x0f;
static int qsb_tstep = 4;
static int write_descriptor_delay = 0x20;
static int aal5_fill_pattern = 0x007E;
static int aal5r_max_packet_size = 0x0700;
static int aal5r_min_packet_size = 0x0000;
static int aal5s_max_packet_size = 0x0700;
static int aal5s_min_packet_size = 0x0000;
static int aal5r_drop_error_packet = 1;
static int dma_rx_descriptor_length = 48;
static int dma_tx_descriptor_length = 64;
static int dma_rx_clp1_descriptor_threshold = 38;
//module_param(port_max_connection, "2-2i");
//module_param(port_cell_rate_up, "2-2i");
module_param(qsb_tau, int, 0);
module_param(qsb_srvm, int, 0);
module_param(qsb_tstep, int, 0);
module_param(write_descriptor_delay, int, 0);
module_param(aal5_fill_pattern, int, 0);
module_param(aal5r_max_packet_size, int, 0);
module_param(aal5r_min_packet_size, int, 0);
module_param(aal5s_max_packet_size, int, 0);
module_param(aal5s_min_packet_size, int, 0);
module_param(aal5r_drop_error_packet, int, 0);
module_param(dma_rx_descriptor_length, int, 0);
module_param(dma_tx_descriptor_length, int, 0);
module_param(dma_rx_clp1_descriptor_threshold, int, 0);
MODULE_PARM_DESC(port_cell_rate_up, "ATM port upstream rate in cells/s");
MODULE_PARM_DESC(port_max_connection, "Maximum ATM connections for port (0-1)");
MODULE_PARM_DESC(qsb_tau, "Cell delay variation. Value must be > 0");
MODULE_PARM_DESC(qsb_srvm, "Maximum burst size");
MODULE_PARM_DESC(qsb_tstep, "n*32 cycles per sbs cycles n=1,2,4");
MODULE_PARM_DESC(write_descriptor_delay, "PPE core clock cycles between descriptor write and effectiveness in external RAM");
MODULE_PARM_DESC(aal5_fill_pattern, "Filling pattern (PAD) for AAL5 frames");
MODULE_PARM_DESC(aal5r_max_packet_size, "Max packet size in bytes for downstream AAL5 frames");
MODULE_PARM_DESC(aal5r_min_packet_size, "Min packet size in bytes for downstream AAL5 frames");
MODULE_PARM_DESC(aal5s_max_packet_size, "Max packet size in bytes for upstream AAL5 frames");
MODULE_PARM_DESC(aal5s_min_packet_size, "Min packet size in bytes for upstream AAL5 frames");
MODULE_PARM_DESC(aal5r_drop_error_packet, "Non-zero value to drop error packets for downstream");
MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptors assigned to DMA RX channel (>16)");
MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptors assigned to DMA TX channel (>16)");
MODULE_PARM_DESC(dma_rx_clp1_descriptor_threshold, "Descriptor threshold for cells with cell loss priority 1");
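
/*
 * Usage sketch (module and file names below are assumptions, not taken from
 * this file): the parameters above can be overridden at load time, e.g.
 *
 *     insmod ifxmips_atm.ko qsb_tau=1 qsb_tstep=4 aal5r_drop_error_packet=1 \
 *            dma_rx_descriptor_length=48 dma_tx_descriptor_length=64
 *
 * Parameters not given on the command line keep the defaults defined above,
 * subject to the range checks in check_parameters().
 */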
void init_rx_tables(void)

    struct wrx_queue_config wrx_queue_config = {0};
    struct wrx_dma_channel_config wrx_dma_channel_config = {0};
    struct htu_entry htu_entry = {0};
    struct htu_result htu_result = {0};

    struct htu_mask htu_mask = { set: 0x03,

    *CFG_WRX_HTUTS = ppe_dev.max_connections + OAM_HTU_ENTRY_NUMBER;
    *CFG_WRX_QNUM = ppe_dev.max_connections + OAM_RX_QUEUE_NUMBER + QSB_QUEUE_NUMBER_BASE;
    *CFG_WRX_DCHNUM = ppe_dev.dma.rx_total_channel_used;
    *WRX_DMACH_ON = (1 << ppe_dev.dma.rx_total_channel_used) - 1;
    *WRX_HUNT_BITTH = DEFAULT_RX_HUNT_BITTH;
    /*
     * WRX Queue Configuration Table
     */
    wrx_queue_config.uumask = 0;
    wrx_queue_config.cpimask = 0;
    wrx_queue_config.uuexp = 0;
    wrx_queue_config.cpiexp = 0;
    wrx_queue_config.mfs = ppe_dev.aal5.rx_max_packet_size;    // rx_buffer_size
    wrx_queue_config.oversize = ppe_dev.aal5.rx_max_packet_size;
    wrx_queue_config.undersize = ppe_dev.aal5.rx_min_packet_size;
    wrx_queue_config.errdp = ppe_dev.aal5.rx_drop_error_packet;
    for ( i = 0; i < QSB_QUEUE_NUMBER_BASE; i++ )
        *WRX_QUEUE_CONFIG(i) = wrx_queue_config;
    for ( j = 0; j < ppe_dev.max_connections; j++ )
#if !defined(ENABLE_RX_QOS) || !ENABLE_RX_QOS
        /* If RX QoS is disabled, the DMA channel must be fixed. */
        wrx_queue_config.dmach = ppe_dev.connection[i].rx_dma_channel;
#endif  // !defined(ENABLE_RX_QOS) || !ENABLE_RX_QOS
        *WRX_QUEUE_CONFIG(i++) = wrx_queue_config;

    for ( j = 0; j < OAM_RX_DMA_CHANNEL_NUMBER; j++ )
#if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
        wrx_queue_config.dmach = RX_DMA_CH_OAM;
#else
        wrx_queue_config.dmach = ppe_dev.oam_rx_dma_channel + j;
#endif  // defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
        *WRX_QUEUE_CONFIG(i++) = wrx_queue_config;
    wrx_dma_channel_config.deslen = ppe_dev.dma.rx_descriptor_number;
    wrx_dma_channel_config.chrl = 0;
    wrx_dma_channel_config.clp1th = ppe_dev.dma.rx_clp1_desc_threshold;
    wrx_dma_channel_config.mode = WRX_DMA_CHANNEL_COUNTER_MODE;
    wrx_dma_channel_config.rlcfg = WRX_DMA_BUF_LEN_PER_DESCRIPTOR;
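    /*
     * Note on the loop below: the RX descriptors for all channels live in one
     * contiguous array, and desba is programmed as a 32-bit word address
     * (byte address >> 2 with the upper nibble masked off), so each channel's
     * base is offset by rx_descriptor_number descriptors from the previous one.
     */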
    for ( i = 0; i < ppe_dev.dma.rx_total_channel_used; i++ )
        wrx_dma_channel_config.desba = (((u32)ppe_dev.dma.rx_descriptor_base >> 2) & 0x0FFFFFFF) + ppe_dev.dma.rx_descriptor_number * i * (sizeof(struct rx_descriptor) >> 2);
        *WRX_DMA_CHANNEL_CONFIG(i) = wrx_dma_channel_config;
    for ( i = 0; i < ppe_dev.max_connections; i++ )
        htu_result.qid = (unsigned int)i;

        *HTU_ENTRY(i + OAM_HTU_ENTRY_NUMBER) = htu_entry;
        *HTU_MASK(i + OAM_HTU_ENTRY_NUMBER) = htu_mask;
        *HTU_RESULT(i + OAM_HTU_ENTRY_NUMBER) = htu_result;

    htu_entry.vci = 0x03;
    htu_mask.pid_mask = 0x03;
    htu_mask.vpi_mask = 0xFF;
    htu_mask.vci_mask = 0x0000;
    htu_mask.pti_mask = 0x07;
    htu_result.cellid = ppe_dev.oam_rx_queue;

    htu_result.qid = ppe_dev.oam_rx_queue;
    *HTU_RESULT(OAM_F4_SEG_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_F4_SEG_HTU_ENTRY) = htu_mask;
    *HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY) = htu_entry;

    htu_entry.vci = 0x04;
    htu_result.cellid = ppe_dev.oam_rx_queue;

    htu_result.qid = ppe_dev.oam_rx_queue;
    *HTU_RESULT(OAM_F4_TOT_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_F4_TOT_HTU_ENTRY) = htu_mask;
    *HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY) = htu_entry;

    htu_entry.vci = 0x00;
    htu_entry.pti = 0x04;
    htu_mask.vci_mask = 0xFFFF;
    htu_mask.pti_mask = 0x01;
    htu_result.cellid = ppe_dev.oam_rx_queue;

    htu_result.qid = ppe_dev.oam_rx_queue;
    *HTU_RESULT(OAM_F5_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_F5_HTU_ENTRY) = htu_mask;
    *HTU_ENTRY(OAM_F5_HTU_ENTRY) = htu_entry;
void init_tx_tables(void)

    struct wtx_queue_config wtx_queue_config = {0};
    struct wtx_dma_channel_config wtx_dma_channel_config = {0};

    struct wtx_port_config wtx_port_config = { res1: 0,

    *CFG_WTX_DCHNUM = ppe_dev.dma.tx_total_channel_used + QSB_QUEUE_NUMBER_BASE;
    *WTX_DMACH_ON = ((1 << (ppe_dev.dma.tx_total_channel_used + QSB_QUEUE_NUMBER_BASE)) - 1) ^ ((1 << QSB_QUEUE_NUMBER_BASE) - 1);
    *CFG_WRDES_DELAY = ppe_dev.dma.write_descriptor_delay;
    /*
     * WTX Port Configuration Table
     */
#if !defined(DISABLE_QSB) || !DISABLE_QSB
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        *WTX_PORT_CONFIG(i) = wtx_port_config;
#else
    wtx_port_config.qsben = 0;
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        wtx_port_config.qid = ppe_dev.port[i].connection_base;
        *WTX_PORT_CONFIG(i) = wtx_port_config;

        printk("port %d: qid = %d, qsb disabled\n", i, wtx_port_config.qid);
    /*
     * WTX Queue Configuration Table
     */
    wtx_queue_config.res1 = 0;
    wtx_queue_config.res2 = 0;
//    wtx_queue_config.type = 0x03;
    wtx_queue_config.type = 0x0;
#if !defined(DISABLE_QSB) || !DISABLE_QSB
    wtx_queue_config.qsben = 1;
#else
    wtx_queue_config.qsben = 0;
#endif
    wtx_queue_config.sbid = 0;
    for ( i = 0; i < QSB_QUEUE_NUMBER_BASE; i++ )
        *WTX_QUEUE_CONFIG(i) = wtx_queue_config;
    for ( j = 0; j < ppe_dev.max_connections; j++ )
        wtx_queue_config.sbid = ppe_dev.connection[i].port & 0x01;    /* assign QSB to TX queue */
        *WTX_QUEUE_CONFIG(i) = wtx_queue_config;

//    wtx_queue_config.type = 0x01;
    wtx_queue_config.type = 0x00;
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        wtx_queue_config.sbid = i & 0x01;
        for ( j = 0; j < OAM_TX_QUEUE_NUMBER_PER_PORT; j++ )
            *WTX_QUEUE_CONFIG(ppe_dev.port[i].oam_tx_queue + j) = wtx_queue_config;
    wtx_dma_channel_config.mode = WRX_DMA_CHANNEL_COUNTER_MODE;
    wtx_dma_channel_config.deslen = 0;
    wtx_dma_channel_config.desba = 0;
    for ( i = 0; i < QSB_QUEUE_NUMBER_BASE; i++ )
        *WTX_DMA_CHANNEL_CONFIG(i) = wtx_dma_channel_config;
    /* normal connection and OAM channel */
    wtx_dma_channel_config.deslen = ppe_dev.dma.tx_descriptor_number;
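    /*
     * As on the RX side, the loop below carves the shared TX descriptor array
     * into per-channel slices and programs desba as a word address
     * (byte address >> 2 with the top nibble masked).
     */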
    for ( j = 0; j < ppe_dev.dma.tx_total_channel_used; j++ )
        wtx_dma_channel_config.desba = (((u32)ppe_dev.dma.tx_descriptor_base >> 2) & 0x0FFFFFFF) + ppe_dev.dma.tx_descriptor_number * j * (sizeof(struct tx_descriptor) >> 2);
        *WTX_DMA_CHANNEL_CONFIG(i++) = wtx_dma_channel_config;
static inline void qsb_global_set(void)

    u32 qsb_clk = cgu_get_fpi_bus_clock(2);
    u32 tmp1, tmp2, tmp3;
    union qsb_queue_parameter_table qsb_queue_parameter_table = {{0}};
    union qsb_queue_vbr_parameter_table qsb_queue_vbr_parameter_table = {{0}};

    *QSB_ICDV = QSB_ICDV_TAU_SET(ppe_dev.qsb.tau);
    *QSB_SBL = QSB_SBL_SBL_SET(ppe_dev.qsb.sbl);
    *QSB_CFG = QSB_CFG_TSTEPC_SET(ppe_dev.qsb.tstepc >> 1);
    /*
     * set SCT and SPT per port
     */
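    /*
     * Sketch of the rate computation used below (read straight from the code,
     * not from the hardware spec): Tsb = (qsb_clk * tstepc / 2) /
     * tx_max_cell_rate, kept as a fixed-point value with a 6-bit fraction.
     * tmp2 is the integer part written into SPT as the interval rate, and
     * tmp3 is the fractional part (plus one) written into SCT.
     */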
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( ppe_dev.port[i].max_connections != 0 && ppe_dev.port[i].tx_max_cell_rate != 0 )
            tmp1 = ((qsb_clk * ppe_dev.qsb.tstepc) >> 1) / ppe_dev.port[i].tx_max_cell_rate;
            tmp2 = tmp1 >> 6;                      /* integer value of Tsb */
            tmp3 = (tmp1 & ((1 << 6) - 1)) + 1;    /* fractional part of Tsb */
            /* carry over to integer part (?) */
            if ( tmp3 == (1 << 6) )

            /* 2. write value to data transfer register */
            /* 3. start the transfer */

            *QSB_RTM = QSB_RTM_DM_SET(QSB_SET_SCT_MASK);
            *QSB_RTD = QSB_RTD_TTV_SET(tmp3);
            *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_SCT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(i & 0x01);
            /* SPT (SBV + PN + IntRate) */
            *QSB_RTM = QSB_RTM_DM_SET(QSB_SET_SPT_MASK);
            *QSB_RTD = QSB_RTD_TTV_SET(QSB_SPT_SBV_VALID | QSB_SPT_PN_SET(i & 0x01) | QSB_SPT_INTRATE_SET(tmp2));
            *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_SPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(i & 0x01);
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( ppe_dev.port[i].max_connections != 0 )
            tmp1 = ((qsb_clk * ppe_dev.qsb.tstepc) >> 1) / ppe_dev.port[i].tx_max_cell_rate;
            tmp2 = tmp1 >> 6;                      /* integer value of Tsb */
            tmp3 = (tmp1 & ((1 << 6) - 1)) + 1;    /* fractional part of Tsb */
            /* carry over to integer part (?) */
            if ( tmp3 == (1 << 6) )

            /* 2. write value to data transfer register */
            /* 3. start the transfer */

            *QSB_RTM = QSB_RTM_DM_SET(QSB_SET_SCT_MASK);
            *QSB_RTD = QSB_RTD_TTV_SET(tmp3);
            *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_SCT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(i & 0x01);

            /* SPT (SBV + PN + IntRate) */
            *QSB_RTM = QSB_RTM_DM_SET(QSB_SET_SPT_MASK);
            *QSB_RTD = QSB_RTD_TTV_SET(QSB_SPT_SBV_VALID | QSB_SPT_PN_SET(i & 0x01) | QSB_SPT_INTRATE_SET(tmp2));
            *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_SPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(i & 0x01);
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( ppe_dev.port[i].max_connections != 0 )
            for ( j = 0; j < OAM_TX_QUEUE_NUMBER_PER_PORT; j++ )
                qsb_qid = ppe_dev.port[i].oam_tx_queue + j;

                /* disable PCR limiter */
                qsb_queue_parameter_table.bit.tp = 0;
                /* set WFQ as real time queue */
                qsb_queue_parameter_table.bit.wfqf = 0;
                /* disable leaky bucket shaper */
                qsb_queue_vbr_parameter_table.bit.taus = 0;
                qsb_queue_vbr_parameter_table.bit.ts = 0;

                /* Queue Parameter Table (QPT) */
                *QSB_RTM = QSB_RTM_DM_SET(QSB_QPT_SET_MASK);
                *QSB_RTD = QSB_RTD_TTV_SET(qsb_queue_parameter_table.dword);
                *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_QPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(qsb_qid);
                /* Queue VBR Parameter Table (QVPT) */
                *QSB_RTM = QSB_RTM_DM_SET(QSB_QVPT_SET_MASK);
                *QSB_RTD = QSB_RTD_TTV_SET(qsb_queue_vbr_parameter_table.dword);
                *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_VBR) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(qsb_qid);
static inline void clear_ppe_dev(void)

    for (i = 0; i < ppe_dev.dma.tx_total_channel_used; i++ )
        int conn = i + QSB_QUEUE_NUMBER_BASE;

        while(ppe_dev.dma.tx_desc_release_pos[conn] != ppe_dev.dma.tx_desc_alloc_pos[conn])
            desc_base = ppe_dev.dma.tx_descriptor_number * (conn - QSB_QUEUE_NUMBER_BASE) + ppe_dev.dma.tx_desc_release_pos[conn];
            if(!ppe_dev.dma.tx_descriptor_base[desc_base].own)
                skb = ppe_dev.dma.tx_skb_pointers[desc_base];
                atm_free_tx_skb_vcc(skb);

            // Pretend PP32 holds the owner bit so the descriptor is not released more than once; the allocation path does not check this bit.
            ppe_dev.dma.tx_descriptor_base[desc_base].own = 1;

            if (++ppe_dev.dma.tx_desc_release_pos[conn] == ppe_dev.dma.tx_descriptor_number)
                ppe_dev.dma.tx_desc_release_pos[conn] = 0;
    for (i = ppe_dev.dma.rx_total_channel_used * ppe_dev.dma.rx_descriptor_number - 1; i >= 0; i--)
        dev_kfree_skb_any(*(struct sk_buff **)(((ppe_dev.dma.rx_descriptor_base[i].dataptr << 2) | KSEG0) - 4));
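    /*
     * The expression above recovers the sk_buff from a raw RX descriptor:
     * dataptr is a word address, so it is shifted back to a byte address,
     * mapped through KSEG0, and the sk_buff pointer is read from the four
     * bytes stored immediately before the data buffer -- presumably the
     * convention set up by alloc_skb_rx().
     */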
    kfree(ppe_dev.dma.tx_skb_pointers);
    kfree(ppe_dev.dma.tx_descriptor_addr);
    kfree(ppe_dev.dma.rx_descriptor_addr);
static inline int init_ppe_dev(void)

    int rx_desc, tx_desc;

#if !defined(ENABLE_RX_QOS) || !ENABLE_RX_QOS
    int rx_dma_channel_base;
    int rx_dma_channel_assigned;
#endif  // !defined(ENABLE_RX_QOS) || !ENABLE_RX_QOS

    struct rx_descriptor rx_descriptor = { own: 1,

    struct tx_descriptor tx_descriptor = { own: 1,    // pretend it is held by PP32

    memset(&ppe_dev, 0, sizeof(ppe_dev));
    /*
     * Setup AAL5 members; the buffer size must be larger than the maximum packet size plus overhead.
     */
    ppe_dev.aal5.padding_byte = (u8)aal5_fill_pattern;
    ppe_dev.aal5.rx_max_packet_size = (u32)aal5r_max_packet_size;
    ppe_dev.aal5.rx_min_packet_size = (u32)aal5r_min_packet_size;
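    /*
     * Buffer sizing below, as the expression reads: take the larger of the
     * configured maximum frame size and one cell, add the per-frame overhead
     * (MAX_RX/TX_FRAME_EXTRA_BYTES), then round up to a multiple of
     * DMA_ALIGNMENT with the usual (x + align - 1) & ~(align - 1) idiom.
     */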
    ppe_dev.aal5.rx_buffer_size = ((u32)(aal5r_max_packet_size > CELL_SIZE ? aal5r_max_packet_size + MAX_RX_FRAME_EXTRA_BYTES : CELL_SIZE + MAX_RX_FRAME_EXTRA_BYTES) + DMA_ALIGNMENT - 1) & ~(DMA_ALIGNMENT - 1);
    ppe_dev.aal5.tx_max_packet_size = (u32)aal5s_max_packet_size;
    ppe_dev.aal5.tx_min_packet_size = (u32)aal5s_min_packet_size;
    ppe_dev.aal5.tx_buffer_size = ((u32)(aal5s_max_packet_size > CELL_SIZE ? aal5s_max_packet_size + MAX_TX_FRAME_EXTRA_BYTES : CELL_SIZE + MAX_TX_FRAME_EXTRA_BYTES) + DMA_ALIGNMENT - 1) & ~(DMA_ALIGNMENT - 1);
    ppe_dev.aal5.rx_drop_error_packet = aal5r_drop_error_packet ? 1 : 0;
    /*
     * Setup QSB members; refer to Amazon spec 15.4 for the value calculation formula.
     */
    ppe_dev.qsb.tau = (u32)qsb_tau;
    ppe_dev.qsb.tstepc = (u32)qsb_tstep;
    ppe_dev.qsb.sbl = (u32)qsb_srvm;
    /*
     * Setup port, connection, and other members.
     */
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        /* first connection ID of port */
        ppe_dev.port[i].connection_base = conn + QSB_QUEUE_NUMBER_BASE;
        /* max number of connections of port */
        ppe_dev.port[i].max_connections = (u32)port_max_connection[i];
        /* max cell rate the port has */
        ppe_dev.port[i].tx_max_cell_rate = (u32)port_cell_rate_up[i];

        /* link connection ID to port ID */
        for ( j = port_max_connection[i] - 1; j >= 0; j-- )
            ppe_dev.connection[conn++ + QSB_QUEUE_NUMBER_BASE].port = i;

    /* total number of connections over all ports */
    ppe_dev.max_connections = conn;
    /* OAM RX queue ID, which is the first available connection ID after */
    /* the connections assigned to ports. */
    ppe_dev.oam_rx_queue = conn + QSB_QUEUE_NUMBER_BASE;
#if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( port_max_connection[i] != 0 )
            ppe_dev.port[i].oam_tx_queue = oam_tx_queue + QSB_QUEUE_NUMBER_BASE;

            for ( j = 0; j < OAM_TX_QUEUE_NUMBER_PER_PORT; j++ )
                /* Since connection IDs map one-to-one to RX/TX queue IDs, connection */
                /* structures must also be reserved for the OAM RX/TX queues, and member "port" */
                /* is set according to the port to which the OAM TX queue is connected. */
                ppe_dev.connection[oam_tx_queue++ + QSB_QUEUE_NUMBER_BASE].port = i;

    /* DMA RX channel assigned to OAM RX queue */
    ppe_dev.oam_rx_dma_channel = RX_DMA_CH_OAM;
    /* DMA RX channels will be assigned dynamically when a VCC is opened. */
#else // defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
    rx_dma_channel_base = 0;

    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( port_max_connection[i] != 0 )
            /* Calculate the number of DMA RX channels that can be assigned to the port. */
            rx_dma_channel_assigned = i == ATM_PORT_NUMBER - 1
                                      ? (MAX_RX_DMA_CHANNEL_NUMBER - OAM_RX_DMA_CHANNEL_NUMBER) - rx_dma_channel_base
                                      : (ppe_dev.port[i].max_connections * (MAX_RX_DMA_CHANNEL_NUMBER - OAM_RX_DMA_CHANNEL_NUMBER) + ppe_dev.max_connections / 2) / ppe_dev.max_connections;
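            /*
             * Illustrative example (the numbers here are assumptions, not
             * taken from this file): with 8 RX DMA channels of which 1 is
             * reserved for OAM, a port owning half of all connections gets
             * round(0.5 * 7) = 4 data channels, while the last enabled port
             * simply takes whatever channels remain.
             */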
            /* Amend the number, which could be zero. */
            if ( rx_dma_channel_assigned == 0 )
                rx_dma_channel_assigned = 1;
            /* Calculate the first DMA RX channel ID that can be assigned to the port. */
            if ( rx_dma_channel_base + rx_dma_channel_assigned > MAX_RX_DMA_CHANNEL_NUMBER - OAM_RX_DMA_CHANNEL_NUMBER )
                rx_dma_channel_base = MAX_RX_DMA_CHANNEL_NUMBER - OAM_RX_DMA_CHANNEL_NUMBER - rx_dma_channel_assigned;

            /* first DMA RX channel ID */
            ppe_dev.port[i].rx_dma_channel_base = rx_dma_channel_base;
            /* number of DMA RX channels assigned to this port */
            ppe_dev.port[i].rx_dma_channel_assigned = rx_dma_channel_assigned;
            /* OAM TX queue ID, which must be assigned after the connections assigned to ports */
            ppe_dev.port[i].oam_tx_queue = oam_tx_queue + QSB_QUEUE_NUMBER_BASE;

            rx_dma_channel_base += rx_dma_channel_assigned;

            for ( j = 0; j < OAM_TX_QUEUE_NUMBER_PER_PORT; j++ )
                /* Since connection IDs map one-to-one to RX/TX queue IDs, connection */
                /* structures must also be reserved for the OAM RX/TX queues, and member "port" */
                /* is set according to the port to which the OAM TX queue is connected. */
                ppe_dev.connection[oam_tx_queue++ + QSB_QUEUE_NUMBER_BASE].port = i;

    /* DMA RX channel assigned to OAM RX queue */
    ppe_dev.oam_rx_dma_channel = rx_dma_channel_base;
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        for ( j = 0; j < port_max_connection[i]; j++ )
            /* Assign DMA RX channel to RX queues. One channel could be assigned to more than one queue. */
            ppe_dev.connection[ppe_dev.port[i].connection_base + j].rx_dma_channel = ppe_dev.port[i].rx_dma_channel_base + j % ppe_dev.port[i].rx_dma_channel_assigned;
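            /*
             * For example (purely illustrative), a port with 2 assigned
             * channels and 5 connections maps its RX queues to channels
             * base+0, base+1, base+0, base+1, base+0 via the modulo above.
             */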
#endif  // defined(ENABLE_RX_QOS) && ENABLE_RX_QOS

    /* initialize semaphore used by open and close */
    sema_init(&ppe_dev.sem, 1);
    /* descriptor number of RX DMA channel */
    ppe_dev.dma.rx_descriptor_number = dma_rx_descriptor_length;
    /* descriptor number of TX DMA channel */
    ppe_dev.dma.tx_descriptor_number = dma_tx_descriptor_length;
    /* If more descriptors than this value are in use, cells with CLP1 are dropped. */
    ppe_dev.dma.rx_clp1_desc_threshold = dma_rx_clp1_descriptor_threshold;

    /* delay on descriptor write path */
    ppe_dev.dma.write_descriptor_delay = write_descriptor_delay;

    /* total number of DMA RX channels used */
#if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
    ppe_dev.dma.rx_total_channel_used = RX_DMA_CH_TOTAL;
#else
    ppe_dev.dma.rx_total_channel_used = rx_dma_channel_base + OAM_RX_DMA_CHANNEL_NUMBER;
#endif  // defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
    /* total number of DMA TX channels used (excluding channels reserved by QSB) */
    ppe_dev.dma.tx_total_channel_used = oam_tx_queue;
    /* allocate memory for RX descriptors */
    ppe_dev.dma.rx_descriptor_addr = kmalloc(ppe_dev.dma.rx_total_channel_used * ppe_dev.dma.rx_descriptor_number * sizeof(struct rx_descriptor) + 4, GFP_KERNEL | GFP_DMA);
    if ( !ppe_dev.dma.rx_descriptor_addr )
        goto RX_DESCRIPTOR_BASE_ALLOCATE_FAIL;
    /* do alignment (DWORD) */
    ppe_dev.dma.rx_descriptor_base = (struct rx_descriptor *)(((u32)ppe_dev.dma.rx_descriptor_addr + 0x03) & ~0x03);
    ppe_dev.dma.rx_descriptor_base = (struct rx_descriptor *)((u32)ppe_dev.dma.rx_descriptor_base | KSEG1);    // no cache
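    /*
     * Mapping the descriptor array through KSEG1 makes CPU accesses uncached
     * (KSEG1 is the unmapped, uncached kernel segment on MIPS32), so
     * descriptor updates are visible to the PPE without explicit cache
     * flushes; the extra 4 bytes in the allocation leave room for the DWORD
     * alignment done above.
     */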
    /* allocate memory for TX descriptors */
    ppe_dev.dma.tx_descriptor_addr = kmalloc(ppe_dev.dma.tx_total_channel_used * ppe_dev.dma.tx_descriptor_number * sizeof(struct tx_descriptor) + 4, GFP_KERNEL | GFP_DMA);
    if ( !ppe_dev.dma.tx_descriptor_addr )
        goto TX_DESCRIPTOR_BASE_ALLOCATE_FAIL;
    /* do alignment (DWORD) */
    ppe_dev.dma.tx_descriptor_base = (struct tx_descriptor *)(((u32)ppe_dev.dma.tx_descriptor_addr + 0x03) & ~0x03);
    ppe_dev.dma.tx_descriptor_base = (struct tx_descriptor *)((u32)ppe_dev.dma.tx_descriptor_base | KSEG1);    // no cache
    /* allocate pointers to TX sk_buff */
    ppe_dev.dma.tx_skb_pointers = kmalloc(ppe_dev.dma.tx_total_channel_used * ppe_dev.dma.tx_descriptor_number * sizeof(struct sk_buff *), GFP_KERNEL);
    if ( !ppe_dev.dma.tx_skb_pointers )
        goto TX_SKB_POINTER_ALLOCATE_FAIL;
    memset(ppe_dev.dma.tx_skb_pointers, 0, ppe_dev.dma.tx_total_channel_used * ppe_dev.dma.tx_descriptor_number * sizeof(struct sk_buff *));
    /* Allocate RX sk_buff and fill up RX descriptors. */
    rx_descriptor.datalen = ppe_dev.aal5.rx_buffer_size;
    for ( rx_desc = ppe_dev.dma.rx_total_channel_used * ppe_dev.dma.rx_descriptor_number - 1; rx_desc >= 0; rx_desc-- )

        skb = alloc_skb_rx();

            panic("sk buffer is used up\n");
        rx_descriptor.dataptr = (u32)skb->data >> 2;
        ppe_dev.dma.rx_descriptor_base[rx_desc] = rx_descriptor;

    /* Fill up TX descriptors. */
    tx_descriptor.datalen = ppe_dev.aal5.tx_buffer_size;
    for ( tx_desc = ppe_dev.dma.tx_total_channel_used * ppe_dev.dma.tx_descriptor_number - 1; tx_desc >= 0; tx_desc-- )
        ppe_dev.dma.tx_descriptor_base[tx_desc] = tx_descriptor;
TX_SKB_POINTER_ALLOCATE_FAIL:
    kfree(ppe_dev.dma.tx_descriptor_addr);
TX_DESCRIPTOR_BASE_ALLOCATE_FAIL:
    kfree(ppe_dev.dma.rx_descriptor_addr);
RX_DESCRIPTOR_BASE_ALLOCATE_FAIL:
static inline void clear_share_buffer(void)

    volatile u32 *p = SB_RAM0_ADDR(0);

    /* write all zeros only */
    for ( i = 0; i < SB_RAM0_DWLEN + SB_RAM1_DWLEN + SB_RAM2_DWLEN + SB_RAM3_DWLEN; i++ )
static inline void check_parameters(void)

    int enabled_port_number;
    int unassigned_queue_number;
    int assigned_queue_number;

    enabled_port_number = 0;
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( port_max_connection[i] < 1 )
            port_max_connection[i] = 0;
        else
            enabled_port_number++;
    /* If the max connection number of a port is not 0, the port is enabled */
    /* and at least two connection IDs must be reserved for this port. One of */
    /* them is used as the OAM TX path. */
    unassigned_queue_number = MAX_QUEUE_NUMBER - QSB_QUEUE_NUMBER_BASE;
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( port_max_connection[i] > 0 )

            enabled_port_number--;
            assigned_queue_number = unassigned_queue_number - enabled_port_number * (1 + OAM_TX_QUEUE_NUMBER_PER_PORT) - OAM_TX_QUEUE_NUMBER_PER_PORT;
            if ( assigned_queue_number > MAX_QUEUE_NUMBER_PER_PORT - OAM_TX_QUEUE_NUMBER_PER_PORT )
                assigned_queue_number = MAX_QUEUE_NUMBER_PER_PORT - OAM_TX_QUEUE_NUMBER_PER_PORT;
            if ( port_max_connection[i] > assigned_queue_number )

                port_max_connection[i] = assigned_queue_number;
                unassigned_queue_number -= assigned_queue_number;

                unassigned_queue_number -= port_max_connection[i];
    /* Please refer to Amazon spec 15.4 for setting these values. */

    else if ( qsb_tstep > 4 )

    else if ( qsb_tstep == 3 )

    /* There is a delay between the PPE writing a descriptor and the descriptor */
    /* really being stored in memory. The host also has this delay when writing a */
    /* descriptor, so the PPE uses this value to determine whether a write */
    /* operation has taken effect. */
    if ( write_descriptor_delay < 0 )
        write_descriptor_delay = 0;

    if ( aal5_fill_pattern < 0 )
        aal5_fill_pattern = 0;
    else
        aal5_fill_pattern &= 0xFF;

    /* Because of the limitation of the length field in descriptors, the packet */
    /* size cannot be larger than 64K minus the overhead size. */
    if ( aal5r_max_packet_size < 0 )
        aal5r_max_packet_size = 0;
    else if ( aal5r_max_packet_size >= 65536 - MAX_RX_FRAME_EXTRA_BYTES )
        aal5r_max_packet_size = 65536 - MAX_RX_FRAME_EXTRA_BYTES;
    if ( aal5r_min_packet_size < 0 )
        aal5r_min_packet_size = 0;
    else if ( aal5r_min_packet_size > aal5r_max_packet_size )
        aal5r_min_packet_size = aal5r_max_packet_size;
    if ( aal5s_max_packet_size < 0 )
        aal5s_max_packet_size = 0;
    else if ( aal5s_max_packet_size >= 65536 - MAX_TX_FRAME_EXTRA_BYTES )
        aal5s_max_packet_size = 65536 - MAX_TX_FRAME_EXTRA_BYTES;
    if ( aal5s_min_packet_size < 0 )
        aal5s_min_packet_size = 0;
    else if ( aal5s_min_packet_size > aal5s_max_packet_size )
        aal5s_min_packet_size = aal5s_max_packet_size;

    if ( dma_rx_descriptor_length < 2 )
        dma_rx_descriptor_length = 2;
    if ( dma_tx_descriptor_length < 2 )
        dma_tx_descriptor_length = 2;
    if ( dma_rx_clp1_descriptor_threshold < 0 )
        dma_rx_clp1_descriptor_threshold = 0;
    else if ( dma_rx_clp1_descriptor_threshold > dma_rx_descriptor_length )
        dma_rx_clp1_descriptor_threshold = dma_rx_descriptor_length;
static struct atmdev_ops ppe_atm_ops = {

    send_oam:   ppe_send_oam,
    change_qos: ppe_change_qos,
int __init danube_ppe_init(void)

    ret = init_ppe_dev();
        goto INIT_PPE_DEV_FAIL;

    clear_share_buffer();

    printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__);

    for ( port_num = 0; port_num < ATM_PORT_NUMBER; port_num++ )
        if ( ppe_dev.port[port_num].max_connections != 0 )
            printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__);
            ppe_dev.port[port_num].dev = atm_dev_register("danube_atm", &ppe_atm_ops, -1, 0UL);
            if ( !ppe_dev.port[port_num].dev )
                printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__);
                goto ATM_DEV_REGISTER_FAIL;

            printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__);
            ppe_dev.port[port_num].dev->ci_range.vpi_bits = 8;
            ppe_dev.port[port_num].dev->ci_range.vci_bits = 16;
            ppe_dev.port[port_num].dev->link_rate = ppe_dev.port[port_num].tx_max_cell_rate;
            ppe_dev.port[port_num].dev->dev_data = (void*)port_num;

    /* register interrupt handler */
    ret = request_irq(IFXMIPS_PPE_MBOX_INT, mailbox_irq_handler, IRQF_DISABLED, "ppe_mailbox_isr", NULL);
        printk("ppe: IRQ may be occupied by ETH2 driver, please reconfigure to disable it.\n");
        goto REQUEST_IRQ_IFXMIPS_PPE_MBOX_INT_FAIL;

    disable_irq(IFXMIPS_PPE_MBOX_INT);

#if defined(CONFIG_PCI) && defined(USE_FIX_FOR_PCI_PPE) && USE_FIX_FOR_PCI_PPE
    ret = request_irq(PPE_MAILBOX_IGU0_INT, pci_fix_irq_handler, SA_INTERRUPT, "ppe_pci_fix_isr", NULL);
        printk("failed in registering mailbox 0 interrupt (pci fix)\n");
#endif  // defined(CONFIG_PCI) && defined(USE_FIX_FOR_PCI_PPE) && USE_FIX_FOR_PCI_PPE

        goto PP32_START_FAIL;

    HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY)->vld = 1;
    HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY)->vld = 1;
    HTU_ENTRY(OAM_F5_HTU_ENTRY)->vld = 1;

    /* create proc file */

    printk("ppe: ATM init succeeded (firmware version 1.1.0.2.1.13)\n");

    free_irq(IFXMIPS_PPE_MBOX_INT, NULL);
REQUEST_IRQ_IFXMIPS_PPE_MBOX_INT_FAIL:
ATM_DEV_REGISTER_FAIL:

    printk("ppe: ATM init failed\n");
void __exit danube_ppe_exit(void)

    HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY)->vld = 0;
    HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY)->vld = 0;
    HTU_ENTRY(OAM_F5_HTU_ENTRY)->vld = 0;
    /* idle for a while to finish running HTU search */
    for (l = 0; l < IDLE_CYCLE_NUMBER; l++ );

    free_irq(IFXMIPS_PPE_MBOX_INT, NULL);
    for ( port_num = 0; port_num < ATM_PORT_NUMBER; port_num++ )
        if ( ppe_dev.port[port_num].max_connections != 0 )
            atm_dev_deregister(ppe_dev.port[port_num].dev);

module_init(danube_ppe_init);
module_exit(danube_ppe_exit);

MODULE_LICENSE("GPL");