openwrt.git: target/linux/brcm63xx/patches-3.0/080-bcm6345_enet.patch
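
bcm63xx: add Ethernet (ENET) DMA support for the BCM6345.

The BCM6345 differs from the other supported chips in every way this
patch touches: its clock-control enable bits sit 16 positions higher,
its ENET DMA engine exposes one register block per channel (0x40 bytes
apart) instead of the shared ENETDMA register set, the EOP/SOP/WRAP
flags in a DMA descriptor sit three bits lower, usable DMA channels
start at 1 instead of 0, and only MAC unit 0 exists. The driver
therefore guards each DMA register access with BCMCPU_IS_6345(),
shifts the generic descriptor masks right by DMADESC_6345_SHIFT on
that chip, and bcm63xx_enet_register() now rejects unit 1 on the 6345.

A minimal sketch of the descriptor-mask shift (illustration only, not
part of the patch; the EOP/SOP bit positions are assumed from the
mainline bcm63xx_enet.h, where EOP is bit 14 and SOP is bit 13):

	/* desc_shift is DMADESC_6345_SHIFT (3) on the 6345, 0 elsewhere */
	unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0;
	u32 wrap = DMADESC_WRAP_MASK >> desc_shift; /* (1 << 12) -> (1 << 9) on 6345 */
	u32 esop = DMADESC_ESOP_MASK >> desc_shift; /* bits 14|13 -> bits 11|10, assumed */
	/* with desc_shift == 0 both expressions reduce to the plain masks */
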
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -29,16 +29,15 @@
 				CKCTL_6338_SAR_EN |	\
 				CKCTL_6338_SPI_EN)
 
-#define CKCTL_6345_CPU_EN		(1 << 0)
-#define CKCTL_6345_BUS_EN		(1 << 1)
-#define CKCTL_6345_EBI_EN		(1 << 2)
-#define CKCTL_6345_UART_EN		(1 << 3)
-#define CKCTL_6345_ADSLPHY_EN		(1 << 4)
-#define CKCTL_6345_ENET_EN		(1 << 7)
-#define CKCTL_6345_USBH_EN		(1 << 8)
+#define CKCTL_6345_CPU_EN		(1 << 16)
+#define CKCTL_6345_BUS_EN		(1 << 17)
+#define CKCTL_6345_EBI_EN		(1 << 18)
+#define CKCTL_6345_UART_EN		(1 << 19)
+#define CKCTL_6345_ADSLPHY_EN		(1 << 20)
+#define CKCTL_6345_ENET_EN		(1 << 23)
+#define CKCTL_6345_USBH_EN		(1 << 24)
 
-#define CKCTL_6345_ALL_SAFE_EN	(CKCTL_6345_ENET_EN |	\
-				CKCTL_6345_USBH_EN |	\
+#define CKCTL_6345_ALL_SAFE_EN	(CKCTL_6345_USBH_EN |	\
 				CKCTL_6345_ADSLPHY_EN)
 
 #define CKCTL_6348_ADSLPHY_EN		(1 << 0)
@@ -548,6 +547,39 @@
 #define ENETDMA_SRAM4_REG(x)		(0x20c + (x) * 0x10)
 
 
+/* Broadcom 6345 ENET DMA definitions */
+#define ENETDMA_6345_CHANCFG_REG(x)	(0x00 + (x) * 0x40)
+#define ENETDMA_6345_CHANCFG_EN_SHIFT	0
+#define ENETDMA_6345_CHANCFG_EN_MASK	(1 << ENETDMA_6345_CHANCFG_EN_SHIFT)
+#define ENETDMA_6345_PKTHALT_SHIFT	1
+#define ENETDMA_6345_PKTHALT_MASK	(1 << ENETDMA_6345_PKTHALT_SHIFT)
+#define ENETDMA_6345_CHAINING_SHIFT	2
+#define ENETDMA_6345_CHAINING_MASK	(1 << ENETDMA_6345_CHAINING_SHIFT)
+#define ENETDMA_6345_WRAP_EN_SHIFT	3
+#define ENETDMA_6345_WRAP_EN_MASK	(1 << ENETDMA_6345_WRAP_EN_SHIFT)
+#define ENETDMA_6345_FLOWC_EN_SHIFT	4
+#define ENETDMA_6345_FLOWC_EN_MASK	(1 << ENETDMA_6345_FLOWC_EN_SHIFT)
+
+#define ENETDMA_6345_MAXBURST_REG(x)	(0x04 + (x) * 0x40)
+
+#define ENETDMA_6345_RSTART_REG(x)	(0x08 + (x) * 0x40)
+
+#define ENETDMA_6345_LEN_REG(x)		(0x0C + (x) * 0x40)
+
+#define ENETDMA_6345_BSTAT_REG(x)	(0x10 + (x) * 0x40)
+
+#define ENETDMA_6345_IR_REG(x)		(0x14 + (x) * 0x40)
+#define ENETDMA_6345_IR_BUFDONE_MASK	(1 << 0)
+#define ENETDMA_6345_IR_PKTDONE_MASK	(1 << 1)
+#define ENETDMA_6345_IR_NOTOWNER_MASK	(1 << 2)
+
+#define ENETDMA_6345_IRMASK_REG(x)	(0x18 + (x) * 0x40)
+
+#define ENETDMA_6345_FC_REG(x)		(0x1C + (x) * 0x40)
+
+#define ENETDMA_6345_BUFALLOC_REG(x)	(0x20 + (x) * 0x40)
+
+
 /*************************************************************************
  * _REG relative to RSET_OHCI_PRIV
  *************************************************************************/
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -31,6 +31,7 @@
 #include <linux/if_vlan.h>
 
 #include <bcm63xx_dev_enet.h>
+#include <bcm63xx_cpu.h>
 #include "bcm63xx_enet.h"
 
 static char bcm_enet_driver_name[] = "bcm63xx_enet";
@@ -177,6 +178,7 @@ static void bcm_enet_mdio_write_mii(stru
 static int bcm_enet_refill_rx(struct net_device *dev)
 {
 	struct bcm_enet_priv *priv;
+	unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0;
 
 	priv = netdev_priv(dev);
 
@@ -205,7 +207,7 @@ static int bcm_enet_refill_rx(struct net
 		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
 		len_stat |= DMADESC_OWNER_MASK;
 		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
-			len_stat |= DMADESC_WRAP_MASK;
+			len_stat |= (DMADESC_WRAP_MASK >> desc_shift);
 			priv->rx_dirty_desc = 0;
 		} else {
 			priv->rx_dirty_desc++;
@@ -216,7 +218,10 @@ static int bcm_enet_refill_rx(struct net
 		priv->rx_desc_count++;
 
 		/* tell dma engine we allocated one buffer */
-		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+		if (!BCMCPU_IS_6345())
+			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+		else
+			enet_dma_writel(priv, 1, ENETDMA_6345_BUFALLOC_REG(priv->rx_chan));
 	}
 
 	/* If rx ring is still empty, set a timer to try allocating
@@ -254,6 +259,7 @@ static int bcm_enet_receive_queue(struct
 	struct bcm_enet_priv *priv;
 	struct device *kdev;
 	int processed;
+	unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0;
 
 	priv = netdev_priv(dev);
 	kdev = &priv->pdev->dev;
@@ -292,7 +298,7 @@ static int bcm_enet_receive_queue(struct
 
 		/* if the packet does not have start of packet _and_
 		 * end of packet flag set, then just recycle it */
-		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
+		if ((len_stat & (DMADESC_ESOP_MASK >> desc_shift)) != (DMADESC_ESOP_MASK >> desc_shift)) {
 			dev->stats.rx_dropped++;
 			continue;
 		}
@@ -352,8 +358,15 @@ static int bcm_enet_receive_queue(struct
 		bcm_enet_refill_rx(dev);
 
 		/* kick rx dma */
-		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-				ENETDMA_CHANCFG_REG(priv->rx_chan));
+		if (!BCMCPU_IS_6345())
+			enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+					ENETDMA_CHANCFG_REG(priv->rx_chan));
+		else
+			enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK |
+					ENETDMA_6345_CHAINING_MASK |
+					ENETDMA_6345_WRAP_EN_MASK |
+					ENETDMA_6345_FLOWC_EN_MASK,
+					ENETDMA_6345_CHANCFG_REG(priv->rx_chan));
 	}
 
 	return processed;
@@ -428,10 +441,21 @@ static int bcm_enet_poll(struct napi_str
 	dev = priv->net_dev;
 
 	/* ack interrupts */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IR_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IR_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->tx_chan));
+	}
 
 	/* reclaim sent skb */
 	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
@@ -450,10 +474,21 @@ static int bcm_enet_poll(struct napi_str
 	napi_complete(napi);
 
 	/* restore rx/tx interrupt */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	return rx_work_done;
 }
@@ -496,8 +531,13 @@ static irqreturn_t bcm_enet_isr_dma(int
 	priv = netdev_priv(dev);
 
 	/* mask rx/tx interrupts */
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	napi_schedule(&priv->napi);
 
@@ -513,6 +553,7 @@ static int bcm_enet_start_xmit(struct sk
 	struct bcm_enet_desc *desc;
 	u32 len_stat;
 	int ret;
+	unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0;
 
 	priv = netdev_priv(dev);
 
@@ -538,14 +579,13 @@ static int bcm_enet_start_xmit(struct sk
 				       DMA_TO_DEVICE);
 
 	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
-	len_stat |= DMADESC_ESOP_MASK |
-		DMADESC_APPEND_CRC |
-		DMADESC_OWNER_MASK;
+	len_stat |= ((DMADESC_ESOP_MASK >> desc_shift) |
+		DMADESC_APPEND_CRC | DMADESC_OWNER_MASK);
 
 	priv->tx_curr_desc++;
 	if (priv->tx_curr_desc == priv->tx_ring_size) {
 		priv->tx_curr_desc = 0;
-		len_stat |= DMADESC_WRAP_MASK;
+		len_stat |= (DMADESC_WRAP_MASK >> desc_shift);
 	}
 	priv->tx_desc_count--;
 
@@ -556,8 +596,15 @@ static int bcm_enet_start_xmit(struct sk
 	wmb();
 
 	/* kick tx dma */
-	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-			ENETDMA_CHANCFG_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345())
+		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+				ENETDMA_CHANCFG_REG(priv->tx_chan));
+	else
+		enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK |
+				ENETDMA_6345_CHAINING_MASK |
+				ENETDMA_6345_WRAP_EN_MASK |
+				ENETDMA_6345_FLOWC_EN_MASK,
+				ENETDMA_6345_CHANCFG_REG(priv->tx_chan));
 
 	/* stop queue if no more desc available */
 	if (!priv->tx_desc_count)
@@ -685,6 +732,9 @@ static void bcm_enet_set_flow(struct bcm
 	val &= ~ENET_RXCFG_ENFLOW_MASK;
 	enet_writel(priv, val, ENET_RXCFG_REG);
 
+	if (BCMCPU_IS_6345())
+		return;
+
 	/* tx flow control (pause frame generation) */
 	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
 	if (tx_en)
@@ -832,8 +882,13 @@ static int bcm_enet_open(struct net_devi
 
 	/* mask all interrupts and request them */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
 	if (ret)
@@ -912,8 +967,12 @@ static int bcm_enet_open(struct net_devi
 	priv->rx_curr_desc = 0;
 
 	/* initialize flow control buffer allocation */
-	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
-			ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	if (!BCMCPU_IS_6345())
+		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+				ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	else
+		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+				ENETDMA_6345_BUFALLOC_REG(priv->rx_chan));
 
 	if (bcm_enet_refill_rx(dev)) {
 		dev_err(kdev, "cannot allocate rx skb queue\n");
@@ -922,37 +981,62 @@ static int bcm_enet_open(struct net_devi
 	}
 
 	/* write rx & tx ring addresses */
-	enet_dma_writel(priv, priv->rx_desc_dma,
-			ENETDMA_RSTART_REG(priv->rx_chan));
-	enet_dma_writel(priv, priv->tx_desc_dma,
-			ENETDMA_RSTART_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, priv->rx_desc_dma,
+				ENETDMA_RSTART_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->tx_desc_dma,
+				ENETDMA_RSTART_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, priv->rx_desc_dma,
+				ENETDMA_6345_RSTART_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->tx_desc_dma,
+				ENETDMA_6345_RSTART_REG(priv->tx_chan));
+	}
 
 	/* clear remaining state ram for rx & tx channel */
-	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_FC_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_FC_REG(priv->tx_chan));
+	}
 
 	/* set max rx/tx length */
 	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
 	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
 
 	/* set dma maximum burst len */
-	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
-			ENETDMA_MAXBURST_REG(priv->rx_chan));
-	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
-			ENETDMA_MAXBURST_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+				ENETDMA_MAXBURST_REG(priv->rx_chan));
+		enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+				ENETDMA_MAXBURST_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+				ENETDMA_6345_MAXBURST_REG(priv->rx_chan));
+		enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+				ENETDMA_6345_MAXBURST_REG(priv->tx_chan));
+	}
 
 	/* set correct transmit fifo watermark */
 	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
 
 	/* set flow control low/high threshold to 1/3 / 2/3 */
-	val = priv->rx_ring_size / 3;
-	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
-	val = (priv->rx_ring_size * 2) / 3;
-	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+	if (!BCMCPU_IS_6345()) {
+		val = priv->rx_ring_size / 3;
+		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+		val = (priv->rx_ring_size * 2) / 3;
+		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+	} else {
+		enet_dma_writel(priv, 5, ENETDMA_6345_FC_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->rx_ring_size, ENETDMA_6345_LEN_REG(priv->rx_chan));
+		enet_dma_writel(priv, priv->tx_ring_size, ENETDMA_6345_LEN_REG(priv->tx_chan));
+	}
 
 	/* all set, enable mac and interrupts, start dma engine and
 	 * kick rx dma channel */
@@ -960,27 +1044,58 @@ static int bcm_enet_open(struct net_devi
 	val = enet_readl(priv, ENET_CTL_REG);
 	val |= ENET_CTL_ENABLE_MASK;
 	enet_writel(priv, val, ENET_CTL_REG);
-	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
-	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
-			ENETDMA_CHANCFG_REG(priv->rx_chan));
+
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+				ENETDMA_CHANCFG_REG(priv->rx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK |
+				ENETDMA_6345_CHAINING_MASK |
+				ENETDMA_6345_WRAP_EN_MASK |
+				ENETDMA_6345_FLOWC_EN_MASK,
+				ENETDMA_6345_CHANCFG_REG(priv->rx_chan));
+	}
 
 	/* watch "mib counters about to overflow" interrupt */
 	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
 	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
 
 	/* watch "packet transferred" interrupt in rx and tx */
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IR_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IR_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IR_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IR_REG(priv->tx_chan));
+	}
 
 	/* make sure we enable napi before rx interrupt */
 	napi_enable(&priv->napi);
 
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
-			ENETDMA_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+				ENETDMA_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK |
+				ENETDMA_IR_PKTDONE_MASK |
+				ENETDMA_IR_NOTOWNER_MASK,
+				ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	if (priv->has_phy)
 		phy_start(priv->phydev);
@@ -1060,13 +1175,19 @@ static void bcm_enet_disable_dma(struct
 {
 	int limit;
 
-	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
+	if (!BCMCPU_IS_6345())
+		enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
+	else
+		enet_dma_writel(priv, 0, ENETDMA_6345_CHANCFG_REG(chan));
 
 	limit = 1000;
 	do {
 		u32 val;
 
-		val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
+		if (!BCMCPU_IS_6345())
+			val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
+		else
+			val = enet_dma_readl(priv, ENETDMA_6345_CHANCFG_REG(chan));
 		if (!(val & ENETDMA_CHANCFG_EN_MASK))
 			break;
 		udelay(1);
@@ -1093,8 +1214,13 @@ static int bcm_enet_stop(struct net_devi
 
 	/* mask all interrupts */
 	enet_writel(priv, 0, ENET_IRMASK_REG);
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
-	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	if (!BCMCPU_IS_6345()) {
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+	} else {
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan));
+		enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan));
+	}
 
 	/* make sure no mib update is scheduled */
 	cancel_work_sync(&priv->mib_update_task);
@@ -1623,6 +1749,7 @@ static int __devinit bcm_enet_probe(stru
 	const char *clk_name;
 	unsigned int iomem_size;
 	int i, ret;
+	unsigned int chan_offset = 0;
 
 	/* stop if shared driver failed, assume driver->probe will be
 	 * called in the same order we register devices (correct ?) */
@@ -1662,10 +1789,13 @@ static int __devinit bcm_enet_probe(stru
 	priv->irq_tx = res_irq_tx->start;
 	priv->mac_id = pdev->id;
 
+	if (BCMCPU_IS_6345())
+		chan_offset = 1;
+
 	/* get rx & tx dma channel id for this mac */
 	if (priv->mac_id == 0) {
-		priv->rx_chan = 0;
-		priv->tx_chan = 1;
+		priv->rx_chan = 0 + chan_offset;
+		priv->tx_chan = 1 + chan_offset;
 		clk_name = "enet0";
 	} else {
 		priv->rx_chan = 2;
--- a/drivers/net/bcm63xx_enet.h
+++ b/drivers/net/bcm63xx_enet.h
@@ -46,6 +46,9 @@ struct bcm_enet_desc {
 #define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
 #define DMADESC_WRAP_MASK	(1 << 12)
 
+/* Shift down for EOP, SOP and WRAP bits */
+#define DMADESC_6345_SHIFT	(3)
+
 #define DMADESC_UNDER_MASK	(1 << 9)
 #define DMADESC_APPEND_CRC	(1 << 8)
 #define DMADESC_OVSIZE_MASK	(1 << 4)
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -104,7 +104,7 @@ int __init bcm63xx_enet_register(int uni
 	if (unit > 1)
 		return -ENODEV;
 
-	if (unit == 1 && BCMCPU_IS_6338())
+	if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
 		return -ENODEV;
 
 	if (!shared_device_registered) {