[openwrt.git] target/linux/ixp4xx/patches-2.6.24/033-velocity_be.patch
1 diff -Naur linux-2.6.24.orig/drivers/net/via-velocity.c linux-2.6.24/drivers/net/via-velocity.c
2 --- linux-2.6.24.orig/drivers/net/via-velocity.c 2008-01-31 23:11:26.000000000 -0600
3 +++ linux-2.6.24/drivers/net/via-velocity.c 2008-02-01 01:12:15.000000000 -0600
4 @@ -254,11 +254,31 @@
5 MODULE_LICENSE("GPL");
6 MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
7
8 +/* Valid values for vdebug (additive, this is a bitmask):
9 + * 0x00 => off
10 + * 0x01 => always on
11 + * 0x02 => additional detail on tx (rx, too, if anyone implements same)
12 + * 0x04 => detail the initialization process
13 + * 0x08 => spot debug detail; to be used as developers see fit
14 + */
15 +static int vdebug = 0;
16 +
17 +/* HAIL - these macros are for the normal 0x01-type tracing... */
18 +#define HAIL(S) \
19 + if (vdebug&1) printk(KERN_NOTICE "%s\n", (S));
20 +#define HAILS(S,T) \
21 + if (vdebug&1) printk(KERN_NOTICE "%s -> status=0x%x\n", (S), (T));
22 +
23 #define VELOCITY_PARAM(N,D) \
24 static int N[MAX_UNITS]=OPTION_DEFAULT;\
25 module_param_array(N, int, NULL, 0); \
26 MODULE_PARM_DESC(N, D);
27
28 +#define VELO_DEBUG_MIN 0
29 +#define VELO_DEBUG_MAX 255
30 +#define VELO_DEBUG_DEF 0
31 +VELOCITY_PARAM(velo_debug, "Debug level");
32 +
33 #define RX_DESC_MIN 64
34 #define RX_DESC_MAX 255
35 #define RX_DESC_DEF 64
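A note on the hunk above: the vdebug bits are additive, so loading the module with velo_debug=5 enables the always-on traces (0x01) plus the init-path detail (0x04). One hazard worth flagging in the HAIL/HAILS macros: they expand to a bare if-statement with a trailing semicolon, so they misbehave inside an unbraced if/else. A minimal sketch of the conventional kernel-safe form (not part of the patch; usage stays the same):

	#define HAIL(S) \
		do { if (vdebug & 1) printk(KERN_NOTICE "%s\n", (S)); } while (0)
	#define HAILS(S, T) \
		do { if (vdebug & 1) printk(KERN_NOTICE "%s -> status=0x%x\n", (S), (T)); } while (0)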
36 @@ -557,12 +577,12 @@
37 if (val == -1)
38 *opt |= (def ? flag : 0);
39 else if (val < 0 || val > 1) {
40 - printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
41 - devname, name);
42 + printk(KERN_NOTICE "via-velocity: the value of parameter %s is invalid, the valid range is (0-1)\n",
43 + name);
44 *opt |= (def ? flag : 0);
45 } else {
46 - printk(KERN_INFO "%s: set parameter %s to %s\n",
47 - devname, name, val ? "TRUE" : "FALSE");
48 + printk(KERN_INFO "via-velocity: set parameter %s to %s\n",
49 + name, val ? "TRUE" : "FALSE");
50 *opt |= (val ? flag : 0);
51 }
52 }
53 @@ -580,6 +600,7 @@
54 static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname)
55 {
56
57 + velocity_set_int_opt(&opts->velo_debug, velo_debug[index], VELO_DEBUG_MIN, VELO_DEBUG_MAX, VELO_DEBUG_DEF, "velo_debug", devname);
58 velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
59 velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
60 velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
61 @@ -593,6 +614,7 @@
62 velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
63 velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
64 opts->numrx = (opts->numrx & ~3);
65 + vdebug = opts->velo_debug;
66 }
67
68 /**
69 @@ -608,6 +630,8 @@
70 struct mac_regs __iomem * regs = vptr->mac_regs;
71 unsigned short vid;
72
73 + HAIL("velocity_init_cam_filter");
74 +
75 /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
76 WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
77 WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
78 @@ -636,8 +660,10 @@
79 } else {
80 u16 temp = 0;
81 mac_set_vlan_cam(regs, 0, (u8 *) &temp);
82 - temp = 1;
83 - mac_set_vlan_cam_mask(regs, (u8 *) &temp);
84 + /* temp = 1; BE */
85 + /* mac_set_vlan_cam_mask(regs, (u8 *) &temp); BE */
86 + vptr->vCAMmask[0] |= 1; /* BE */
87 + mac_set_vlan_cam_mask(regs, vptr->vCAMmask); /* BE */
88 }
89 }
90
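This hunk is the heart of the VLAN CAM fix: the old code built the mask in a u16 and handed its address to mac_set_vlan_cam_mask(), which consumes bytes, so where the set bit lands depends on host byte order. A standalone userspace demonstration of the problem (editorial sketch, not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t temp = 1;
		uint8_t *p = (uint8_t *)&temp;

		/* little-endian hosts print 01 00; big-endian hosts such as
		 * the IXP4xx print 00 01, putting the mask bit in the wrong byte */
		printf("bytes as the register write sees them: %02x %02x\n", p[0], p[1]);
		return 0;
	}

Using vptr->vCAMmask, a byte array, makes the ordering explicit and keeps the driver's cached copy of the mask in step with the hardware.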
91 @@ -675,13 +701,15 @@
92 struct mac_regs __iomem * regs = vptr->mac_regs;
93 int i;
94
95 + HAIL("velocity_rx_reset");
96 vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;
97
98 /*
99 * Init state, all RD entries belong to the NIC
100 */
101 for (i = 0; i < vptr->options.numrx; ++i)
102 - vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
103 + /* vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC; BE */
104 + vptr->rd_ring[i].rdesc0 |= cpu_to_le32(BE_OWNED_BY_NIC); /* BE */
105
106 writew(vptr->options.numrx, &regs->RBRDU);
107 writel(vptr->rd_pool_dma, &regs->RDBaseLo);
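The hunk above shows the conversion pattern the rest of the patch repeats: rdesc0 is no longer a struct of bitfields but a raw descriptor word kept in the device's little-endian order, and the owner flag is tested and set through a byte-swapped constant. The same idea as helpers, assuming the u32-word structs defined in via-velocity.h below (a sketch, not part of the patch):

	static inline int velocity_rd_owned_by_nic(const struct rx_desc *rd)
	{
		/* the mask is swapped to match the word's bus order, so the
		 * comparison works unchanged on either host endianness */
		return (rd->rdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) != 0;
	}

	static inline void velocity_rd_give_to_nic(struct rx_desc *rd)
	{
		rd->rdesc0 |= cpu_to_le32(BE_OWNED_BY_NIC);
	}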
108 @@ -704,12 +732,15 @@
109 struct mac_regs __iomem * regs = vptr->mac_regs;
110 int i, mii_status;
111
112 + if (vdebug&5) printk(KERN_NOTICE "velocity_init_registers: entering\n");
113 +
114 mac_wol_reset(regs);
115
116 switch (type) {
117 case VELOCITY_INIT_RESET:
118 case VELOCITY_INIT_WOL:
119
120 + if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: RESET or WOL\n");
121 netif_stop_queue(vptr->dev);
122
123 /*
124 @@ -737,12 +768,13 @@
125
126 case VELOCITY_INIT_COLD:
127 default:
128 + if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: COLD or default\n");
129 /*
130 * Do reset
131 */
132 velocity_soft_reset(vptr);
133 + if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: soft reset complete.\n");
134 mdelay(5);
135 -
136 mac_eeprom_reload(regs);
137 for (i = 0; i < 6; i++) {
138 writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
139 @@ -760,11 +792,16 @@
140 */
141 BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
142
143 + if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: Initializing CAM filter\n");
144 /*
145 * Init CAM filter
146 */
147 + if (vdebug&8) printk(KERN_NOTICE "velocity: spot debug: about to init CAM filters\n");
148 + mdelay(5); /* MJW - ARM processors, kernel 2.6.19 - this fixes oopses and hangs */
149 velocity_init_cam_filter(vptr);
150 + if (vdebug&8) printk(KERN_NOTICE "velocity: spot debug: init CAM filters complete\n");
151
152 + if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: Setting packet filter\n");
153 /*
154 * Set packet filter: Receive directed and broadcast address
155 */
156 @@ -774,10 +811,12 @@
157 * Enable MII auto-polling
158 */
159 enable_mii_autopoll(regs);
160 + if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: enable_mii_autopoll complete.\n");
161
162 vptr->int_mask = INT_MASK_DEF;
163
164 - writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
165 + /* writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo); BE */
166 + writel((vptr->rd_pool_dma), &regs->RDBaseLo); /* BE */
167 writew(vptr->options.numrx - 1, &regs->RDCSize);
168 mac_rx_queue_run(regs);
169 mac_rx_queue_wake(regs);
170 @@ -785,10 +824,13 @@
171 writew(vptr->options.numtx - 1, &regs->TDCSize);
172
173 for (i = 0; i < vptr->num_txq; i++) {
174 - writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
175 + /* writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); BE */
176 + writel((vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); /* BE */
177 mac_tx_queue_run(regs, i);
178 }
179
180 + if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: DMA settings complete.\n");
181 +
182 init_flow_control_register(vptr);
183
184 writel(CR0_STOP, &regs->CR0Clr);
185 @@ -807,8 +849,10 @@
186
187 enable_flow_control_ability(vptr);
188 mac_hw_mibs_init(regs);
189 + if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: Set interrupt mask\n");
190 mac_write_int_mask(vptr->int_mask, regs);
191 mac_clear_isr(regs);
192 + if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: complete.\n");
193
194 }
195 }
196 @@ -826,6 +870,7 @@
197 struct mac_regs __iomem * regs = vptr->mac_regs;
198 int i = 0;
199
200 + HAIL("velocity_soft_reset");
201 writel(CR0_SFRST, &regs->CR0Set);
202
203 for (i = 0; i < W_MAX_TIMEOUT; i++) {
204 @@ -888,6 +933,7 @@
205 VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
206 printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
207 printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
208 + printk(KERN_INFO "BE support, misc. fixes MJW 01Jan2007 - may be unstable\n");
209 first = 0;
210 }
211
212 @@ -1104,6 +1150,7 @@
213 dma_addr_t pool_dma;
214 u8 *pool;
215
216 + HAIL("velocity_init_rings");
217 /*
218 * Allocate all RD/TD rings a single pool
219 */
220 @@ -1166,6 +1213,7 @@
221 static void velocity_free_rings(struct velocity_info *vptr)
222 {
223 int size;
224 + HAIL("velocity_free_rings");
225
226 size = vptr->options.numrx * sizeof(struct rx_desc) +
227 vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
228 @@ -1182,6 +1230,7 @@
229 struct mac_regs __iomem *regs = vptr->mac_regs;
230 int avail, dirty, unusable;
231
232 + HAIL("velocity_give_many_rx_descs");
233 /*
234 * RD number must be equal to 4X per hardware spec
235 * (programming guide rev 1.20, p.13)
236 @@ -1195,7 +1244,8 @@
237 dirty = vptr->rd_dirty - unusable;
238 for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
239 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
240 - vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
241 + /* vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC; BE */
242 + vptr->rd_ring[dirty].rdesc0 |= cpu_to_le32(BE_OWNED_BY_NIC); /* BE */
243 }
244
245 writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
246 @@ -1205,12 +1255,14 @@
247 static int velocity_rx_refill(struct velocity_info *vptr)
248 {
249 int dirty = vptr->rd_dirty, done = 0, ret = 0;
250 + HAIL("velocity_rx_refill");
251
252 do {
253 struct rx_desc *rd = vptr->rd_ring + dirty;
254
255 /* Fine for an all zero Rx desc at init time as well */
256 - if (rd->rdesc0.owner == OWNED_BY_NIC)
257 + /* if (rd->rdesc0.owner == OWNED_BY_NIC) BE */
258 + if (rd->rdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) /* BE */
259 break;
260
261 if (!vptr->rd_info[dirty].skb) {
262 @@ -1244,6 +1296,7 @@
263 int ret;
264 int mtu = vptr->dev->mtu;
265
266 + HAIL("velocity_init_rd_ring");
267 vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
268
269 vptr->rd_info = kcalloc(vptr->options.numrx,
270 @@ -1275,6 +1328,7 @@
271 {
272 int i;
273
274 + HAIL("velocity_free_rd_ring");
275 if (vptr->rd_info == NULL)
276 return;
277
278 @@ -1314,6 +1368,7 @@
279 struct tx_desc *td;
280 struct velocity_td_info *td_info;
281
282 + HAIL("velocity_init_td_ring");
283 /* Init the TD ring entries */
284 for (j = 0; j < vptr->num_txq; j++) {
285 curr = vptr->td_pool_dma[j];
286 @@ -1350,6 +1405,7 @@
287 struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
288 int i;
289
290 + HAIL("velocity_free_td_ring_entry");
291 if (td_info == NULL)
292 return;
293
294 @@ -1379,6 +1435,7 @@
295 {
296 int i, j;
297
298 + HAIL("velocity_free_td_ring");
299 for (j = 0; j < vptr->num_txq; j++) {
300 if (vptr->td_infos[j] == NULL)
301 continue;
302 @@ -1406,34 +1463,42 @@
303 struct net_device_stats *stats = &vptr->stats;
304 int rd_curr = vptr->rd_curr;
305 int works = 0;
306 + u16 wRSR; /* BE */
307
308 + HAILS("velocity_rx_srv", status);
309 do {
310 struct rx_desc *rd = vptr->rd_ring + rd_curr;
311
312 if (!vptr->rd_info[rd_curr].skb)
313 break;
314
315 - if (rd->rdesc0.owner == OWNED_BY_NIC)
316 + /* if (rd->rdesc0.owner == OWNED_BY_NIC) BE */
317 + if (rd->rdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) /* BE */
318 break;
319
320 rmb();
321
322 + wRSR = (u16)(cpu_to_le32(rd->rdesc0)); /* BE */
323 /*
324 * Don't drop CE or RL error frame although RXOK is off
325 */
326 - if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
327 + /* if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { BE */
328 + if ((wRSR & RSR_RXOK) || (!(wRSR & RSR_RXOK) && (wRSR & (RSR_CE | RSR_RL)))) { /* BE */
329 if (velocity_receive_frame(vptr, rd_curr) < 0)
330 stats->rx_dropped++;
331 } else {
332 - if (rd->rdesc0.RSR & RSR_CRC)
333 + /* if (rd->rdesc0.RSR & RSR_CRC) BE */
334 + if (wRSR & RSR_CRC) /* BE */
335 stats->rx_crc_errors++;
336 - if (rd->rdesc0.RSR & RSR_FAE)
337 + /* if (rd->rdesc0.RSR & RSR_FAE) BE */
338 + if (wRSR & RSR_FAE) /* BE */
339 stats->rx_frame_errors++;
340
341 stats->rx_dropped++;
342 }
343
344 - rd->inten = 1;
345 + /* rd->inten = 1; BE */
346 + rd->ltwo |= cpu_to_le32(BE_INT_ENABLE); /* BE */
347
348 vptr->dev->last_rx = jiffies;
349
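A remark on the wRSR extraction above: converting device-order data to host order is conventionally spelled le32_to_cpu(); the patch uses cpu_to_le32(), which happens to be the identical byte swap on any fixed-endian kernel, so the result is the same. The more idiomatic spelling (equivalent, not part of the patch):

	u16 wRSR = (u16)le32_to_cpu(rd->rdesc0);	/* RSR occupies the low 16 bits */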
350 @@ -1464,13 +1529,21 @@
351
352 static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
353 {
354 + u8 bCSM;
355 + HAIL("velocity_rx_csum");
356 skb->ip_summed = CHECKSUM_NONE;
357
358 - if (rd->rdesc1.CSM & CSM_IPKT) {
359 - if (rd->rdesc1.CSM & CSM_IPOK) {
360 - if ((rd->rdesc1.CSM & CSM_TCPKT) ||
361 - (rd->rdesc1.CSM & CSM_UDPKT)) {
362 - if (!(rd->rdesc1.CSM & CSM_TUPOK)) {
363 +// if (rd->rdesc1.CSM & CSM_IPKT) {
364 +// if (rd->rdesc1.CSM & CSM_IPOK) {
365 +// if ((rd->rdesc1.CSM & CSM_TCPKT) ||
366 +// (rd->rdesc1.CSM & CSM_UDPKT)) {
367 +// if (!(rd->rdesc1.CSM & CSM_TUPOK)) {
368 + bCSM = (u8)(cpu_to_le32(rd->rdesc1) >> 16); /* BE */
369 + if (bCSM & CSM_IPKT) {
370 + if (bCSM & CSM_IPOK) {
371 + if ((bCSM & CSM_TCPKT) ||
372 + (bCSM & CSM_UDPKT)) {
373 + if (!(bCSM & CSM_TUPOK)) { /* BE */
374 return;
375 }
376 }
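The bCSM line above pulls the checksum-status byte out of the second descriptor word; per the commented-out struct rdesc1 in the header, the little-endian word carries PQTAG in bits 0-15, CSM in bits 16-23 and IPKT in bits 24-31. The same extraction as a helper (a sketch, not part of the patch):

	static inline u8 velocity_rx_csm_byte(const struct rx_desc *rd)
	{
		return (u8)(le32_to_cpu(rd->rdesc1) >> 16);	/* CSM is byte 2 */
	}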
377 @@ -1496,9 +1569,11 @@
378 {
379 int ret = -1;
380
381 + HAIL("velocity_rx_copy");
382 if (pkt_size < rx_copybreak) {
383 struct sk_buff *new_skb;
384
385 + HAIL("velocity_rx_copy (working...)");
386 new_skb = dev_alloc_skb(pkt_size + 2);
387 if (new_skb) {
388 new_skb->dev = vptr->dev;
389 @@ -1529,10 +1604,12 @@
390 static inline void velocity_iph_realign(struct velocity_info *vptr,
391 struct sk_buff *skb, int pkt_size)
392 {
393 + HAIL("velocity_iph_realign");
394 /* FIXME - memmove ? */
395 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
396 int i;
397
398 + HAIL("velocity_iph_realign (working...)");
399 for (i = pkt_size; i >= 0; i--)
400 *(skb->data + i + 2) = *(skb->data + i);
401 skb_reserve(skb, 2);
402 @@ -1551,19 +1628,27 @@
403 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
404 {
405 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
406 + u16 pkt_len; /* BE */
407 + u16 wRSR; /* BE */
408 + struct sk_buff *skb;
409 struct net_device_stats *stats = &vptr->stats;
410 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
411 struct rx_desc *rd = &(vptr->rd_ring[idx]);
412 - int pkt_len = rd->rdesc0.len;
413 - struct sk_buff *skb;
414 + /* int pkt_len = rd->rdesc0.len BE */;
415 +
416 + pkt_len = ((cpu_to_le32(rd->rdesc0) >> 16) & 0x00003FFFUL); /* BE */
417 + wRSR = (u16)(cpu_to_le32(rd->rdesc0)); /* BE */
418
419 - if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
420 + HAIL("velocity_receive_frame");
421 + /* if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { BE */
422 + if (wRSR & (RSR_STP | RSR_EDP)) { /* BE */
423 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name);
424 stats->rx_length_errors++;
425 return -EINVAL;
426 }
427
428 - if (rd->rdesc0.RSR & RSR_MAR)
429 + /* if (rd->rdesc0.RSR & RSR_MAR) BE */
430 + if (wRSR & RSR_MAR) /* BE */
431 vptr->stats.multicast++;
432
433 skb = rd_info->skb;
434 @@ -1576,7 +1661,8 @@
435 */
436
437 if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
438 - if (rd->rdesc0.RSR & RSR_RL) {
439 + /* if (rd->rdesc0.RSR & RSR_RL) { BE */
440 + if (wRSR & RSR_RL) { /* BE */
441 stats->rx_length_errors++;
442 return -EINVAL;
443 }
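The 0x3FFF mask above matches the old 14-bit len bitfield (bits 16-29 of rdesc0), with RSR in the low 16 bits. A worked example, assuming RSR_RXOK is bit 15 of RSR as in the 2.6.24 header: for a good 1514-byte frame the host-order word is 0x05ea8000, so pkt_len = (0x05ea8000 >> 16) & 0x3FFF = 0x05ea = 1514, and wRSR = 0x8000, i.e. RSR_RXOK set.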
444 @@ -1620,6 +1706,7 @@
445 struct rx_desc *rd = &(vptr->rd_ring[idx]);
446 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
447
448 + HAIL("velocity_alloc_rx_buf");
449 rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
450 if (rd_info->skb == NULL)
451 return -ENOMEM;
452 @@ -1637,10 +1724,14 @@
453 */
454
455 *((u32 *) & (rd->rdesc0)) = 0;
456 - rd->len = cpu_to_le32(vptr->rx_buf_sz);
457 - rd->inten = 1;
458 + /* rd->len = cpu_to_le32(vptr->rx_buf_sz); BE */
459 + /* rd->inten = 1; BE */
460 rd->pa_low = cpu_to_le32(rd_info->skb_dma);
461 - rd->pa_high = 0;
462 + /* rd->pa_high = 0; BE */
463 + rd->ltwo &= cpu_to_le32(0xC000FFFFUL); /* BE */
464 + rd->ltwo |= cpu_to_le32((vptr->rx_buf_sz << 16)); /* BE */
465 + rd->ltwo |= cpu_to_le32(BE_INT_ENABLE); /* BE */
466 + rd->ltwo &= cpu_to_le32(0xFFFF0000UL); /* BE */
467 return 0;
468 }
469
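The four read-modify-write lines above set the buffer length (bits 16-30 of ltwo), set the interrupt-enable bit (31), and finally clear pa_high (bits 0-15). Assuming the reserved bit 30 starts out clear, the whole sequence collapses to a single store; a sketch of the equivalent one-liner (not part of the patch):

	rd->ltwo = cpu_to_le32((vptr->rx_buf_sz << 16) | BE_INT_ENABLE);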
470 @@ -1661,9 +1752,11 @@
471 int full = 0;
472 int idx;
473 int works = 0;
474 + u16 wTSR; /* BE */
475 struct velocity_td_info *tdinfo;
476 struct net_device_stats *stats = &vptr->stats;
477
478 + HAILS("velocity_tx_srv", status);
479 for (qnum = 0; qnum < vptr->num_txq; qnum++) {
480 for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
481 idx = (idx + 1) % vptr->options.numtx) {
482 @@ -1674,22 +1767,29 @@
483 td = &(vptr->td_rings[qnum][idx]);
484 tdinfo = &(vptr->td_infos[qnum][idx]);
485
486 - if (td->tdesc0.owner == OWNED_BY_NIC)
487 + /* if (td->tdesc0.owner == OWNED_BY_NIC) BE */
488 + if (td->tdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) /* BE */
489 break;
490
491 if ((works++ > 15))
492 break;
493
494 - if (td->tdesc0.TSR & TSR0_TERR) {
495 + wTSR = (u16)cpu_to_le32(td->tdesc0);
496 + /* if (td->tdesc0.TSR & TSR0_TERR) { BE */
497 + if (wTSR & TSR0_TERR) { /* BE */
498 stats->tx_errors++;
499 stats->tx_dropped++;
500 - if (td->tdesc0.TSR & TSR0_CDH)
501 + /* if (td->tdesc0.TSR & TSR0_CDH) BE */
502 + if (wTSR & TSR0_CDH) /* BE */
503 stats->tx_heartbeat_errors++;
504 - if (td->tdesc0.TSR & TSR0_CRS)
505 + /* if (td->tdesc0.TSR & TSR0_CRS) BE */
506 + if (wTSR & TSR0_CRS) /* BE */
507 stats->tx_carrier_errors++;
508 - if (td->tdesc0.TSR & TSR0_ABT)
509 + /* if (td->tdesc0.TSR & TSR0_ABT) BE */
510 + if (wTSR & TSR0_ABT) /* BE */
511 stats->tx_aborted_errors++;
512 - if (td->tdesc0.TSR & TSR0_OWC)
513 + /* if (td->tdesc0.TSR & TSR0_OWC) BE */
514 + if (wTSR & TSR0_OWC) /* BE */
515 stats->tx_window_errors++;
516 } else {
517 stats->tx_packets++;
518 @@ -1778,6 +1878,7 @@
519
520 static void velocity_error(struct velocity_info *vptr, int status)
521 {
522 + HAILS("velocity_error", status);
523
524 if (status & ISR_TXSTLI) {
525 struct mac_regs __iomem * regs = vptr->mac_regs;
526 @@ -1867,6 +1968,7 @@
527 struct sk_buff *skb = tdinfo->skb;
528 int i;
529
530 + HAIL("velocity_free_tx_buf");
531 /*
532 * Don't unmap the pre-allocated tx_bufs
533 */
534 @@ -2067,6 +2169,7 @@
535 struct velocity_td_info *tdinfo;
536 unsigned long flags;
537 int index;
538 + u32 lbufsz; /* BE */
539
540 int pktlen = skb->len;
541
542 @@ -2083,9 +2186,18 @@
543 td_ptr = &(vptr->td_rings[qnum][index]);
544 tdinfo = &(vptr->td_infos[qnum][index]);
545
546 - td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
547 - td_ptr->tdesc1.TCR = TCR0_TIC;
548 - td_ptr->td_buf[0].queue = 0;
549 + td_ptr->tdesc0 = 0x00000000UL; /* BE */
550 + td_ptr->tdesc1 = 0x00000000UL; /* BE */
551 +
552 + /* td_ptr->tdesc1.TCPLS = TCPLS_NORMAL; BE */
553 + td_ptr->tdesc1 &= cpu_to_le32(0xfcffffffUL); /* BE */
554 + td_ptr->tdesc1 |= cpu_to_le32(((u32)TCPLS_NORMAL) << 24); /* BE */
555 +
556 + /* td_ptr->tdesc1.TCR = TCR0_TIC; BE */
557 + td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_TIC); /* BE */
558 +
559 + /* td_ptr->td_buf[0].queue = 0; BE */
560 + td_ptr->td_buf[0].ltwo &= cpu_to_le32(~BE_QUEUE_ENABLE); /* BE */
561
562 /*
563 * Pad short frames.
564 @@ -2097,20 +2209,36 @@
565 memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
566 tdinfo->skb = skb;
567 tdinfo->skb_dma[0] = tdinfo->buf_dma;
568 - td_ptr->tdesc0.pktsize = pktlen;
569 + /* td_ptr->tdesc0.pktsize = pktlen; */
570 + td_ptr->tdesc0 &= cpu_to_le32(0xc000ffffUL); /* BE */
571 + lbufsz = pktlen; /* Assign, and make sure it's unsigned 32 bits - BE */
572 + lbufsz = lbufsz << 16; /* BE - shift over */
573 + td_ptr->tdesc0 |= cpu_to_le32(lbufsz); /* BE */
574 +
575 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
576 - td_ptr->td_buf[0].pa_high = 0;
577 - td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
578 + /* td_ptr->td_buf[0].pa_high = 0; */
579 + /* td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; */
580 + td_ptr->td_buf[0].ltwo = cpu_to_le32(lbufsz); /* BE */
581 tdinfo->nskb_dma = 1;
582 - td_ptr->tdesc1.CMDZ = 2;
583 + /* td_ptr->tdesc1.CMDZ = 2; */
584 + td_ptr->tdesc1 &= cpu_to_le32(0x0fffffffUL); /* BE */
585 + td_ptr->tdesc1 |= cpu_to_le32(((u32)0x2) << 28); /* BE */
586 } else
587 #ifdef VELOCITY_ZERO_COPY_SUPPORT
588 + /*
589 + * BE - NOTE on the VELOCITY_ZERO_COPY_SUPPORT:
590 + * This block of code has NOT been patched up for BE support, as
591 + * it is certainly broken -- if it compiles at all. Since the BE
592 + * fixes depend on the broken code, attempts to convert to BE support
593 + * would almost certainly confuse more than help.
594 + */
595 if (skb_shinfo(skb)->nr_frags > 0) {
596 int nfrags = skb_shinfo(skb)->nr_frags;
597 tdinfo->skb = skb;
598 if (nfrags > 6) {
599 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
600 tdinfo->skb_dma[0] = tdinfo->buf_dma;
601 + /* BE: Er, exactly what value are we assigning in this next line? */
602 td_ptr->tdesc0.pktsize =
603 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
604 td_ptr->td_buf[0].pa_high = 0;
605 @@ -2127,6 +2255,7 @@
606 /* FIXME: support 48bit DMA later */
607 td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
608 td_ptr->td_buf[i].pa_high = 0;
609 + /* BE: This next line can't be right: */
610 td_ptr->td_buf[i].bufsize = skb->len->skb->data_len;
611
612 for (i = 0; i < nfrags; i++) {
613 @@ -2144,7 +2273,7 @@
614 }
615
616 } else
617 -#endif
618 +#endif /* (broken) VELOCITY_ZERO_COPY_SUPPORT */
619 {
620 /*
621 * Map the linear network buffer into PCI space and
622 @@ -2152,19 +2281,29 @@
623 */
624 tdinfo->skb = skb;
625 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
626 - td_ptr->tdesc0.pktsize = pktlen;
627 + /* td_ptr->tdesc0.pktsize = pktlen; BE */
628 + td_ptr->tdesc0 &= cpu_to_le32(0xc000ffffUL); /* BE */
629 + lbufsz = pktlen; /* Assign, and make sure it's unsigned 32 bits - BE */
630 + lbufsz = lbufsz << 16; /* BE */
631 + td_ptr->tdesc0 |= cpu_to_le32(lbufsz); /* BE */
632 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
633 - td_ptr->td_buf[0].pa_high = 0;
634 - td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
635 + /* td_ptr->td_buf[0].pa_high = 0; BE */
636 + /* td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; BE */
637 + td_ptr->td_buf[0].ltwo = cpu_to_le32(lbufsz); /* BE */
638 tdinfo->nskb_dma = 1;
639 - td_ptr->tdesc1.CMDZ = 2;
640 + /* td_ptr->tdesc1.CMDZ = 2; BE */
641 + td_ptr->tdesc1 &= cpu_to_le32(0x0fffffffUL); /* BE */
642 + td_ptr->tdesc1 |= cpu_to_le32(((u32)0x2) << 28); /* BE */
643 }
644
645 if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
646 - td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb);
647 - td_ptr->tdesc1.pqinf.priority = 0;
648 - td_ptr->tdesc1.pqinf.CFI = 0;
649 - td_ptr->tdesc1.TCR |= TCR0_VETAG;
650 + /* td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb); BE */
651 + /* td_ptr->tdesc1.pqinf.priority = 0; BE */
652 + /* td_ptr->tdesc1.pqinf.CFI = 0; BE */
653 + /* td_ptr->tdesc1.TCR |= TCR0_VETAG; BE */
654 + td_ptr->tdesc1 &= cpu_to_le32(0xFFFF0000UL); /* BE */
655 + td_ptr->tdesc1 |= cpu_to_le32(vlan_tx_tag_get(skb)); /* BE */
656 + td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_VETAG); /* BE */
657 }
658
659 /*
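On the VLAN lines above: the 0xFFFF0000 mask clears the whole old pqinf area (VID in bits 0-11, CFI in bit 12, priority in bits 13-15), and the unmasked OR then writes the full 16-bit tag from vlan_tx_tag_get(). Since the 802.1Q TCI packs VID, CFI and priority in exactly those positions, the raw OR deposits each subfield where the old bitfields kept it, so separate CFI/priority assignments are no longer needed.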
660 @@ -2174,26 +2313,34 @@
661 && (skb->ip_summed == CHECKSUM_PARTIAL)) {
662 const struct iphdr *ip = ip_hdr(skb);
663 if (ip->protocol == IPPROTO_TCP)
664 - td_ptr->tdesc1.TCR |= TCR0_TCPCK;
665 + /* td_ptr->tdesc1.TCR |= TCR0_TCPCK; BE */
666 + td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_TCPCK); /* BE */
667 else if (ip->protocol == IPPROTO_UDP)
668 - td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
669 - td_ptr->tdesc1.TCR |= TCR0_IPCK;
670 - }
671 + /* td_ptr->tdesc1.TCR |= (TCR0_UDPCK); BE */
672 + td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_UDPCK); /* BE */
673 + /* td_ptr->tdesc1.TCR |= TCR0_IPCK; BE */
674 + td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_IPCK); /* BE */
675 + }
676 {
677
678 int prev = index - 1;
679
680 if (prev < 0)
681 prev = vptr->options.numtx - 1;
682 - td_ptr->tdesc0.owner = OWNED_BY_NIC;
683 + /* td_ptr->tdesc0.owner = OWNED_BY_NIC; BE */
684 + td_ptr->tdesc0 |= cpu_to_le32(BE_OWNED_BY_NIC); /* BE */
685 vptr->td_used[qnum]++;
686 vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
687
688 if (AVAIL_TD(vptr, qnum) < 1)
689 netif_stop_queue(dev);
690
691 - td_ptr = &(vptr->td_rings[qnum][prev]);
692 - td_ptr->td_buf[0].queue = 1;
693 + td_ptr = &(vptr->td_rings[qnum][prev]);
694 + /* td_ptr->td_buf[0].queue = 1; BE */
695 + td_ptr->td_buf[0].ltwo |= cpu_to_le32(BE_QUEUE_ENABLE); /* BE */
696 + if (vdebug&2) printk(KERN_NOTICE "velocity_xmit: (%s) len=%d idx=%d tdesc0=0x%x tdesc1=0x%x ltwo=0x%x\n",
697 + (pktlen<ETH_ZLEN) ? "short" : "normal", pktlen, index,
698 + td_ptr->tdesc0, td_ptr->tdesc1, td_ptr->td_buf[0].ltwo);
699 mac_tx_queue_wake(vptr->mac_regs, qnum);
700 }
701 dev->trans_start = jiffies;
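One caveat about the vdebug&2 trace above: tdesc0, tdesc1 and ltwo hold little-endian descriptor words, and %x prints them as host-order integers, so on the big-endian IXP4xx the logged values appear byte-swapped relative to the field layout. For illustration: a 60-byte, NIC-owned tdesc0 whose little-endian layout value is 0x803c0000 is logged as tdesc0=0x3c80 on a big-endian host.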
702 @@ -2219,7 +2366,7 @@
703 u32 isr_status;
704 int max_count = 0;
705
706 -
707 + HAIL("velocity_intr");
708 spin_lock(&vptr->lock);
709 isr_status = mac_read_isr(vptr->mac_regs);
710
711 @@ -2238,7 +2385,10 @@
712
713 while (isr_status != 0) {
714 mac_write_isr(vptr->mac_regs, isr_status);
715 - if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
716 + HAILS("velocity_intr",isr_status);
717 + /* MJW - velocity_error is ALWAYS called; need to mask off some other flags */
718 + /* if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) */
719 + if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI | ISR_PTX0I | ISR_ISR0)))
720 velocity_error(vptr, isr_status);
721 if (isr_status & (ISR_PRXI | ISR_PPRXI))
722 max_count += velocity_rx_srv(vptr, isr_status);
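The widened mask above is what stops velocity_error() from running on every interrupt: per-queue TX completion raises ISR_PTX0I and ISR_ISR0 alongside the pooled bits, and the original mask treated those as errors. A named-constant spelling of the same test (a sketch using the names from the hunk; not part of the patch):

	#define ISR_NORMAL_INTS \
		(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI | ISR_PTX0I | ISR_ISR0)

		if (isr_status & ~ISR_NORMAL_INTS)
			velocity_error(vptr, isr_status);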
723 @@ -2276,6 +2426,7 @@
724 int i;
725 struct dev_mc_list *mclist;
726
727 + HAIL("velocity_set_multi");
728 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
729 writel(0xffffffff, &regs->MARCAM[0]);
730 writel(0xffffffff, &regs->MARCAM[4]);
731 @@ -2319,6 +2470,7 @@
732 {
733 struct velocity_info *vptr = netdev_priv(dev);
734
735 + HAIL("net_device_stats");
736 /* If the hardware is down, don't touch MII */
737 if(!netif_running(dev))
738 return &vptr->stats;
739 @@ -2363,6 +2515,7 @@
740 struct velocity_info *vptr = netdev_priv(dev);
741 int ret;
742
743 + HAIL("velocity_ioctl");
744 /* If we are asked for information and the device is power
745 saving then we need to bring the device back up to talk to it */
746
747 @@ -2581,6 +2734,8 @@
748 {
749 u16 ww;
750
751 + HAIL("velocity_mii_read");
752 + HAIL("velocity_mii_write");
753 /*
754 * Disable MIICR_MAUTO, so that mii addr can be set normally
755 */
756 diff -Naur linux-2.6.24.orig/drivers/net/via-velocity.h linux-2.6.24/drivers/net/via-velocity.h
757 --- linux-2.6.24.orig/drivers/net/via-velocity.h 2008-01-31 23:11:26.000000000 -0600
758 +++ linux-2.6.24/drivers/net/via-velocity.h 2008-02-01 01:12:15.000000000 -0600
759 @@ -196,64 +196,70 @@
760 * Receive descriptor
761 */
762
763 -struct rdesc0 {
764 - u16 RSR; /* Receive status */
765 - u16 len:14; /* Received packet length */
766 - u16 reserved:1;
767 - u16 owner:1; /* Who owns this buffer ? */
768 -};
769 -
770 -struct rdesc1 {
771 - u16 PQTAG;
772 - u8 CSM;
773 - u8 IPKT;
774 -};
775 +//struct rdesc0 {
776 +// u16 RSR; /* Receive status */
777 +// u16 len:14; /* Received packet length */
778 +// u16 reserved:1;
779 +// u16 owner:1; /* Who owns this buffer ? */
780 +//};
781 +
782 +//struct rdesc1 {
783 +// u16 PQTAG;
784 +// u8 CSM;
785 +// u8 IPKT;
786 +//};
787
788 struct rx_desc {
789 - struct rdesc0 rdesc0;
790 - struct rdesc1 rdesc1;
791 +// struct rdesc0 rdesc0;
792 +// struct rdesc1 rdesc1;
793 + u32 rdesc0;
794 + u32 rdesc1;
795 u32 pa_low; /* Low 32 bit PCI address */
796 - u16 pa_high; /* Next 16 bit PCI address (48 total) */
797 - u16 len:15; /* Frame size */
798 - u16 inten:1; /* Enable interrupt */
799 +// u16 pa_high; /* Next 16 bit PCI address (48 total) */
800 +// u16 len:15; /* Frame size */
801 +// u16 inten:1; /* Enable interrupt */
802 + u32 ltwo;
803 } __attribute__ ((__packed__));
804
805 /*
806 * Transmit descriptor
807 */
808
809 -struct tdesc0 {
810 - u16 TSR; /* Transmit status register */
811 - u16 pktsize:14; /* Size of frame */
812 - u16 reserved:1;
813 - u16 owner:1; /* Who owns the buffer */
814 -};
815 -
816 -struct pqinf { /* Priority queue info */
817 - u16 VID:12;
818 - u16 CFI:1;
819 - u16 priority:3;
820 -} __attribute__ ((__packed__));
821 -
822 -struct tdesc1 {
823 - struct pqinf pqinf;
824 - u8 TCR;
825 - u8 TCPLS:2;
826 - u8 reserved:2;
827 - u8 CMDZ:4;
828 -} __attribute__ ((__packed__));
829 +//struct tdesc0 {
830 +// u16 TSR; /* Transmit status register */
831 +// u16 pktsize:14; /* Size of frame */
832 +// u16 reserved:1;
833 +// u16 owner:1; /* Who owns the buffer */
834 +//};
835 +
836 +//struct pqinf { /* Priority queue info */
837 +// u16 VID:12;
838 +// u16 CFI:1;
839 +// u16 priority:3;
840 +//} __attribute__ ((__packed__));
841 +
842 +//struct tdesc1 {
843 +// struct pqinf pqinf;
844 +// u8 TCR;
845 +// u8 TCPLS:2;
846 +// u8 reserved:2;
847 +// u8 CMDZ:4;
848 +//} __attribute__ ((__packed__));
849
850 struct td_buf {
851 u32 pa_low;
852 - u16 pa_high;
853 - u16 bufsize:14;
854 - u16 reserved:1;
855 - u16 queue:1;
856 +// u16 pa_high;
857 +// u16 bufsize:14;
858 +// u16 reserved:1;
859 +// u16 queue:1;
860 + u32 ltwo;
861 } __attribute__ ((__packed__));
862
863 struct tx_desc {
864 - struct tdesc0 tdesc0;
865 - struct tdesc1 tdesc1;
866 +// struct tdesc0 tdesc0;
867 +// struct tdesc1 tdesc1;
868 + u32 tdesc0;
869 + u32 tdesc1;
870 struct td_buf td_buf[7];
871 };
872
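Because these descriptor structs are shared with the NIC, the rewrite above must not change their footprint: rx_desc stays four 32-bit words (16 bytes) and tx_desc stays 64 bytes (two words plus seven 8-byte td_buf entries), so the ring-pool size arithmetic in velocity_init_rings() is unchanged. A cheap compile-time guard, were one wanted (a sketch, not part of the patch):

	#include <linux/kernel.h>	/* BUILD_BUG_ON */

	static inline void velocity_desc_size_check(void)
	{
		BUILD_BUG_ON(sizeof(struct rx_desc) != 16);
		BUILD_BUG_ON(sizeof(struct tx_desc) != 64);
	}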
873 @@ -279,6 +285,16 @@
874 OWNED_BY_NIC = 1
875 };
876
877 +/* Constants added for the BE fixes */
878 +#define BE_OWNED_BY_NIC 0x80000000UL
879 +#define BE_INT_ENABLE 0x80000000UL
880 +#define BE_QUEUE_ENABLE 0x80000000UL
881 +#define BE_TCR_TIC 0x00800000UL
882 +#define BE_TCR_VETAG 0x00200000UL
883 +#define BE_TCR_TCPCK 0x00040000UL
884 +#define BE_TCR_UDPCK 0x00080000UL
885 +#define BE_TCR_IPCK 0x00100000UL
886 +
887
888 /*
889 * MAC registers and macros.
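An editorial cross-reference for the constants above, against the bitfields they replace (assuming the TCR0_* values in this same 2.6.24 header: TCR0_TIC 0x80, TCR0_VETAG 0x20, TCR0_IPCK 0x10, TCR0_UDPCK 0x08, TCR0_TCPCK 0x04):

	/* BE_OWNED_BY_NIC  0x80000000  bit 31 of rdesc0/tdesc0  (was owner:1)
	 * BE_INT_ENABLE    0x80000000  bit 31 of rx_desc.ltwo   (was inten:1)
	 * BE_QUEUE_ENABLE  0x80000000  bit 31 of td_buf.ltwo    (was queue:1)
	 * BE_TCR_TIC       0x00800000  TCR0_TIC   << 16 into tdesc1
	 * BE_TCR_VETAG     0x00200000  TCR0_VETAG << 16
	 * BE_TCR_TCPCK     0x00040000  TCR0_TCPCK << 16
	 * BE_TCR_UDPCK     0x00080000  TCR0_UDPCK << 16
	 * BE_TCR_IPCK      0x00100000  TCR0_IPCK  << 16
	 */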
890 @@ -1512,6 +1528,7 @@
891 };
892
893 struct velocity_opt {
894 + int velo_debug; /* debug flag */
895 int numrx; /* Number of RX descriptors */
896 int numtx; /* Number of TX descriptors */
897 enum speed_opt spd_dpx; /* Media link mode */