target/linux/ixp4xx/patches-2.6.23/012-velocity_BE.patch
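
Big-endian support (e.g. ARM ixp4xx) for the via-velocity driver, plus an
optional trace facility. The descriptor bitfield structs in via-velocity.h
are replaced with raw little-endian u32 words that are tested and set through
explicit BE_* bit masks wrapped in cpu_to_le32(), so field access no longer
depends on host bitfield layout. The patch also drops the redundant
cpu_to_le32() around writel() arguments, always programs the VLAN CAM mask in
velocity_init_cam_filter(), inserts a 5 ms delay before CAM initialisation
(which the author notes fixes oopses and hangs on ARM), masks two further ISR
bits in velocity_intr() so velocity_error() is no longer called on every
interrupt, and adds the vdebug bitmask controlled by the velo_debug module
parameter.
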
Index: linux-2.6.23.17/drivers/net/via-velocity.c
===================================================================
--- linux-2.6.23.17.orig/drivers/net/via-velocity.c
+++ linux-2.6.23.17/drivers/net/via-velocity.c
@@ -96,11 +96,31 @@ MODULE_AUTHOR("VIA Networking Technologi
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
 
+/* Valid values for vdebug (additive, this is a bitmask):
+ *  0x00 => off
+ *  0x01 => always on
+ *  0x02 => additional detail on tx (rx, too, if anyone implements same)
+ *  0x04 => detail the initialization process
+ *  0x08 => spot debug detail; to be used as developers see fit
+ */
+static int vdebug = 0;
+
+/* HAIL - these macros are for the normal 0x01-type tracing... */
+#define HAIL(S) \
+	if (vdebug&1) printk(KERN_NOTICE "%s\n", (S));
+#define HAILS(S,T) \
+	if (vdebug&1) printk(KERN_NOTICE "%s -> status=0x%x\n", (S), (T));
+
 #define VELOCITY_PARAM(N,D) \
 	static int N[MAX_UNITS]=OPTION_DEFAULT;\
 	module_param_array(N, int, NULL, 0); \
 	MODULE_PARM_DESC(N, D);
 
+#define VELO_DEBUG_MIN 0
+#define VELO_DEBUG_MAX 255
+#define VELO_DEBUG_DEF 0
+VELOCITY_PARAM(velo_debug, "Debug level");
+
 #define RX_DESC_MIN 64
 #define RX_DESC_MAX 255
 #define RX_DESC_DEF 64
@@ -385,12 +405,12 @@ static void __devinit velocity_set_int_o
 	if (val == -1)
 		*opt = def;
 	else if (val < min || val > max) {
-		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
-				devname, name, min, max);
+		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "via-velocity: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
+				name, min, max);
 		*opt = def;
 	} else {
-		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
-				devname, name, val);
+		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "via-velocity: set value of parameter %s to %d\n",
+				name, val);
 		*opt = val;
 	}
 }
@@ -415,12 +435,12 @@ static void __devinit velocity_set_bool_
 	if (val == -1)
 		*opt |= (def ? flag : 0);
 	else if (val < 0 || val > 1) {
-		printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
-			devname, name);
+		printk(KERN_NOTICE "via-velocity: the value of parameter %s is invalid, the valid range is (0-1)\n",
+			name);
 		*opt |= (def ? flag : 0);
 	} else {
-		printk(KERN_INFO "%s: set parameter %s to %s\n",
-			devname, name, val ? "TRUE" : "FALSE");
+		printk(KERN_INFO "via-velocity: set parameter %s to %s\n",
+			name, val ? "TRUE" : "FALSE");
 		*opt |= (val ? flag : 0);
 	}
 }
@@ -438,6 +458,7 @@ static void __devinit velocity_set_bool_
 static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname)
 {
 
+	velocity_set_int_opt(&opts->velo_debug, velo_debug[index], VELO_DEBUG_MIN, VELO_DEBUG_MAX, VELO_DEBUG_DEF, "velo_debug", devname);
 	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
 	velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
 	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
@@ -452,6 +473,7 @@ static void __devinit velocity_get_optio
 	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
 	velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
 	opts->numrx = (opts->numrx & ~3);
+	vdebug = opts->velo_debug;
 }
 
 /**
@@ -466,6 +488,8 @@ static void velocity_init_cam_filter(str
 {
 	struct mac_regs __iomem * regs = vptr->mac_regs;
 
+	HAIL("velocity_init_cam_filter");
+
 	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
 	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
 	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
@@ -484,14 +508,12 @@ static void velocity_init_cam_filter(str
 		WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
 
 		mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid), VELOCITY_VLAN_ID_CAM);
-		vptr->vCAMmask[0] |= 1;
-		mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
 	} else {
 		u16 temp = 0;
 		mac_set_cam(regs, 0, (u8 *) &temp, VELOCITY_VLAN_ID_CAM);
-		temp = 1;
-		mac_set_cam_mask(regs, (u8 *) &temp, VELOCITY_VLAN_ID_CAM);
 	}
+	vptr->vCAMmask[0] |= 1;
+	mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
 }
 
 /**
@@ -508,13 +530,15 @@ static void velocity_rx_reset(struct vel
 	struct mac_regs __iomem * regs = vptr->mac_regs;
 	int i;
 
+	HAIL("velocity_rx_reset");
 	vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;
 
 	/*
 	 * Init state, all RD entries belong to the NIC
 	 */
 	for (i = 0; i < vptr->options.numrx; ++i)
-		vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
+		/* vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC; BE */
+		vptr->rd_ring[i].rdesc0 |= cpu_to_le32(BE_OWNED_BY_NIC); /* BE */
 
 	writew(vptr->options.numrx, &regs->RBRDU);
 	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
@@ -537,12 +561,15 @@ static void velocity_init_registers(stru
 	struct mac_regs __iomem * regs = vptr->mac_regs;
 	int i, mii_status;
 
+	if (vdebug&5) printk(KERN_NOTICE "velocity_init_registers: entering\n");
+
 	mac_wol_reset(regs);
 
 	switch (type) {
 	case VELOCITY_INIT_RESET:
 	case VELOCITY_INIT_WOL:
 
+		if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: RESET or WOL\n");
 		netif_stop_queue(vptr->dev);
 
 		/*
@@ -570,12 +597,13 @@ static void velocity_init_registers(stru
 
 	case VELOCITY_INIT_COLD:
 	default:
+		if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: COLD or default\n");
 		/*
 		 * Do reset
 		 */
 		velocity_soft_reset(vptr);
+		if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: soft reset complete.\n");
 		mdelay(5);
-
 		mac_eeprom_reload(regs);
 		for (i = 0; i < 6; i++) {
 			writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
@@ -593,11 +621,16 @@ static void velocity_init_registers(stru
 		 */
 		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
 
+		if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: Initializing CAM filter\n");
 		/*
 		 * Init CAM filter
 		 */
+		if (vdebug&8) printk(KERN_NOTICE "velocity: spot debug: about to init CAM filters\n");
+		mdelay(5); /* MJW - ARM processors, kernel 2.6.19 - this fixes oopses and hangs */
 		velocity_init_cam_filter(vptr);
+		if (vdebug&8) printk(KERN_NOTICE "velocity: spot debug: init CAM filters complete\n");
 
+		if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: Setting packet filter\n");
 		/*
 		 * Set packet filter: Receive directed and broadcast address
 		 */
@@ -607,10 +640,12 @@ static void velocity_init_registers(stru
 		 * Enable MII auto-polling
 		 */
 		enable_mii_autopoll(regs);
+		if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: enable_mii_autopoll complete.\n");
 
 		vptr->int_mask = INT_MASK_DEF;
 
-		writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
+		/* writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo); BE */
+		writel((vptr->rd_pool_dma), &regs->RDBaseLo); /* BE */
 		writew(vptr->options.numrx - 1, &regs->RDCSize);
 		mac_rx_queue_run(regs);
 		mac_rx_queue_wake(regs);
@@ -618,10 +653,13 @@ static void velocity_init_registers(stru
 		writew(vptr->options.numtx - 1, &regs->TDCSize);
 
 		for (i = 0; i < vptr->num_txq; i++) {
-			writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
+			/* writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); BE */
+			writel((vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); /* BE */
 			mac_tx_queue_run(regs, i);
 		}
 
+		if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: DMA settings complete.\n");
+
 		init_flow_control_register(vptr);
 
 		writel(CR0_STOP, &regs->CR0Clr);
@@ -640,8 +678,10 @@ static void velocity_init_registers(stru
 
 		enable_flow_control_ability(vptr);
 		mac_hw_mibs_init(regs);
+		if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: Set interrupt mask\n");
 		mac_write_int_mask(vptr->int_mask, regs);
 		mac_clear_isr(regs);
+		if (vdebug&4) printk(KERN_NOTICE "velocity_init_registers: complete.\n");
 
 	}
 }
@@ -659,6 +699,7 @@ static int velocity_soft_reset(struct ve
 	struct mac_regs __iomem * regs = vptr->mac_regs;
 	int i = 0;
 
+	HAIL("velocity_soft_reset");
 	writel(CR0_SFRST, &regs->CR0Set);
 
 	for (i = 0; i < W_MAX_TIMEOUT; i++) {
@@ -722,6 +763,7 @@ static int __devinit velocity_found1(str
 		       VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
 		printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
 		printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
+		printk(KERN_INFO "BE support, misc. fixes MJW 01Jan2007 - may be unstable\n");
 		first = 0;
 	}
 
@@ -934,6 +976,7 @@ static int velocity_init_rings(struct ve
 	dma_addr_t pool_dma;
 	u8 *pool;
 
+	HAIL("velocity_init_rings");
 	/*
 	 * Allocate all RD/TD rings a single pool
 	 */
@@ -996,6 +1039,7 @@ static int velocity_init_rings(struct ve
 static void velocity_free_rings(struct velocity_info *vptr)
 {
 	int size;
+	HAIL("velocity_free_rings");
 
 	size = vptr->options.numrx * sizeof(struct rx_desc) +
 		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
@@ -1012,6 +1056,7 @@ static inline void velocity_give_many_rx
 	struct mac_regs __iomem *regs = vptr->mac_regs;
 	int avail, dirty, unusable;
 
+	HAIL("velocity_give_many_rx_descs");
 	/*
 	 * RD number must be equal to 4X per hardware spec
 	 * (programming guide rev 1.20, p.13)
@@ -1025,7 +1070,8 @@ static inline void velocity_give_many_rx
 	dirty = vptr->rd_dirty - unusable;
 	for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
 		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
-		vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
+		/* vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC; BE */
+		vptr->rd_ring[dirty].rdesc0 |= cpu_to_le32(BE_OWNED_BY_NIC); /* BE */
 	}
 
 	writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
@@ -1035,12 +1081,14 @@ static inline void velocity_give_many_rx
 static int velocity_rx_refill(struct velocity_info *vptr)
 {
 	int dirty = vptr->rd_dirty, done = 0, ret = 0;
+	HAIL("velocity_rx_refill");
 
 	do {
 		struct rx_desc *rd = vptr->rd_ring + dirty;
 
 		/* Fine for an all zero Rx desc at init time as well */
-		if (rd->rdesc0.owner == OWNED_BY_NIC)
+		/* if (rd->rdesc0.owner == OWNED_BY_NIC) BE */
+		if (rd->rdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) /* BE */
 			break;
 
 		if (!vptr->rd_info[dirty].skb) {
@@ -1075,6 +1123,7 @@ static int velocity_init_rd_ring(struct
 	unsigned int rsize = sizeof(struct velocity_rd_info) *
 			vptr->options.numrx;
 
+	HAIL("velocity_init_rd_ring");
 	vptr->rd_info = kmalloc(rsize, GFP_KERNEL);
 	if(vptr->rd_info == NULL)
 		goto out;
@@ -1104,6 +1153,7 @@ static void velocity_free_rd_ring(struct
 {
 	int i;
 
+	HAIL("velocity_free_rd_ring");
 	if (vptr->rd_info == NULL)
 		return;
 
@@ -1145,6 +1195,7 @@ static int velocity_init_td_ring(struct
 	unsigned int tsize = sizeof(struct velocity_td_info) *
 			vptr->options.numtx;
 
+	HAIL("velocity_init_td_ring");
 	/* Init the TD ring entries */
 	for (j = 0; j < vptr->num_txq; j++) {
 		curr = vptr->td_pool_dma[j];
@@ -1181,6 +1232,7 @@ static void velocity_free_td_ring_entry(
 	struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
 	int i;
 
+	HAIL("velocity_free_td_ring_entry");
 	if (td_info == NULL)
 		return;
 
@@ -1210,6 +1262,7 @@ static void velocity_free_td_ring(struct
 {
 	int i, j;
 
+	HAIL("velocity_free_td_ring");
 	for (j = 0; j < vptr->num_txq; j++) {
 		if (vptr->td_infos[j] == NULL)
 			continue;
@@ -1237,34 +1290,42 @@ static int velocity_rx_srv(struct veloci
 	struct net_device_stats *stats = &vptr->stats;
 	int rd_curr = vptr->rd_curr;
 	int works = 0;
+	u16 wRSR; /* BE */
 
+	HAILS("velocity_rx_srv", status);
 	do {
 		struct rx_desc *rd = vptr->rd_ring + rd_curr;
 
 		if (!vptr->rd_info[rd_curr].skb)
 			break;
 
-		if (rd->rdesc0.owner == OWNED_BY_NIC)
+		/* if (rd->rdesc0.owner == OWNED_BY_NIC) BE */
+		if (rd->rdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) /* BE */
 			break;
 
 		rmb();
 
+		wRSR = (u16)(cpu_to_le32(rd->rdesc0)); /* BE */
 		/*
 		 * Don't drop CE or RL error frame although RXOK is off
 		 */
-		if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
+		/* if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { BE */
+		if ((wRSR & RSR_RXOK) || (!(wRSR & RSR_RXOK) && (wRSR & (RSR_CE | RSR_RL)))) { /* BE */
 			if (velocity_receive_frame(vptr, rd_curr) < 0)
 				stats->rx_dropped++;
 		} else {
-			if (rd->rdesc0.RSR & RSR_CRC)
+			/* if (rd->rdesc0.RSR & RSR_CRC) BE */
+			if (wRSR & RSR_CRC) /* BE */
 				stats->rx_crc_errors++;
-			if (rd->rdesc0.RSR & RSR_FAE)
+			/* if (rd->rdesc0.RSR & RSR_FAE) BE */
+			if (wRSR & RSR_FAE) /* BE */
 				stats->rx_frame_errors++;
 
 			stats->rx_dropped++;
 		}
 
-		rd->inten = 1;
+		/* rd->inten = 1; BE */
+		rd->ltwo |= cpu_to_le32(BE_INT_ENABLE); /* BE */
 
 		vptr->dev->last_rx = jiffies;
 
@@ -1295,13 +1356,21 @@ static int velocity_rx_srv(struct veloci
 
 static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
 {
+	u8 bCSM;
+	HAIL("velocity_rx_csum");
 	skb->ip_summed = CHECKSUM_NONE;
 
-	if (rd->rdesc1.CSM & CSM_IPKT) {
-		if (rd->rdesc1.CSM & CSM_IPOK) {
-			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
-					(rd->rdesc1.CSM & CSM_UDPKT)) {
-				if (!(rd->rdesc1.CSM & CSM_TUPOK)) {
+//	if (rd->rdesc1.CSM & CSM_IPKT) {
+//		if (rd->rdesc1.CSM & CSM_IPOK) {
+//			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
+//					(rd->rdesc1.CSM & CSM_UDPKT)) {
+//				if (!(rd->rdesc1.CSM & CSM_TUPOK)) {
+	bCSM = (u8)(cpu_to_le32(rd->rdesc1) >> 16); /* BE */
+	if (bCSM & CSM_IPKT) {
		if (bCSM & CSM_IPOK) {
			if ((bCSM & CSM_TCPKT) ||
				(bCSM & CSM_UDPKT)) {
				if (!(bCSM & CSM_TUPOK)) { /* BE */
 					return;
 				}
 			}
@@ -1327,9 +1396,11 @@ static inline int velocity_rx_copy(struc
 {
 	int ret = -1;
 
+	HAIL("velocity_rx_copy");
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
+		HAIL("velocity_rx_copy (working...)");
 		new_skb = dev_alloc_skb(pkt_size + 2);
 		if (new_skb) {
 			new_skb->dev = vptr->dev;
@@ -1360,10 +1431,12 @@ static inline int velocity_rx_copy(struc
 static inline void velocity_iph_realign(struct velocity_info *vptr,
 					struct sk_buff *skb, int pkt_size)
 {
+	HAIL("velocity_iph_realign");
 	/* FIXME - memmove ? */
 	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
 		int i;
 
+		HAIL("velocity_iph_realign (working...)");
 		for (i = pkt_size; i >= 0; i--)
 			*(skb->data + i + 2) = *(skb->data + i);
 		skb_reserve(skb, 2);
@@ -1382,19 +1455,27 @@ static inline void velocity_iph_realign(
 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 {
 	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
+	u16 pkt_len; /* BE */
+	u16 wRSR; /* BE */
+	struct sk_buff *skb;
 	struct net_device_stats *stats = &vptr->stats;
 	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
 	struct rx_desc *rd = &(vptr->rd_ring[idx]);
-	int pkt_len = rd->rdesc0.len;
-	struct sk_buff *skb;
+	/* int pkt_len = rd->rdesc0.len; BE */
+
+	pkt_len = ((cpu_to_le32(rd->rdesc0) >> 16) & 0x00003FFFUL); /* BE */
+	wRSR = (u16)(cpu_to_le32(rd->rdesc0)); /* BE */
 
-	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
+	HAIL("velocity_receive_frame");
+	/* if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { BE */
+	if (wRSR & (RSR_STP | RSR_EDP)) { /* BE */
 		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name);
 		stats->rx_length_errors++;
 		return -EINVAL;
 	}
 
-	if (rd->rdesc0.RSR & RSR_MAR)
+	/* if (rd->rdesc0.RSR & RSR_MAR) BE */
+	if (wRSR & RSR_MAR) /* BE */
 		vptr->stats.multicast++;
 
 	skb = rd_info->skb;
@@ -1407,7 +1488,8 @@ static int velocity_receive_frame(struct
 	 */
 
 	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
-		if (rd->rdesc0.RSR & RSR_RL) {
+		/* if (rd->rdesc0.RSR & RSR_RL) { BE */
+		if (wRSR & RSR_RL) { /* BE */
 			stats->rx_length_errors++;
 			return -EINVAL;
 		}
@@ -1451,6 +1533,7 @@ static int velocity_alloc_rx_buf(struct
 	struct rx_desc *rd = &(vptr->rd_ring[idx]);
 	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
 
+	HAIL("velocity_alloc_rx_buf");
 	rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
@@ -1468,10 +1551,14 @@ static int velocity_alloc_rx_buf(struct
 	 */
 
 	*((u32 *) & (rd->rdesc0)) = 0;
-	rd->len = cpu_to_le32(vptr->rx_buf_sz);
-	rd->inten = 1;
+	/* rd->len = cpu_to_le32(vptr->rx_buf_sz); BE */
+	/* rd->inten = 1; BE */
 	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
-	rd->pa_high = 0;
+	/* rd->pa_high = 0; BE */
+	rd->ltwo &= cpu_to_le32(0xC000FFFFUL); /* BE */
+	rd->ltwo |= cpu_to_le32((vptr->rx_buf_sz << 16)); /* BE */
+	rd->ltwo |= cpu_to_le32(BE_INT_ENABLE); /* BE */
+	rd->ltwo &= cpu_to_le32(0xFFFF0000UL); /* BE */
 	return 0;
 }
 
@@ -1492,9 +1579,11 @@ static int velocity_tx_srv(struct veloci
 	int full = 0;
 	int idx;
 	int works = 0;
+	u16 wTSR; /* BE */
 	struct velocity_td_info *tdinfo;
 	struct net_device_stats *stats = &vptr->stats;
 
+	HAILS("velocity_tx_srv", status);
 	for (qnum = 0; qnum < vptr->num_txq; qnum++) {
 		for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
 			idx = (idx + 1) % vptr->options.numtx) {
@@ -1505,22 +1594,29 @@ static int velocity_tx_srv(struct veloci
 			td = &(vptr->td_rings[qnum][idx]);
 			tdinfo = &(vptr->td_infos[qnum][idx]);
 
-			if (td->tdesc0.owner == OWNED_BY_NIC)
+			/* if (td->tdesc0.owner == OWNED_BY_NIC) BE */
+			if (td->tdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) /* BE */
 				break;
 
 			if ((works++ > 15))
 				break;
 
-			if (td->tdesc0.TSR & TSR0_TERR) {
+			wTSR = (u16)cpu_to_le32(td->tdesc0);
+			/* if (td->tdesc0.TSR & TSR0_TERR) { BE */
+			if (wTSR & TSR0_TERR) { /* BE */
 				stats->tx_errors++;
 				stats->tx_dropped++;
-				if (td->tdesc0.TSR & TSR0_CDH)
+				/* if (td->tdesc0.TSR & TSR0_CDH) BE */
+				if (wTSR & TSR0_CDH) /* BE */
 					stats->tx_heartbeat_errors++;
-				if (td->tdesc0.TSR & TSR0_CRS)
+				/* if (td->tdesc0.TSR & TSR0_CRS) BE */
+				if (wTSR & TSR0_CRS) /* BE */
 					stats->tx_carrier_errors++;
-				if (td->tdesc0.TSR & TSR0_ABT)
+				/* if (td->tdesc0.TSR & TSR0_ABT) BE */
+				if (wTSR & TSR0_ABT) /* BE */
 					stats->tx_aborted_errors++;
-				if (td->tdesc0.TSR & TSR0_OWC)
+				/* if (td->tdesc0.TSR & TSR0_OWC) BE */
+				if (wTSR & TSR0_OWC) /* BE */
 					stats->tx_window_errors++;
 			} else {
 				stats->tx_packets++;
@@ -1609,6 +1705,7 @@ static void velocity_print_link_status(s
 
 static void velocity_error(struct velocity_info *vptr, int status)
 {
+	HAILS("velocity_error", status);
 
 	if (status & ISR_TXSTLI) {
 		struct mac_regs __iomem * regs = vptr->mac_regs;
@@ -1698,6 +1795,7 @@ static void velocity_free_tx_buf(struct
 	struct sk_buff *skb = tdinfo->skb;
 	int i;
 
+	HAIL("velocity_free_tx_buf");
 	/*
 	 * Don't unmap the pre-allocated tx_bufs
 	 */
@@ -1901,6 +1999,7 @@ static int velocity_xmit(struct sk_buff
 	struct velocity_td_info *tdinfo;
 	unsigned long flags;
 	int index;
+	u32 lbufsz; /* BE */
 
 	int pktlen = skb->len;
 
@@ -1917,9 +2016,18 @@ static int velocity_xmit(struct sk_buff
 	td_ptr = &(vptr->td_rings[qnum][index]);
 	tdinfo = &(vptr->td_infos[qnum][index]);
 
-	td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
-	td_ptr->tdesc1.TCR = TCR0_TIC;
-	td_ptr->td_buf[0].queue = 0;
+	td_ptr->tdesc0 = 0x00000000UL; /* BE */
+	td_ptr->tdesc1 = 0x00000000UL; /* BE */
+
+	/* td_ptr->tdesc1.TCPLS = TCPLS_NORMAL; BE */
+	td_ptr->tdesc1 &= cpu_to_le32(0xfcffffffUL); /* BE */
+	td_ptr->tdesc1 |= cpu_to_le32(((u32)TCPLS_NORMAL) << 24); /* BE */
+
+	/* td_ptr->tdesc1.TCR = TCR0_TIC; BE */
+	td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_TIC); /* BE */
+
+	/* td_ptr->td_buf[0].queue = 0; BE */
+	td_ptr->td_buf[0].ltwo &= cpu_to_le32(~BE_QUEUE_ENABLE); /* BE */
 
 	/*
 	 *	Pad short frames.
@@ -1931,20 +2039,36 @@ static int velocity_xmit(struct sk_buff
 		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
 		tdinfo->skb = skb;
 		tdinfo->skb_dma[0] = tdinfo->buf_dma;
-		td_ptr->tdesc0.pktsize = pktlen;
+		/* td_ptr->tdesc0.pktsize = pktlen; */
+		td_ptr->tdesc0 &= cpu_to_le32(0xc000ffffUL); /* BE */
+		lbufsz = pktlen; /* Assign, and make sure it's unsigned 32 bits - BE */
+		lbufsz = lbufsz << 16; /* BE - shift over */
+		td_ptr->tdesc0 |= cpu_to_le32(lbufsz); /* BE */
+
 		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-		td_ptr->td_buf[0].pa_high = 0;
-		td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+		/* td_ptr->td_buf[0].pa_high = 0; */
+		/* td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; */
+		td_ptr->td_buf[0].ltwo = cpu_to_le32(lbufsz); /* BE */
 		tdinfo->nskb_dma = 1;
-		td_ptr->tdesc1.CMDZ = 2;
+		/* td_ptr->tdesc1.CMDZ = 2; */
+		td_ptr->tdesc1 &= cpu_to_le32(0x0fffffffUL); /* BE */
+		td_ptr->tdesc1 |= cpu_to_le32(((u32)0x2) << 28); /* BE */
 	} else
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
+	/*
+	 * BE - NOTE on the VELOCITY_ZERO_COPY_SUPPORT:
+	 * This block of code has NOT been patched up for BE support, as
+	 * it is certainly broken -- if it compiles at all. Since the BE
+	 * fixes depend on the broken code, attempts to convert to BE support
+	 * would almost certainly confuse more than help.
	 */
 	if (skb_shinfo(skb)->nr_frags > 0) {
 		int nfrags = skb_shinfo(skb)->nr_frags;
 		tdinfo->skb = skb;
 		if (nfrags > 6) {
 			skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
+			/* BE: Er, exactly what value are we assigning in this next line? */
 			td_ptr->tdesc0.pktsize =
 			td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
 			td_ptr->td_buf[0].pa_high = 0;
@@ -1961,6 +2085,7 @@ static int velocity_xmit(struct sk_buff
 			/* FIXME: support 48bit DMA later */
 			td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
 			td_ptr->td_buf[i].pa_high = 0;
+			/* BE: This next line can't be right: */
 			td_ptr->td_buf[i].bufsize = skb->len->skb->data_len;
 
 		for (i = 0; i < nfrags; i++) {
@@ -1978,7 +2103,7 @@ static int velocity_xmit(struct sk_buff
 	}
 
 	} else
-#endif
+#endif /* (broken) VELOCITY_ZERO_COPY_SUPPORT */
 	{
 		/*
 		 * Map the linear network buffer into PCI space and
@@ -1986,19 +2111,30 @@ static int velocity_xmit(struct sk_buff
 		tdinfo->skb = skb;
 		tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
-		td_ptr->tdesc0.pktsize = pktlen;
+		/* td_ptr->tdesc0.pktsize = pktlen; BE */
+		td_ptr->tdesc0 &= cpu_to_le32(0xc000ffffUL); /* BE */
+		lbufsz = pktlen; /* Assign, and make sure it's unsigned 32 bits - BE */
+		lbufsz = lbufsz << 16; /* BE */
+		td_ptr->tdesc0 |= cpu_to_le32(lbufsz); /* BE */
 		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-		td_ptr->td_buf[0].pa_high = 0;
-		td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+		/* td_ptr->td_buf[0].pa_high = 0; BE */
+		/* td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; BE */
+		td_ptr->td_buf[0].ltwo = cpu_to_le32(lbufsz); /* BE */
+
 		tdinfo->nskb_dma = 1;
-		td_ptr->tdesc1.CMDZ = 2;
+		/* td_ptr->tdesc1.CMDZ = 2; BE */
+		td_ptr->tdesc1 &= cpu_to_le32(0x0fffffffUL); /* BE */
+		td_ptr->tdesc1 |= cpu_to_le32(((u32)0x2) << 28);/* BE */
 	}
 
 	if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
-		td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff);
-		td_ptr->tdesc1.pqinf.priority = 0;
-		td_ptr->tdesc1.pqinf.CFI = 0;
-		td_ptr->tdesc1.TCR |= TCR0_VETAG;
+		/* td_ptr->tdesc1.pqinf.priority = 0; BE */
+		/* td_ptr->tdesc1.pqinf.CFI = 0; BE */
+		td_ptr->tdesc1 &= cpu_to_le32(0xFFFF0000UL); /* BE */
+		/* td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff); BE */
+		td_ptr->tdesc1 |= cpu_to_le32((vptr->options.vid & 0xfff)); /* BE */
+		/* td_ptr->tdesc1.TCR |= TCR0_VETAG; BE */
+		td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_VETAG); /* BE */
 	}
 
 	/*
@@ -2008,26 +2144,34 @@ static int velocity_xmit(struct sk_buff
 	    && (skb->ip_summed == CHECKSUM_PARTIAL)) {
 		const struct iphdr *ip = ip_hdr(skb);
 		if (ip->protocol == IPPROTO_TCP)
-			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
+			/* td_ptr->tdesc1.TCR |= TCR0_TCPCK; BE */
+			td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_TCPCK); /* BE */
 		else if (ip->protocol == IPPROTO_UDP)
-			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
-		td_ptr->tdesc1.TCR |= TCR0_IPCK;
-	}
+			/* td_ptr->tdesc1.TCR |= (TCR0_UDPCK); BE */
+			td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_UDPCK); /* BE */
+		/* td_ptr->tdesc1.TCR |= TCR0_IPCK; BE */
+		td_ptr->tdesc1 |= cpu_to_le32(BE_TCR_IPCK); /* BE */
+	}
 	{
 
 		int prev = index - 1;
 
 		if (prev < 0)
 			prev = vptr->options.numtx - 1;
-		td_ptr->tdesc0.owner = OWNED_BY_NIC;
+		/* td_ptr->tdesc0.owner = OWNED_BY_NIC; BE */
+		td_ptr->tdesc0 |= cpu_to_le32(BE_OWNED_BY_NIC); /* BE */
 		vptr->td_used[qnum]++;
 		vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
 
 		if (AVAIL_TD(vptr, qnum) < 1)
 			netif_stop_queue(dev);
 
-		td_ptr = &(vptr->td_rings[qnum][prev]);
-		td_ptr->td_buf[0].queue = 1;
+		td_ptr = &(vptr->td_rings[qnum][prev]);
+		/* td_ptr->td_buf[0].queue = 1; BE */
+		td_ptr->td_buf[0].ltwo |= cpu_to_le32(BE_QUEUE_ENABLE); /* BE */
+		if (vdebug&2) printk(KERN_NOTICE "velocity_xmit: (%s) len=%d idx=%d tdesc0=0x%x tdesc1=0x%x ltwo=0x%x\n",
+			(pktlen<ETH_ZLEN) ? "short" : "normal", pktlen, index,
+			td_ptr->tdesc0, td_ptr->tdesc1, td_ptr->td_buf[0].ltwo);
 		mac_tx_queue_wake(vptr->mac_regs, qnum);
 	}
 	dev->trans_start = jiffies;
@@ -2053,7 +2197,7 @@ static int velocity_intr(int irq, void *
 	u32 isr_status;
 	int max_count = 0;
 
-
+	HAIL("velocity_intr");
 	spin_lock(&vptr->lock);
 	isr_status = mac_read_isr(vptr->mac_regs);
 
@@ -2072,7 +2216,10 @@ static int velocity_intr(int irq, void *
 
 	while (isr_status != 0) {
 		mac_write_isr(vptr->mac_regs, isr_status);
-		if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+		HAILS("velocity_intr",isr_status);
+		/* MJW - velocity_error is ALWAYS called; need to mask off some other flags */
+		/* if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) */
+		if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI | ISR_PTX0I | ISR_ISR0)))
 			velocity_error(vptr, isr_status);
 		if (isr_status & (ISR_PRXI | ISR_PPRXI))
 			max_count += velocity_rx_srv(vptr, isr_status);
@@ -2110,6 +2257,7 @@ static void velocity_set_multi(struct ne
 	int i;
 	struct dev_mc_list *mclist;
 
+	HAIL("velocity_set_multi");
 	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
 		writel(0xffffffff, &regs->MARCAM[0]);
 		writel(0xffffffff, &regs->MARCAM[4]);
@@ -2153,6 +2301,7 @@ static struct net_device_stats *velocity
 {
 	struct velocity_info *vptr = netdev_priv(dev);
 
+	HAIL("net_device_stats");
 	/* If the hardware is down, don't touch MII */
 	if(!netif_running(dev))
 		return &vptr->stats;
@@ -2197,6 +2346,7 @@ static int velocity_ioctl(struct net_dev
 	struct velocity_info *vptr = netdev_priv(dev);
 	int ret;
 
+	HAIL("velocity_ioctl");
 	/* If we are asked for information and the device is power
 	   saving then we need to bring the device back up to talk to it */
 
@@ -2415,6 +2565,7 @@ static int velocity_mii_read(struct mac_
 {
 	u16 ww;
 
+	HAIL("velocity_mii_read");
 	/*
 	 * Disable MIICR_MAUTO, so that mii addr can be set normally
 	 */
@@ -2451,6 +2602,7 @@ static int velocity_mii_write(struct mac
 {
 	u16 ww;
 
+	HAIL("velocity_mii_write");
 	/*
 	 * Disable MIICR_MAUTO, so that mii addr can be set normally
 	 */
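
The header changes below replace the descriptor bitfield structs with plain
u32 words. Each BE_* constant added near the end of the file is the old field
shifted to its position in the little-endian word: the owner bit is the MSB
of the second u16 of rdesc0/tdesc0, i.e. bit 31, giving BE_OWNED_BY_NIC
0x80000000, and the TCR byte occupies bits 16-23 of tdesc1, so its flags
shift left by 16 (assuming the TCR0_* values of 2.6.23's via-velocity.h,
e.g. TCR0_TIC 0x80, which maps to BE_TCR_TIC 0x00800000).
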
Index: linux-2.6.23.17/drivers/net/via-velocity.h
===================================================================
--- linux-2.6.23.17.orig/drivers/net/via-velocity.h
+++ linux-2.6.23.17/drivers/net/via-velocity.h
@@ -196,64 +196,70 @@
  * Receive descriptor
  */
 
-struct rdesc0 {
-	u16 RSR; /* Receive status */
-	u16 len:14; /* Received packet length */
-	u16 reserved:1;
-	u16 owner:1; /* Who owns this buffer ? */
-};
-
-struct rdesc1 {
-	u16 PQTAG;
-	u8 CSM;
-	u8 IPKT;
-};
+//struct rdesc0 {
+//	u16 RSR; /* Receive status */
+//	u16 len:14; /* Received packet length */
+//	u16 reserved:1;
+//	u16 owner:1; /* Who owns this buffer ? */
+//};
+
+//struct rdesc1 {
+//	u16 PQTAG;
+//	u8 CSM;
+//	u8 IPKT;
+//};
 
 struct rx_desc {
-	struct rdesc0 rdesc0;
-	struct rdesc1 rdesc1;
+//	struct rdesc0 rdesc0;
+//	struct rdesc1 rdesc1;
+	u32 rdesc0;
+	u32 rdesc1;
 	u32 pa_low; /* Low 32 bit PCI address */
-	u16 pa_high; /* Next 16 bit PCI address (48 total) */
-	u16 len:15; /* Frame size */
-	u16 inten:1; /* Enable interrupt */
+//	u16 pa_high; /* Next 16 bit PCI address (48 total) */
+//	u16 len:15; /* Frame size */
+//	u16 inten:1; /* Enable interrupt */
+	u32 ltwo;
 } __attribute__ ((__packed__));
 
 /*
  * Transmit descriptor
 */
 
-struct tdesc0 {
-	u16 TSR; /* Transmit status register */
-	u16 pktsize:14; /* Size of frame */
-	u16 reserved:1;
-	u16 owner:1; /* Who owns the buffer */
-};
-
-struct pqinf { /* Priority queue info */
-	u16 VID:12;
-	u16 CFI:1;
-	u16 priority:3;
-} __attribute__ ((__packed__));
-
-struct tdesc1 {
-	struct pqinf pqinf;
-	u8 TCR;
-	u8 TCPLS:2;
-	u8 reserved:2;
-	u8 CMDZ:4;
-} __attribute__ ((__packed__));
+//struct tdesc0 {
+//	u16 TSR; /* Transmit status register */
+//	u16 pktsize:14; /* Size of frame */
+//	u16 reserved:1;
+//	u16 owner:1; /* Who owns the buffer */
+//};
+
+//struct pqinf { /* Priority queue info */
+//	u16 VID:12;
+//	u16 CFI:1;
+//	u16 priority:3;
+//} __attribute__ ((__packed__));
+
+//struct tdesc1 {
+//	struct pqinf pqinf;
+//	u8 TCR;
+//	u8 TCPLS:2;
+//	u8 reserved:2;
+//	u8 CMDZ:4;
+//} __attribute__ ((__packed__));
 
 struct td_buf {
 	u32 pa_low;
-	u16 pa_high;
-	u16 bufsize:14;
-	u16 reserved:1;
-	u16 queue:1;
+//	u16 pa_high;
+//	u16 bufsize:14;
+//	u16 reserved:1;
+//	u16 queue:1;
+	u32 ltwo;
 } __attribute__ ((__packed__));
 
 struct tx_desc {
-	struct tdesc0 tdesc0;
-	struct tdesc1 tdesc1;
+//	struct tdesc0 tdesc0;
+//	struct tdesc1 tdesc1;
+	u32 tdesc0;
+	u32 tdesc1;
 	struct td_buf td_buf[7];
 };
 
@@ -279,6 +285,16 @@ enum velocity_owner {
 	OWNED_BY_NIC = 1
 };
 
+/* Constants added for the BE fixes */
+#define BE_OWNED_BY_NIC 0x80000000UL
+#define BE_INT_ENABLE 0x80000000UL
+#define BE_QUEUE_ENABLE 0x80000000UL
+#define BE_TCR_TIC 0x00800000UL
+#define BE_TCR_VETAG 0x00200000UL
+#define BE_TCR_TCPCK 0x00040000UL
+#define BE_TCR_UDPCK 0x00080000UL
+#define BE_TCR_IPCK 0x00100000UL
+
 
 /*
 * MAC registers and macros.
@@ -1698,6 +1714,7 @@ enum velocity_flow_cntl_type {
 };
 
 struct velocity_opt {
+	int velo_debug; /* debug flag */
 	int numrx; /* Number of RX descriptors */
 	int numtx; /* Number of TX descriptors */
 	enum speed_opt spd_dpx; /* Media link mode */
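
Note on the conversion pattern used throughout this patch: the Velocity NIC
defines its descriptors as little-endian 32-bit words, while C bitfield
layout follows the host, so the original bitfield structs were only correct
on little-endian machines. The patch therefore keeps each descriptor word in
bus order and byte-swaps the constants once via cpu_to_le32(), instead of
swapping the descriptor on every access. (For reads it spells the swap
cpu_to_le32() where le32_to_cpu() would be the conventional name; for a
fixed-width word the two perform the identical swap.) A minimal sketch of
the idea, using a hypothetical helper that is not part of the patch:

	#include <linux/types.h>	/* u32 */
	#include <asm/byteorder.h>	/* cpu_to_le32() */

	#define BE_OWNED_BY_NIC 0x80000000UL	/* bit 31 of the LE word */

	/* rdesc0 is read straight from DMA memory, still in little-endian
	 * bus order; swapping the constant instead of the descriptor lets
	 * this compile to a plain AND on both LE and BE hosts. (Mixing u32
	 * with the __le32 result of cpu_to_le32() mirrors the patch's own
	 * usage; sparse would warn, but gcc accepts it.) */
	static inline int nic_owns_desc(u32 rdesc0)
	{
		return (rdesc0 & cpu_to_le32(BE_OWNED_BY_NIC)) != 0;
	}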