1 diff -urN linux-2.6.19.2.orig/drivers/ide/cris/ide-cris.c linux-2.6.19.2.dev/drivers/ide/cris/ide-cris.c
2 --- linux-2.6.19.2.orig/drivers/ide/cris/ide-cris.c 2007-01-10 20:10:37.000000000 +0100
3 +++ linux-2.6.19.2.dev/drivers/ide/cris/ide-cris.c 2006-12-06 14:17:02.000000000 +0100
5 -/* $Id: cris-ide-driver.patch,v 1.1 2005/06/29 21:39:07 akpm Exp $
6 +/* $Id: ide-cris.c,v 1.10 2006/12/06 13:17:02 starvik Exp $
8 * Etrax specific IDE functions, like init and PIO-mode setting etc.
9 * Almost the entire ide.c is used for the rest of the Etrax ATA driver.
10 - * Copyright (c) 2000-2005 Axis Communications AB
11 + * Copyright (c) 2000-2006 Axis Communications AB
13 * Authors: Bjorn Wesen (initial version)
14 * Mikael Starvik (crisv32 port)
17 #define IDE_REGISTER_TIMEOUT 300
24 enum /* Transfer types */
27 #define ATA_PIO0_STROBE 39
28 #define ATA_PIO0_HOLD 9
32 + * On ETRAX FS, an interrupt remains latched and active until acked.
33 + * Further, ATA acks are without effect as long as INTRQ is asserted, as the
34 + * corresponding ATA interrupt is continuously set to active. There will be a
35 + * clearing ack at the usual cris_ide_ack_intr call, but that serves just to
36 + * gracefully handle an actual spurious interrupt or similar situation (which
37 + * will cause an early return without further actions, see the ide_intr
40 + * However, the normal case at time of this writing is that nothing has
41 + * changed from when INTRQ was asserted until the cris_ide_ack_intr call; no
42 + * ATA registers written and no status register read, so INTRQ will *remain*
43 + * asserted, thus *another* interrupt will be latched, and will be seen as a
44 + * spurious interrupt after the "real" interrupt is serviced. With lots of
45 + * ATA traffic (as in a trivial file-copy between two drives), this will trigger
46 + * the condition desc->irqs_unhandled > 99900 in
47 + * kernel/irq/spurious.c:note_interrupt and the system will halt.
49 + * To actually get rid of the interrupt corresponding to the current INTRQ
50 + * assertion, we make a second ack after the next ATA register read or write;
51 + * i.e. when INTRQ must be deasserted. At that time, we don't have the hwif
52 + * pointer available, so we need to stash a local copy (safe, because it'll be
53 + * set and cleared within the same spin_lock_irqsave region). The pointer
54 + * serves doubly as a boolean flag that an ack is needed. The caller must
55 + * NULL the pointer after the "second ack".
58 +static ide_hwif_t *hwif_to_ack;
61 cris_ide_ack_intr(ide_hwif_t* hwif)
63 - reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2,
65 + * The interrupt is shared so we need to find the interface bit number
66 + * to ack. We define the ATA I/O register addresses to have the
67 + * format of ata rw_ctrl2 register contents, conveniently holding this
70 + reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2,
71 int, hwif->io_ports[0]);
72 REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel);
74 + /* Prepare to ack again, see above. */
82 cris_ide_write_command(unsigned long command)
85 REG_WR_INT(ata, regi_ata, rw_ctrl2, command); /* write data to the drive's register */
88 + * Perform a pending ack if needed; see hwif_to_ack definition. Perhaps
89 + * we should check closer that this call is really a part of the
90 + * preparation to read the ATA status register or write to the ATA
91 + * command register (causing deassert of INTRQ; see the ATA standard),
92 + * but at time of this writing (and expected to sanely remain so), the
93 + * first ATA register activity after a cris_ide_ack_intr call is
94 + * certain to do exactly that.
97 + /* The drive may take this long to deassert INTRQ. */
99 + cris_ide_ack_intr(hwif_to_ack);
100 + hwif_to_ack = NULL;
107 reg_ata_rw_ctrl2 ctrl2 = {0};
113 return REG_TYPE_CONV(int, reg_ata_rw_ctrl2, ctrl2);
116 @@ -184,14 +238,14 @@
118 intr_mask.bus0 = regk_ata_yes;
119 intr_mask.bus1 = regk_ata_yes;
120 - intr_mask.bus2 = regk_ata_yes;
121 + intr_mask.bus2 = regk_ata_yes;
122 intr_mask.bus3 = regk_ata_yes;
124 REG_WR(ata, regi_ata, rw_intr_mask, intr_mask);
126 crisv32_request_dma(2, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
127 crisv32_request_dma(3, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
130 crisv32_pinmux_alloc_fixed(pinmux_ata);
131 crisv32_pinmux_alloc_fixed(pinmux_ata0);
132 crisv32_pinmux_alloc_fixed(pinmux_ata1);
133 @@ -204,14 +258,15 @@
134 DMA_ENABLE(regi_dma3);
136 DMA_WR_CMD (regi_dma2, regk_dma_set_w_size2);
137 - DMA_WR_CMD (regi_dma3, regk_dma_set_w_size2);
138 + DMA_WR_CMD (regi_dma3, regk_dma_set_w_size2);
141 static dma_descr_context mycontext __attribute__ ((__aligned__(32)));
143 #define cris_dma_descr_type dma_descr_data
144 -#define cris_pio_read regk_ata_rd
145 -#define cris_ultra_mask 0x7
146 +#define cris_pio_read (regk_ata_rd << 24)
147 +#define cris_ultra_mask 0x0 /* 0x7 for UDMA */
148 +#define IRQ ATA_INTR_VECT
149 #define MAX_DESCR_SIZE 0xffffffffUL
153 d->buf = (char*)virt_to_phys(buf);
154 d->after = d->buf + len;
156 + /* assume descriptors are consecutively placed in memory */
157 + d->next = last ? 0 : (cris_dma_descr_type*)virt_to_phys(d+1);
162 mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
163 mycontext.saved_data_buf = d->buf;
164 /* start the dma channel */
166 + flush_dma_context(&mycontext); // Cache bug workaround
167 DMA_START_CONTEXT(dir ? regi_dma3 : regi_dma2, virt_to_phys(&mycontext));
170 /* initiate a multi word dma read using PIO handshaking */
171 trf_cnt.cnt = len >> 1;
172 /* Due to a "feature" the transfer count has to be one extra word for UDMA. */
175 ctrl2.rw = dir ? regk_ata_rd : regk_ata_wr;
176 ctrl2.trf_mode = regk_ata_dma;
177 - ctrl2.hsh = type == TYPE_PIO ? regk_ata_pio :
178 + ctrl2.hsh = type == TYPE_PIO ? regk_ata_pio :
179 type == TYPE_DMA ? regk_ata_dma : regk_ata_udma;
180 ctrl2.multi = regk_ata_yes;
181 ctrl2.dma_size = regk_ata_word;
183 #define ATA_PIO0_STROBE 19
184 #define ATA_PIO0_HOLD 4
188 cris_ide_ack_intr(ide_hwif_t* hwif)
191 @@ -348,13 +407,13 @@
195 - return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy) ;
196 + return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy) ;
202 - return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy) ;
203 + return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy) ;
207 @@ -364,12 +423,12 @@
208 *data = (unsigned short)status;
209 return status & IO_MASK(R_ATA_STATUS_DATA, dav);
214 cris_ide_write_command(unsigned long command)
216 - *R_ATA_CTRL_DATA = command;
218 + *R_ATA_CTRL_DATA = command;
222 cris_ide_set_speed(int type, int setup, int strobe, int hold)
224 cris_ide_reg_addr(unsigned long addr, int cs0, int cs1)
226 return IO_FIELD(R_ATA_CTRL_DATA, addr, addr) |
227 - IO_FIELD(R_ATA_CTRL_DATA, cs0, cs0) |
228 - IO_FIELD(R_ATA_CTRL_DATA, cs1, cs1);
229 + IO_FIELD(R_ATA_CTRL_DATA, cs0, cs0 ? 0 : 1) |
230 + IO_FIELD(R_ATA_CTRL_DATA, cs1, cs1 ? 0 : 1);
235 #define cris_dma_descr_type etrax_dma_descr
236 #define cris_pio_read IO_STATE(R_ATA_CTRL_DATA, rw, read)
237 #define cris_ultra_mask 0x0
239 #define MAX_DESCR_SIZE 0x10000UL
244 d->buf = virt_to_phys(buf);
245 d->sw_len = len == MAX_DESCR_SIZE ? 0 : len;
248 + d->ctrl = last ? d_eol : 0;
249 + d->next = last ? 0 : virt_to_phys(d+1); /* assumes descr's in array */
252 static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir, int type, int len)
253 @@ -521,14 +581,14 @@
254 *R_DMA_CH2_FIRST = virt_to_phys(d);
255 *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, start);
259 /* initiate a multi word dma read using DMA handshaking */
261 *R_ATA_TRANSFER_CNT =
262 IO_FIELD(R_ATA_TRANSFER_CNT, count, len >> 1);
264 cmd = dir ? IO_STATE(R_ATA_CTRL_DATA, rw, read) : IO_STATE(R_ATA_CTRL_DATA, rw, write);
265 - cmd |= type == TYPE_PIO ? IO_STATE(R_ATA_CTRL_DATA, handsh, pio) :
266 + cmd |= type == TYPE_PIO ? IO_STATE(R_ATA_CTRL_DATA, handsh, pio) :
267 IO_STATE(R_ATA_CTRL_DATA, handsh, dma);
277 cris_ide_outw(unsigned short data, unsigned long reg) {
281 printk("ATA timeout reg 0x%lx := 0x%x\n", reg, data);
283 - cris_ide_write_command(reg|data); /* write data to the drive's register */
284 + cris_ide_write_command(reg|data); /* write data to the drive's register */
286 timeleft = IDE_REGISTER_TIMEOUT;
287 /* wait for transmitter ready */
288 @@ -684,13 +744,15 @@
289 static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
290 static int cris_dma_off (ide_drive_t *drive);
291 static int cris_dma_on (ide_drive_t *drive);
292 +static int cris_dma_host_off (ide_drive_t *drive);
293 +static int cris_dma_host_on (ide_drive_t *drive);
295 static void tune_cris_ide(ide_drive_t *drive, u8 pio)
297 int setup, strobe, hold;
303 setup = ATA_PIO0_SETUP;
304 strobe = ATA_PIO0_STROBE;
306 setup = ATA_PIO4_SETUP;
307 strobe = ATA_PIO4_STROBE;
308 hold = ATA_PIO4_HOLD;
324 if (speed >= XFER_UDMA_0)
325 cris_ide_set_speed(TYPE_UDMA, cyc, dvs, 0);
327 - cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
328 + cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
332 @@ -790,11 +852,13 @@
334 for(h = 0; h < MAX_HWIFS; h++) {
335 ide_hwif_t *hwif = &ide_hwifs[h];
336 - ide_setup_ports(&hw, cris_ide_base_address(h),
337 + memset(&hw, 0, sizeof(hw));
338 + ide_setup_ports(&hw, cris_ide_base_address(h),
340 0, 0, cris_ide_ack_intr,
341 - ide_default_irq(0));
343 ide_register_hw(&hw, &hwif);
346 hwif->chipset = ide_etrax100;
347 hwif->tuneproc = &tune_cris_ide;
348 @@ -814,13 +878,15 @@
349 hwif->OUTBSYNC = &cris_ide_outbsync;
350 hwif->INB = &cris_ide_inb;
351 hwif->INW = &cris_ide_inw;
352 - hwif->ide_dma_host_off = &cris_dma_off;
353 - hwif->ide_dma_host_on = &cris_dma_on;
354 + hwif->ide_dma_host_off = &cris_dma_host_off;
355 + hwif->ide_dma_host_on = &cris_dma_host_on;
356 hwif->ide_dma_off_quietly = &cris_dma_off;
357 + hwif->ide_dma_on = &cris_dma_on;
359 hwif->ultra_mask = cris_ultra_mask;
360 hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */
361 hwif->swdma_mask = 0x07; /* Singleword DMA 0-2 */
362 + hwif->rqsize = 256;
366 @@ -835,13 +901,25 @@
367 cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0);
370 +static int cris_dma_host_off (ide_drive_t *drive)
375 +static int cris_dma_host_on (ide_drive_t *drive)
380 static int cris_dma_off (ide_drive_t *drive)
382 + drive->using_dma = 0;
386 static int cris_dma_on (ide_drive_t *drive)
388 + drive->using_dma = 1;
392 @@ -958,30 +1036,28 @@
393 size += sg_dma_len(sg);
396 - /* did we run out of descriptors? */
398 - if(count >= MAX_DMA_DESCRS) {
399 - printk("%s: too few DMA descriptors\n", drive->name);
403 - /* however, this case is more difficult - rw_trf_cnt cannot be more
404 - than 65536 words per transfer, so in that case we need to either
405 + /* rw_trf_cnt cannot be more than 131072 words per transfer,
406 + (- 1 word for UDMA CRC) so in that case we need to either:
407 1) use a DMA interrupt to re-trigger rw_trf_cnt and continue with
409 2) simply do the request here, and get dma_intr to only ide_end_request on
410 those blocks that were actually set-up for transfer.
411 + (The ide framework will issue a new request for the remainder)
414 - if(ata_tot_size + size > 131072) {
415 + if(ata_tot_size + size > 262140) {
416 printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, (int)size);
420 - /* If size > MAX_DESCR_SIZE it has to be splitted into new descriptors. Since we
421 - don't handle size > 131072 only one split is necessary */
422 + /* If size > MAX_DESCR_SIZE it has to be split into new descriptors. */
424 - if(size > MAX_DESCR_SIZE) {
425 + while (size > MAX_DESCR_SIZE) {
426 + /* did we run out of descriptors? */
427 + if(count >= MAX_DMA_DESCRS) {
428 + printk("%s: too few DMA descriptors\n", drive->name);
431 cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, MAX_DESCR_SIZE, 0);
433 ata_tot_size += MAX_DESCR_SIZE;
434 @@ -989,6 +1065,11 @@
435 addr += MAX_DESCR_SIZE;
438 + /* did we run out of descriptors? */
439 + if(count >= MAX_DMA_DESCRS) {
440 + printk("%s: too few DMA descriptors\n", drive->name);
443 cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, size,i ? 0 : 1);
445 ata_tot_size += size;
446 @@ -1050,8 +1131,12 @@
448 if (id && (id->capability & 1)) {
449 if (ide_use_dma(drive)) {
450 - if (cris_config_drive_for_dma(drive))
451 - return hwif->ide_dma_on(drive);
452 + if (cris_config_drive_for_dma(drive)) {
453 + if (hwif->ide_dma_on)
454 + return hwif->ide_dma_on(drive);
461 --- linux-2.6.19.2.orig/drivers/serial/crisv10.c 2007-01-10 20:10:37.000000000 +0100
462 +++ linux-2.6.19.2.dev/drivers/serial/crisv10.c 2007-01-09 10:30:54.000000000 +0100
465 * Serial port driver for the ETRAX 100LX chip
467 - * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003 Axis Communications AB
468 + * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004 Axis Communications AB
470 * Many, many authors. Based once upon a time on serial.c for 16x50.
476 +#include <asm/dma.h>
477 #include <asm/system.h>
478 #include <asm/bitops.h>
479 #include <linux/delay.h>
481 /* non-arch dependent serial structures are in linux/serial.h */
482 #include <linux/serial.h>
483 /* while we keep our own stuff (struct e100_serial) in a local .h file */
485 +#include "crisv10.h"
486 #include <asm/fasttimer.h>
487 +#include <asm/arch/io_interface_mux.h>
489 #ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
490 #ifndef CONFIG_ETRAX_FAST_TIMER
491 @@ -586,11 +588,10 @@
492 static void change_speed(struct e100_serial *info);
493 static void rs_throttle(struct tty_struct * tty);
494 static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
495 -static int rs_write(struct tty_struct * tty, int from_user,
496 +static int rs_write(struct tty_struct * tty,
497 const unsigned char *buf, int count);
498 #ifdef CONFIG_ETRAX_RS485
499 -static int e100_write_rs485(struct tty_struct * tty, int from_user,
500 - const unsigned char *buf, int count);
501 +static int e100_write_rs485(struct tty_struct * tty, const unsigned char *buf, int count);
503 static int get_lsr_info(struct e100_serial * info, unsigned int *value);
505 @@ -677,20 +678,39 @@
509 + .dma_owner = dma_ser0,
510 + .io_if = if_serial_0,
511 #ifdef CONFIG_ETRAX_SERIAL_PORT0
513 #ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
514 .dma_out_enabled = 1,
515 + .dma_out_nbr = SER0_TX_DMA_NBR,
516 + .dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR,
517 + .dma_out_irq_flags = IRQF_DISABLED,
518 + .dma_out_irq_description = "serial 0 dma tr",
520 .dma_out_enabled = 0,
521 + .dma_out_nbr = UINT_MAX,
522 + .dma_out_irq_nbr = 0,
523 + .dma_out_irq_flags = 0,
524 + .dma_out_irq_description = NULL,
526 #ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
528 + .dma_in_nbr = SER0_RX_DMA_NBR,
529 + .dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR,
530 + .dma_in_irq_flags = IRQF_DISABLED,
531 + .dma_in_irq_description = "serial 0 dma rec",
533 - .dma_in_enabled = 0
534 + .dma_in_enabled = 0,
535 + .dma_in_nbr = UINT_MAX,
536 + .dma_in_irq_nbr = 0,
537 + .dma_in_irq_flags = 0,
538 + .dma_in_irq_description = NULL,
542 + .io_if_description = NULL,
543 .dma_out_enabled = 0,
546 @@ -712,20 +732,42 @@
550 + .dma_owner = dma_ser1,
551 + .io_if = if_serial_1,
552 #ifdef CONFIG_ETRAX_SERIAL_PORT1
554 + .io_if_description = "ser1",
555 #ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
556 .dma_out_enabled = 1,
557 + .dma_out_nbr = SER1_TX_DMA_NBR,
558 + .dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR,
559 + .dma_out_irq_flags = IRQF_DISABLED,
560 + .dma_out_irq_description = "serial 1 dma tr",
562 .dma_out_enabled = 0,
563 + .dma_out_nbr = UINT_MAX,
564 + .dma_out_irq_nbr = 0,
565 + .dma_out_irq_flags = 0,
566 + .dma_out_irq_description = NULL,
568 #ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
570 + .dma_in_nbr = SER1_RX_DMA_NBR,
571 + .dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR,
572 + .dma_in_irq_flags = IRQF_DISABLED,
573 + .dma_in_irq_description = "serial 1 dma rec",
575 - .dma_in_enabled = 0
576 + .dma_in_enabled = 0,
578 + .dma_in_nbr = UINT_MAX,
579 + .dma_in_irq_nbr = 0,
580 + .dma_in_irq_flags = 0,
581 + .dma_in_irq_description = NULL,
585 + .io_if_description = NULL,
586 + .dma_in_irq_nbr = 0,
587 .dma_out_enabled = 0,
590 @@ -746,20 +788,40 @@
594 + .dma_owner = dma_ser2,
595 + .io_if = if_serial_2,
596 #ifdef CONFIG_ETRAX_SERIAL_PORT2
598 + .io_if_description = "ser2",
599 #ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
600 .dma_out_enabled = 1,
601 + .dma_out_nbr = SER2_TX_DMA_NBR,
602 + .dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR,
603 + .dma_out_irq_flags = IRQF_DISABLED,
604 + .dma_out_irq_description = "serial 2 dma tr",
606 .dma_out_enabled = 0,
607 + .dma_out_nbr = UINT_MAX,
608 + .dma_out_irq_nbr = 0,
609 + .dma_out_irq_flags = 0,
610 + .dma_out_irq_description = NULL,
612 #ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
614 + .dma_in_nbr = SER2_RX_DMA_NBR,
615 + .dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR,
616 + .dma_in_irq_flags = IRQF_DISABLED,
617 + .dma_in_irq_description = "serial 2 dma rec",
619 - .dma_in_enabled = 0
620 + .dma_in_enabled = 0,
621 + .dma_in_nbr = UINT_MAX,
622 + .dma_in_irq_nbr = 0,
623 + .dma_in_irq_flags = 0,
624 + .dma_in_irq_description = NULL,
628 + .io_if_description = NULL,
629 .dma_out_enabled = 0,
632 @@ -780,20 +842,40 @@
636 + .dma_owner = dma_ser3,
637 + .io_if = if_serial_3,
638 #ifdef CONFIG_ETRAX_SERIAL_PORT3
640 + .io_if_description = "ser3",
641 #ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
642 .dma_out_enabled = 1,
643 + .dma_out_nbr = SER3_TX_DMA_NBR,
644 + .dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR,
645 + .dma_out_irq_flags = IRQF_DISABLED,
646 + .dma_out_irq_description = "serial 3 dma tr",
648 .dma_out_enabled = 0,
649 + .dma_out_nbr = UINT_MAX,
650 + .dma_out_irq_nbr = 0,
651 + .dma_out_irq_flags = 0,
652 + .dma_out_irq_description = NULL,
654 #ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
656 + .dma_in_nbr = SER3_RX_DMA_NBR,
657 + .dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR,
658 + .dma_in_irq_flags = IRQF_DISABLED,
659 + .dma_in_irq_description = "serial 3 dma rec",
661 - .dma_in_enabled = 0
662 + .dma_in_enabled = 0,
663 + .dma_in_nbr = UINT_MAX,
664 + .dma_in_irq_nbr = 0,
665 + .dma_in_irq_flags = 0,
666 + .dma_in_irq_description = NULL
670 + .io_if_description = NULL,
671 .dma_out_enabled = 0,
674 @@ -1414,12 +1496,11 @@
680 + local_irq_save(flags);
681 *e100_modem_pins[info->line].dtr_shadow &= ~mask;
682 *e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask);
683 *e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow;
684 - restore_flags(flags);
685 + local_irq_restore(flags);
688 #ifdef SERIAL_DEBUG_IO
689 @@ -1438,12 +1519,11 @@
691 #ifndef CONFIG_SVINTO_SIM
695 + local_irq_save(flags);
696 info->rx_ctrl &= ~E100_RTS_MASK;
697 info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */
698 info->port[REG_REC_CTRL] = info->rx_ctrl;
699 - restore_flags(flags);
700 + local_irq_restore(flags);
701 #ifdef SERIAL_DEBUG_IO
702 printk("ser%i rts %i\n", info->line, set);
704 @@ -1461,12 +1541,11 @@
705 unsigned char mask = e100_modem_pins[info->line].ri_mask;
710 + local_irq_save(flags);
711 *e100_modem_pins[info->line].ri_shadow &= ~mask;
712 *e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask);
713 *e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
714 - restore_flags(flags);
715 + local_irq_restore(flags);
719 @@ -1479,12 +1558,11 @@
720 unsigned char mask = e100_modem_pins[info->line].cd_mask;
725 + local_irq_save(flags);
726 *e100_modem_pins[info->line].cd_shadow &= ~mask;
727 *e100_modem_pins[info->line].cd_shadow |= (set ? 0 : mask);
728 *e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
729 - restore_flags(flags);
730 + local_irq_restore(flags);
734 @@ -1558,8 +1636,7 @@
735 /* Disable output DMA channel for the serial port in question
736 * ( set to something other then serialX)
740 + local_irq_save(flags);
741 DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line));
742 if (info->line == 0) {
743 if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) ==
744 @@ -1587,7 +1664,7 @@
747 *R_GEN_CONFIG = genconfig_shadow;
748 - restore_flags(flags);
749 + local_irq_restore(flags);
753 @@ -1595,8 +1672,7 @@
759 + local_irq_save(flags);
760 DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line));
761 /* Enable output DMA channel for the serial port in question */
762 if (info->line == 0) {
763 @@ -1613,7 +1689,7 @@
764 genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3);
766 *R_GEN_CONFIG = genconfig_shadow;
767 - restore_flags(flags);
768 + local_irq_restore(flags);
771 static void e100_disable_rxdma_channel(struct e100_serial *info)
772 @@ -1623,8 +1699,7 @@
773 /* Disable input DMA channel for the serial port in question
774 * ( set to something other then serialX)
778 + local_irq_save(flags);
779 if (info->line == 0) {
780 if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) ==
781 IO_STATE(R_GEN_CONFIG, dma7, serial0)) {
782 @@ -1651,7 +1726,7 @@
785 *R_GEN_CONFIG = genconfig_shadow;
786 - restore_flags(flags);
787 + local_irq_restore(flags);
791 @@ -1659,8 +1734,7 @@
797 + local_irq_save(flags);
798 /* Enable input DMA channel for the serial port in question */
799 if (info->line == 0) {
800 genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
801 @@ -1676,7 +1750,7 @@
802 genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3);
804 *R_GEN_CONFIG = genconfig_shadow;
805 - restore_flags(flags);
806 + local_irq_restore(flags);
809 #ifdef SERIAL_HANDLE_EARLY_ERRORS
810 @@ -1783,7 +1857,7 @@
814 -e100_write_rs485(struct tty_struct *tty, int from_user,
815 +e100_write_rs485(struct tty_struct *tty,
816 const unsigned char *buf, int count)
818 struct e100_serial * info = (struct e100_serial *)tty->driver_data;
819 @@ -1796,7 +1870,7 @@
821 info->rs485.enabled = 1;
822 /* rs_write now deals with RS485 if enabled */
823 - count = rs_write(tty, from_user, buf, count);
824 + count = rs_write(tty, buf, count);
825 info->rs485.enabled = old_enabled;
828 @@ -1834,7 +1908,7 @@
832 - save_flags(flags); cli();
833 + local_irq_save(flags);
834 DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n",
835 CIRC_CNT(info->xmit.head,
836 info->xmit.tail,SERIAL_XMIT_SIZE)));
837 @@ -1846,7 +1920,7 @@
840 *((unsigned long *)&info->port[REG_XOFF]) = xoff;
841 - restore_flags(flags);
842 + local_irq_restore(flags);
846 @@ -1858,7 +1932,7 @@
850 - save_flags(flags); cli();
851 + local_irq_save(flags);
852 DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n",
853 CIRC_CNT(info->xmit.head,
854 info->xmit.tail,SERIAL_XMIT_SIZE)));
855 @@ -1873,7 +1947,7 @@
856 info->xmit.head != info->xmit.tail && info->xmit.buf)
857 e100_enable_serial_tx_ready_irq(info);
859 - restore_flags(flags);
860 + local_irq_restore(flags);
864 @@ -2053,8 +2127,7 @@
865 static void flush_timeout_function(unsigned long data);
866 #define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\
867 unsigned long timer_flags; \
868 - save_flags(timer_flags); \
870 + local_irq_save(timer_flags); \
871 if (fast_timers[info->line].function == NULL) { \
872 serial_fast_timer_started++; \
873 TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \
874 @@ -2068,7 +2141,7 @@
876 TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \
878 - restore_flags(timer_flags); \
879 + local_irq_restore(timer_flags); \
881 #define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec)
883 @@ -2097,8 +2170,7 @@
889 + local_irq_save(flags);
891 if (!info->first_recv_buffer)
892 info->first_recv_buffer = buffer;
893 @@ -2111,7 +2183,7 @@
894 if (info->recv_cnt > info->max_recv_cnt)
895 info->max_recv_cnt = info->recv_cnt;
897 - restore_flags(flags);
898 + local_irq_restore(flags);
902 @@ -2131,11 +2203,7 @@
905 struct tty_struct *tty = info->tty;
906 - *tty->flip.char_buf_ptr = data;
907 - *tty->flip.flag_buf_ptr = flag;
908 - tty->flip.flag_buf_ptr++;
909 - tty->flip.char_buf_ptr++;
911 + tty_insert_flip_char(tty, data, flag);
915 @@ -2320,7 +2388,6 @@
919 - info->tty->flip.count = 0;
920 if (info->uses_dma_in) {
921 /* reset the input dma channel to be sure it works */
923 @@ -2482,70 +2549,21 @@
925 struct tty_struct *tty;
926 struct etrax_recv_buffer *buffer;
927 - unsigned int length;
931 - if (!info->first_recv_buffer)
936 + local_irq_save(flags);
939 - if (!(tty = info->tty)) {
940 - restore_flags(flags);
942 + local_irq_restore(flags);
946 - length = tty->flip.count;
947 - /* Don't flip more than the ldisc has room for.
948 - * The return value from ldisc.receive_room(tty) - might not be up to
949 - * date, the previous flip of up to TTY_FLIPBUF_SIZE might be on the
950 - * processed and not accounted for yet.
951 - * Since we use DMA, 1 SERIAL_DESCR_BUF_SIZE could be on the way.
952 - * Lets buffer data here and let flow control take care of it.
953 - * Since we normally flip large chunks, the ldisc don't react
954 - * with throttle until too late if we flip to much.
956 - max_flip_size = tty->ldisc.receive_room(tty);
957 - if (max_flip_size < 0)
959 - if (max_flip_size <= (TTY_FLIPBUF_SIZE + /* Maybe not accounted for */
960 - length + info->recv_cnt + /* We have this queued */
961 - 2*SERIAL_DESCR_BUF_SIZE + /* This could be on the way */
962 - TTY_THRESHOLD_THROTTLE)) { /* Some slack */
963 - /* check TTY_THROTTLED first so it indicates our state */
964 - if (!test_and_set_bit(TTY_THROTTLED, &tty->flags)) {
965 - DFLOW(DEBUG_LOG(info->line,"flush_to_flip throttles room %lu\n", max_flip_size));
969 - else if (max_flip_size <= (TTY_FLIPBUF_SIZE + /* Maybe not accounted for */
970 - length + info->recv_cnt + /* We have this queued */
971 - SERIAL_DESCR_BUF_SIZE + /* This could be on the way */
972 - TTY_THRESHOLD_THROTTLE)) { /* Some slack */
973 - DFLOW(DEBUG_LOG(info->line,"flush_to_flip throttles again! %lu\n", max_flip_size));
979 - if (max_flip_size > TTY_FLIPBUF_SIZE)
980 - max_flip_size = TTY_FLIPBUF_SIZE;
982 - while ((buffer = info->first_recv_buffer) && length < max_flip_size) {
983 + while ((buffer = info->first_recv_buffer)) {
984 unsigned int count = buffer->length;
986 - if (length + count > max_flip_size)
987 - count = max_flip_size - length;
989 - memcpy(tty->flip.char_buf_ptr + length, buffer->buffer, count);
990 - memset(tty->flip.flag_buf_ptr + length, TTY_NORMAL, count);
991 - tty->flip.flag_buf_ptr[length] = buffer->error;
994 + tty_insert_flip_string(tty, buffer->buffer, count);
995 info->recv_cnt -= count;
996 - DFLIP(DEBUG_LOG(info->line,"flip: %i\n", length));
998 if (count == buffer->length) {
999 info->first_recv_buffer = buffer->next;
1000 @@ -2560,24 +2578,7 @@
1001 if (!info->first_recv_buffer)
1002 info->last_recv_buffer = NULL;
1004 - tty->flip.count = length;
1005 - DFLIP(if (tty->ldisc.chars_in_buffer(tty) > 3500) {
1006 - DEBUG_LOG(info->line, "ldisc %lu\n",
1007 - tty->ldisc.chars_in_buffer(tty));
1008 - DEBUG_LOG(info->line, "flip.count %lu\n",
1012 - restore_flags(flags);
1016 - DEBUG_LOG(info->line, "*** rxtot %i\n", info->icount.rx);
1017 - DEBUG_LOG(info->line, "ldisc %lu\n", tty->ldisc.chars_in_buffer(tty));
1018 - DEBUG_LOG(info->line, "room %lu\n", tty->ldisc.receive_room(tty));
1022 + local_irq_restore(flags);
1024 /* this includes a check for low-latency */
1025 tty_flip_buffer_push(tty);
1026 @@ -2722,21 +2723,7 @@
1027 printk("!NO TTY!\n");
1030 - if (tty->flip.count >= TTY_FLIPBUF_SIZE - TTY_THRESHOLD_THROTTLE) {
1031 - /* check TTY_THROTTLED first so it indicates our state */
1032 - if (!test_and_set_bit(TTY_THROTTLED, &tty->flags)) {
1033 - DFLOW(DEBUG_LOG(info->line, "rs_throttle flip.count: %i\n", tty->flip.count));
1037 - if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
1038 - DEBUG_LOG(info->line, "force FLIP! %i\n", tty->flip.count);
1039 - tty->flip.work.func((void *) tty);
1040 - if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
1041 - DEBUG_LOG(info->line, "FLIP FULL! %i\n", tty->flip.count);
1042 - return info; /* if TTY_DONT_FLIP is set */
1046 /* Read data and status at the same time */
1047 data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]);
1049 @@ -2789,27 +2776,25 @@
1050 DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
1051 info->errorcode = ERRCODE_INSERT_BREAK;
1053 + unsigned char data = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read);
1054 + char flag = TTY_NORMAL;
1055 if (info->errorcode == ERRCODE_INSERT_BREAK) {
1056 - info->icount.brk++;
1057 - *tty->flip.char_buf_ptr = 0;
1058 - *tty->flip.flag_buf_ptr = TTY_BREAK;
1059 - tty->flip.flag_buf_ptr++;
1060 - tty->flip.char_buf_ptr++;
1061 - tty->flip.count++;
1062 + struct tty_struct *tty = info->tty;
1063 + tty_insert_flip_char(tty, 0, flag);
1066 - *tty->flip.char_buf_ptr = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read);
1068 if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) {
1069 info->icount.parity++;
1070 - *tty->flip.flag_buf_ptr = TTY_PARITY;
1071 + flag = TTY_PARITY;
1072 } else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) {
1073 info->icount.overrun++;
1074 - *tty->flip.flag_buf_ptr = TTY_OVERRUN;
1075 + flag = TTY_OVERRUN;
1076 } else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) {
1077 info->icount.frame++;
1078 - *tty->flip.flag_buf_ptr = TTY_FRAME;
1081 + tty_insert_flip_char(tty, data, flag);
1082 info->errorcode = 0;
1084 info->break_detected_cnt = 0;
1085 @@ -2825,16 +2810,12 @@
1086 log_int(rdpc(), 0, 0);
1089 - *tty->flip.char_buf_ptr = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read);
1090 - *tty->flip.flag_buf_ptr = 0;
1091 + tty_insert_flip_char(tty, IO_EXTRACT(R_SERIAL0_READ, data_in, data_read), TTY_NORMAL);
1093 DEBUG_LOG(info->line, "ser_rx int but no data_avail %08lX\n", data_read);
1097 - tty->flip.flag_buf_ptr++;
1098 - tty->flip.char_buf_ptr++;
1099 - tty->flip.count++;
1101 data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]);
1102 if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
1103 @@ -2972,7 +2953,7 @@
1105 unsigned char rstat;
1106 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char));
1107 - save_flags(flags); cli();
1108 + local_irq_save(flags);
1109 rstat = info->port[REG_STATUS];
1110 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
1112 @@ -2981,7 +2962,7 @@
1114 /* We must enable since it is disabled in ser_interrupt */
1115 e100_enable_serial_tx_ready_irq(info);
1116 - restore_flags(flags);
1117 + local_irq_restore(flags);
1120 if (info->uses_dma_out) {
1121 @@ -2989,7 +2970,7 @@
1123 /* We only use normal tx interrupt when sending x_char */
1124 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0));
1125 - save_flags(flags); cli();
1126 + local_irq_save(flags);
1127 rstat = info->port[REG_STATUS];
1128 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
1129 e100_disable_serial_tx_ready_irq(info);
1130 @@ -3002,7 +2983,7 @@
1133 *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue);
1134 - restore_flags(flags);
1135 + local_irq_restore(flags);
1138 /* Normal char-by-char interrupt */
1139 @@ -3016,7 +2997,7 @@
1141 DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail]));
1142 /* Send a byte, rs485 timing is critical so turn of ints */
1143 - save_flags(flags); cli();
1144 + local_irq_save(flags);
1145 info->port[REG_TR_DATA] = info->xmit.buf[info->xmit.tail];
1146 info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1);
1148 @@ -3040,7 +3021,7 @@
1149 /* We must enable since it is disabled in ser_interrupt */
1150 e100_enable_serial_tx_ready_irq(info);
1152 - restore_flags(flags);
1153 + local_irq_restore(flags);
1155 if (CIRC_CNT(info->xmit.head,
1157 @@ -3065,7 +3046,7 @@
1159 static volatile unsigned long reentered_ready_mask = 0;
1161 - save_flags(flags); cli();
1162 + local_irq_save(flags);
1163 irq_mask1_rd = *R_IRQ_MASK1_RD;
1164 /* First handle all rx interrupts with ints disabled */
1166 @@ -3110,7 +3091,7 @@
1167 /* Unblock the serial interrupt */
1168 *R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set);
1171 + local_irq_enable();
1172 ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */
1174 for (i = 0; i < NR_PORTS; i++) {
1175 @@ -3123,11 +3104,11 @@
1178 /* handle_ser_tx_interrupt enables tr_ready interrupts */
1180 + local_irq_disable();
1181 /* Handle reentered TX interrupt */
1182 irq_mask1_rd = reentered_ready_mask;
1185 + local_irq_disable();
1188 unsigned long ready_mask;
1189 @@ -3143,7 +3124,7 @@
1193 - restore_flags(flags);
1194 + local_irq_restore(flags);
1195 return IRQ_RETVAL(handled);
1196 } /* ser_interrupt */
1198 @@ -3192,13 +3173,12 @@
1202 - save_flags(flags);
1204 + local_irq_save(flags);
1206 /* if it was already initialized, skip this */
1208 if (info->flags & ASYNC_INITIALIZED) {
1209 - restore_flags(flags);
1210 + local_irq_restore(flags);
1211 free_page(xmit_page);
1214 @@ -3324,7 +3304,7 @@
1216 info->flags |= ASYNC_INITIALIZED;
1218 - restore_flags(flags);
1219 + local_irq_restore(flags);
1223 @@ -3375,8 +3355,7 @@
1227 - save_flags(flags);
1228 - cli(); /* Disable interrupts */
1229 + local_irq_save(flags);
1231 if (info->xmit.buf) {
1232 free_page((unsigned long)info->xmit.buf);
1233 @@ -3400,7 +3379,7 @@
1234 set_bit(TTY_IO_ERROR, &info->tty->flags);
1236 info->flags &= ~ASYNC_INITIALIZED;
1237 - restore_flags(flags);
1238 + local_irq_restore(flags);
1242 @@ -3492,8 +3471,7 @@
1244 #ifndef CONFIG_SVINTO_SIM
1245 /* start with default settings and then fill in changes */
1246 - save_flags(flags);
1248 + local_irq_save(flags);
1249 /* 8 bit, no/even parity */
1250 info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) |
1251 IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) |
1252 @@ -3557,7 +3535,7 @@
1255 *((unsigned long *)&info->port[REG_XOFF]) = xoff;
1256 - restore_flags(flags);
1257 + local_irq_restore(flags);
1258 #endif /* !CONFIG_SVINTO_SIM */
1260 update_char_time(info);
1261 @@ -3585,13 +3563,12 @@
1263 /* this protection might not exactly be necessary here */
1265 - save_flags(flags);
1267 + local_irq_save(flags);
1268 start_transmit(info);
1269 - restore_flags(flags);
1270 + local_irq_restore(flags);
1273 -static int rs_raw_write(struct tty_struct * tty, int from_user,
1274 +static int rs_raw_write(struct tty_struct * tty,
1275 const unsigned char *buf, int count)
1278 @@ -3614,72 +3591,37 @@
1279 SIMCOUT(buf, count);
1282 - save_flags(flags);
1283 + local_save_flags(flags);
1284 DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
1285 DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
1288 - /* the cli/restore_flags pairs below are needed because the
1289 + /* the local_irq_disable/local_irq_restore pairs below are needed because the
1290 * DMA interrupt handler moves the info->xmit values. the memcpy
1291 * needs to be in the critical region unfortunately, because we
1292 * need to read xmit values, memcpy, write xmit values in one
1293 * atomic operation... this could perhaps be avoided by more clever
1297 - mutex_lock(&tmp_buf_mutex);
1300 - c = CIRC_SPACE_TO_END(info->xmit.head,
1302 - SERIAL_XMIT_SIZE);
1308 - c -= copy_from_user(tmp_buf, buf, c);
1315 - c1 = CIRC_SPACE_TO_END(info->xmit.head,
1317 - SERIAL_XMIT_SIZE);
1320 - memcpy(info->xmit.buf + info->xmit.head, tmp_buf, c);
1321 - info->xmit.head = ((info->xmit.head + c) &
1322 - (SERIAL_XMIT_SIZE-1));
1323 - restore_flags(flags);
1328 - mutex_unlock(&tmp_buf_mutex);
1332 - c = CIRC_SPACE_TO_END(info->xmit.head,
1334 - SERIAL_XMIT_SIZE);
1341 - memcpy(info->xmit.buf + info->xmit.head, buf, c);
1342 - info->xmit.head = (info->xmit.head + c) &
1343 - (SERIAL_XMIT_SIZE-1);
1348 - restore_flags(flags);
1349 + local_irq_disable();
1351 + c = CIRC_SPACE_TO_END(info->xmit.head,
1353 + SERIAL_XMIT_SIZE);
1360 + memcpy(info->xmit.buf + info->xmit.head, buf, c);
1361 + info->xmit.head = (info->xmit.head + c) &
1362 + (SERIAL_XMIT_SIZE-1);
1367 + local_irq_restore(flags);
1369 /* enable transmitter if not running, unless the tty is stopped
1370 * this does not need IRQ protection since if tr_running == 0
1371 @@ -3698,7 +3640,7 @@
1372 } /* raw_raw_write() */
1375 -rs_write(struct tty_struct * tty, int from_user,
1376 +rs_write(struct tty_struct * tty,
1377 const unsigned char *buf, int count)
1379 #if defined(CONFIG_ETRAX_RS485)
1380 @@ -3725,7 +3667,7 @@
1382 #endif /* CONFIG_ETRAX_RS485 */
1384 - count = rs_raw_write(tty, from_user, buf, count);
1385 + count = rs_raw_write(tty, buf, count);
1387 #if defined(CONFIG_ETRAX_RS485)
1388 if (info->rs485.enabled)
1389 @@ -3793,10 +3735,9 @@
1390 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
1391 unsigned long flags;
1393 - save_flags(flags);
1395 + local_irq_save(flags);
1396 info->xmit.head = info->xmit.tail = 0;
1397 - restore_flags(flags);
1398 + local_irq_restore(flags);
1400 wake_up_interruptible(&tty->write_wait);
1402 @@ -3818,7 +3759,7 @@
1404 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
1405 unsigned long flags;
1406 - save_flags(flags); cli();
1407 + local_irq_save(flags);
1408 if (info->uses_dma_out) {
1409 /* Put the DMA on hold and disable the channel */
1410 *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold);
1411 @@ -3835,7 +3776,7 @@
1412 DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch));
1414 e100_enable_serial_tx_ready_irq(info);
1415 - restore_flags(flags);
1416 + local_irq_restore(flags);
1420 @@ -4085,61 +4026,6 @@
1426 -set_modem_info(struct e100_serial * info, unsigned int cmd,
1427 - unsigned int *value)
1431 - if (copy_from_user(&arg, value, sizeof(int)))
1436 - if (arg & TIOCM_RTS) {
1437 - e100_rts(info, 1);
1439 - if (arg & TIOCM_DTR) {
1440 - e100_dtr(info, 1);
1442 - /* Handle FEMALE behaviour */
1443 - if (arg & TIOCM_RI) {
1444 - e100_ri_out(info, 1);
1446 - if (arg & TIOCM_CD) {
1447 - e100_cd_out(info, 1);
1451 - if (arg & TIOCM_RTS) {
1452 - e100_rts(info, 0);
1454 - if (arg & TIOCM_DTR) {
1455 - e100_dtr(info, 0);
1457 - /* Handle FEMALE behaviour */
1458 - if (arg & TIOCM_RI) {
1459 - e100_ri_out(info, 0);
1461 - if (arg & TIOCM_CD) {
1462 - e100_cd_out(info, 0);
1466 - e100_rts(info, arg & TIOCM_RTS);
1467 - e100_dtr(info, arg & TIOCM_DTR);
1468 - /* Handle FEMALE behaviour */
1469 - e100_ri_out(info, arg & TIOCM_RI);
1470 - e100_cd_out(info, arg & TIOCM_CD);
1480 rs_break(struct tty_struct *tty, int break_state)
1482 @@ -4149,8 +4035,7 @@
1486 - save_flags(flags);
1488 + local_irq_save(flags);
1489 if (break_state == -1) {
1490 /* Go to manual mode and set the txd pin to 0 */
1491 info->tx_ctrl &= 0x3F; /* Clear bit 7 (txd) and 6 (tr_enable) */
1492 @@ -4158,7 +4043,42 @@
1493 info->tx_ctrl |= (0x80 | 0x40); /* Set bit 7 (txd) and 6 (tr_enable) */
1495 info->port[REG_TR_CTRL] = info->tx_ctrl;
1496 - restore_flags(flags);
1497 + local_irq_restore(flags);
1501 +rs_tiocmset(struct tty_struct *tty, struct file * file, unsigned int set, unsigned int clear)
1503 + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
1505 + if (clear & TIOCM_RTS) {
1506 + e100_rts(info, 0);
1508 + if (clear & TIOCM_DTR) {
1509 + e100_dtr(info, 0);
1511 + /* Handle FEMALE behaviour */
1512 + if (clear & TIOCM_RI) {
1513 + e100_ri_out(info, 0);
1515 + if (clear & TIOCM_CD) {
1516 + e100_cd_out(info, 0);
1519 + if (set & TIOCM_RTS) {
1520 + e100_rts(info, 1);
1522 + if (set & TIOCM_DTR) {
1523 + e100_dtr(info, 1);
1525 + /* Handle FEMALE behaviour */
1526 + if (set & TIOCM_RI) {
1527 + e100_ri_out(info, 1);
1529 + if (set & TIOCM_CD) {
1530 + e100_cd_out(info, 1);
1536 @@ -4177,10 +4097,6 @@
1539 return get_modem_info(info, (unsigned int *) arg);
1543 - return set_modem_info(info, cmd, (unsigned int *) arg);
1545 return get_serial_info(info,
1546 (struct serial_struct *) arg);
1547 @@ -4212,7 +4128,7 @@
1548 if (copy_from_user(&rs485wr, (struct rs485_write*)arg, sizeof(rs485wr)))
1551 - return e100_write_rs485(tty, 1, rs485wr.outc, rs485wr.outc_size);
1552 + return e100_write_rs485(tty, rs485wr.outc, rs485wr.outc_size);
1556 @@ -4242,46 +4158,6 @@
1560 -/* In debugport.c - register a console write function that uses the normal
1563 -typedef int (*debugport_write_function)(int i, const char *buf, unsigned int len);
1565 -extern debugport_write_function debug_write_function;
1567 -static int rs_debug_write_function(int i, const char *buf, unsigned int len)
1571 - struct tty_struct *tty;
1572 - static int recurse_cnt = 0;
1574 - tty = rs_table[i].tty;
1576 - unsigned long flags;
1577 - if (recurse_cnt > 5) /* We skip this debug output */
1580 - local_irq_save(flags);
1582 - local_irq_restore(flags);
1584 - cnt = rs_write(tty, 0, buf + written, len);
1592 - local_irq_save(flags);
1594 - local_irq_restore(flags);
1601 * ------------------------------------------------------------
1603 @@ -4303,11 +4179,10 @@
1605 /* interrupts are disabled for this entire function */
1607 - save_flags(flags);
1609 + local_irq_save(flags);
1611 if (tty_hung_up_p(filp)) {
1612 - restore_flags(flags);
1613 + local_irq_restore(flags);
1617 @@ -4334,7 +4209,7 @@
1621 - restore_flags(flags);
1622 + local_irq_restore(flags);
1625 info->flags |= ASYNC_CLOSING;
1626 @@ -4388,7 +4263,7 @@
1628 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
1629 wake_up_interruptible(&info->close_wait);
1630 - restore_flags(flags);
1631 + local_irq_restore(flags);
1635 @@ -4410,6 +4285,28 @@
1641 + * Release any allocated DMA irq's.
1643 + if (info->dma_in_enabled) {
1644 + cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
1645 + free_irq(info->dma_in_irq_nbr,
1647 + info->uses_dma_in = 0;
1648 +#ifdef SERIAL_DEBUG_OPEN
1649 + printk("DMA irq '%s' freed\n", info->dma_in_irq_description);
1652 + if (info->dma_out_enabled) {
1653 + free_irq(info->dma_out_irq_nbr,
1655 + cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
1656 + info->uses_dma_out = 0;
1657 +#ifdef SERIAL_DEBUG_OPEN
1658 + printk("DMA irq '%s' freed\n", info->dma_out_irq_description);
1664 @@ -4485,7 +4382,7 @@
1665 if (tty_hung_up_p(filp) ||
1666 (info->flags & ASYNC_CLOSING)) {
1667 if (info->flags & ASYNC_CLOSING)
1668 - interruptible_sleep_on(&info->close_wait);
1669 + wait_event_interruptible(info->close_wait, 0);
1670 #ifdef SERIAL_DO_RESTART
1671 if (info->flags & ASYNC_HUP_NOTIFY)
1673 @@ -4523,21 +4420,19 @@
1674 printk("block_til_ready before block: ttyS%d, count = %d\n",
1675 info->line, info->count);
1677 - save_flags(flags);
1679 + local_irq_save(flags);
1680 if (!tty_hung_up_p(filp)) {
1684 - restore_flags(flags);
1685 + local_irq_restore(flags);
1686 info->blocked_open++;
1688 - save_flags(flags);
1690 + local_irq_save(flags);
1691 /* assert RTS and DTR */
1694 - restore_flags(flags);
1695 + local_irq_restore(flags);
1696 set_current_state(TASK_INTERRUPTIBLE);
1697 if (tty_hung_up_p(filp) ||
1698 !(info->flags & ASYNC_INITIALIZED)) {
1699 @@ -4589,9 +4484,9 @@
1700 struct e100_serial *info;
1703 + int allocated_resources = 0;
1705 /* find which port we want to open */
1709 if (line < 0 || line >= NR_PORTS)
1710 @@ -4632,7 +4527,7 @@
1711 if (tty_hung_up_p(filp) ||
1712 (info->flags & ASYNC_CLOSING)) {
1713 if (info->flags & ASYNC_CLOSING)
1714 - interruptible_sleep_on(&info->close_wait);
1715 + wait_event_interruptible(info->close_wait, 0);
1716 #ifdef SERIAL_DO_RESTART
1717 return ((info->flags & ASYNC_HUP_NOTIFY) ?
1718 -EAGAIN : -ERESTARTSYS);
1719 @@ -4642,12 +4537,79 @@
1723 + * If DMA is enabled try to allocate the irq's.
1725 + if (info->count == 1) {
1726 + allocated_resources = 1;
1727 + if (info->dma_in_enabled) {
1728 + if (request_irq(info->dma_in_irq_nbr,
1730 + info->dma_in_irq_flags,
1731 + info->dma_in_irq_description,
1733 + printk(KERN_WARNING "DMA irq '%s' busy; falling back to non-DMA mode\n", info->dma_in_irq_description);
1734 + /* Make sure we never try to use DMA in for the port again. */
1735 + info->dma_in_enabled = 0;
1736 + } else if (cris_request_dma(info->dma_in_nbr,
1737 + info->dma_in_irq_description,
1738 + DMA_VERBOSE_ON_ERROR,
1739 + info->dma_owner)) {
1740 + free_irq(info->dma_in_irq_nbr, info);
1741 + printk(KERN_WARNING "DMA '%s' busy; falling back to non-DMA mode\n", info->dma_in_irq_description);
1742 + /* Make sure we never try to use DMA in for the port again. */
1743 + info->dma_in_enabled = 0;
1745 +#ifdef SERIAL_DEBUG_OPEN
1746 + else printk("DMA irq '%s' allocated\n", info->dma_in_irq_description);
1749 + if (info->dma_out_enabled) {
1750 + if (request_irq(info->dma_out_irq_nbr,
1752 + info->dma_out_irq_flags,
1753 + info->dma_out_irq_description,
1755 + printk(KERN_WARNING "DMA irq '%s' busy; falling back to non-DMA mode\n", info->dma_out_irq_description);
1756 + /* Make sure we never try to use DMA out for the port again. */
1757 + info->dma_out_enabled = 0;
1758 + } else if (cris_request_dma(info->dma_out_nbr,
1759 + info->dma_out_irq_description,
1760 + DMA_VERBOSE_ON_ERROR,
1761 + info->dma_owner)) {
1762 + free_irq(info->dma_out_irq_nbr, info);
1763 + printk(KERN_WARNING "DMA '%s' busy; falling back to non-DMA mode\n", info->dma_out_irq_description);
1764 + /* Make sure we never try to use DMA in for the port again. */
1765 + info->dma_out_enabled = 0;
1767 +#ifdef SERIAL_DEBUG_OPEN
1768 + else printk("DMA irq '%s' allocated\n", info->dma_out_irq_description);
1774 * Start up the serial port
1777 retval = startup(info);
1781 + if (allocated_resources) {
1782 + if (info->dma_out_enabled) {
1783 + cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
1784 + free_irq(info->dma_out_irq_nbr,
1787 + if (info->dma_in_enabled) {
1788 + cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
1789 + free_irq(info->dma_in_irq_nbr,
1793 + /* FIXME Decrease count info->count here too? */
1799 retval = block_til_ready(tty, filp, info);
1801 @@ -4655,6 +4617,19 @@
1802 printk("rs_open returning after block_til_ready with %d\n",
1805 + if (allocated_resources) {
1806 + if (info->dma_out_enabled) {
1807 + cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
1808 + free_irq(info->dma_out_irq_nbr,
1811 + if (info->dma_in_enabled) {
1812 + cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
1813 + free_irq(info->dma_in_irq_nbr,
1821 @@ -4844,6 +4819,7 @@
1822 .send_xchar = rs_send_xchar,
1823 .wait_until_sent = rs_wait_until_sent,
1824 .read_proc = rs_read_proc,
1825 + .tiocmset = rs_tiocmset
1829 @@ -4863,7 +4839,22 @@
1830 #if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
1831 init_timer(&flush_timer);
1832 flush_timer.function = timed_flush_handler;
1833 - mod_timer(&flush_timer, jiffies + CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS);
1834 + mod_timer(&flush_timer, jiffies + 5);
1837 +#if defined(CONFIG_ETRAX_RS485)
1838 +#if defined(CONFIG_ETRAX_RS485_ON_PA)
1839 + if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, rs485_pa_bit)) {
1840 + printk(KERN_CRIT "ETRAX100LX serial: Could not allocate RS485 pin\n");
1844 +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
1845 + if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, rs485_port_g_bit)) {
1846 + printk(KERN_CRIT "ETRAX100LX serial: Could not allocate RS485 pin\n");
1852 /* Initialize the tty_driver structure */
1853 @@ -4888,6 +4879,14 @@
1854 /* do some initializing for the separate ports */
1856 for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) {
1857 + if (info->enabled) {
1858 + if (cris_request_io_interface(info->io_if, info->io_if_description)) {
1859 + printk(KERN_CRIT "ETRAX100LX async serial: Could not allocate IO pins for %s, port %d\n",
1860 + info->io_if_description,
1862 + info->enabled = 0;
1865 info->uses_dma_in = 0;
1866 info->uses_dma_out = 0;
1868 @@ -4939,64 +4938,16 @@
1871 #ifndef CONFIG_SVINTO_SIM
1872 +#ifndef CONFIG_ETRAX_KGDB
1873 /* Not needed in simulator. May only complicate stuff. */
1874 /* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
1876 - if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial ", NULL))
1879 -#ifdef CONFIG_ETRAX_SERIAL_PORT0
1880 -#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
1881 - if (request_irq(SER0_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_DISABLED, "serial 0 dma tr", NULL))
1884 -#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
1885 - if (request_irq(SER0_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_DISABLED, "serial 0 dma rec", NULL))
1890 -#ifdef CONFIG_ETRAX_SERIAL_PORT1
1891 -#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
1892 - if (request_irq(SER1_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_DISABLED, "serial 1 dma tr", NULL))
1895 -#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
1896 - if (request_irq(SER1_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_DISABLED, "serial 1 dma rec", NULL))
1900 -#ifdef CONFIG_ETRAX_SERIAL_PORT2
1901 - /* DMA Shared with par0 (and SCSI0 and ATA) */
1902 -#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
1903 - if (request_irq(SER2_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 2 dma tr", NULL))
1906 -#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
1907 - if (request_irq(SER2_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 2 dma rec", NULL))
1911 -#ifdef CONFIG_ETRAX_SERIAL_PORT3
1912 - /* DMA Shared with par1 (and SCSI1 and Extern DMA 0) */
1913 -#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
1914 - if (request_irq(SER3_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 3 dma tr", NULL))
1917 -#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
1918 - if (request_irq(SER3_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 3 dma rec", NULL))
1922 + if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
1923 + panic("%s: Failed to request irq8", __FUNCTION__);
1925 -#ifdef CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST
1926 - if (request_irq(TIMER1_IRQ_NBR, timeout_interrupt, IRQF_SHARED | IRQF_DISABLED,
1927 - "fast serial dma timeout", NULL)) {
1928 - printk(KERN_CRIT "err: timer1 irq\n");
1931 #endif /* CONFIG_SVINTO_SIM */
1932 - debug_write_function = rs_debug_write_function;
1937 --- linux-2.6.19.2.orig/drivers/serial/crisv10.h 2007-01-10 20:10:37.000000000 +0100
1938 +++ linux-2.6.19.2.dev/drivers/serial/crisv10.h 2006-10-13 14:44:38.000000000 +0200
1941 #include <linux/circ_buf.h>
1942 #include <asm/termios.h>
1943 +#include <asm/dma.h>
1944 +#include <asm/arch/io_interface_mux.h>
1946 /* Software state per channel */
1949 u8 dma_in_enabled:1; /* Set to 1 if DMA should be used */
1951 /* end of fields defined in rs_table[] in .c-file */
1953 + unsigned int dma_in_nbr;
1954 + unsigned int dma_out_nbr;
1955 + unsigned int dma_in_irq_nbr;
1956 + unsigned int dma_out_irq_nbr;
1957 + unsigned long dma_in_irq_flags;
1958 + unsigned long dma_out_irq_flags;
1959 + char *dma_in_irq_description;
1960 + char *dma_out_irq_description;
1962 + enum cris_io_interface io_if;
1963 + char *io_if_description;
1965 u8 uses_dma_in; /* Set to 1 if DMA is used */
1966 u8 uses_dma_out; /* Set to 1 if DMA is used */
1967 u8 forced_eop; /* a fifo eop has been forced */
1968 --- linux-2.6.19.2.orig/drivers/serial/crisv32.c 1970-01-01 01:00:00.000000000 +0100
1969 +++ linux-2.6.19.2.dev/drivers/serial/crisv32.c 2007-01-05 09:59:53.000000000 +0100
1971 +/* $Id: crisv32.c,v 1.78 2007/01/05 08:59:53 starvik Exp $
1973 + * Serial port driver for the ETRAX FS chip
1975 + * Copyright (C) 1998-2006 Axis Communications AB
1977 + * Many, many authors. Based once upon a time on serial.c for 16x50.
1979 + * Johan Adolfsson - port to ETRAX FS
1980 + * Mikael Starvik - port to serial_core framework
1984 +#include <linux/module.h>
1985 +#include <linux/init.h>
1986 +#include <linux/console.h>
1987 +#include <linux/types.h>
1988 +#include <linux/errno.h>
1989 +#include <linux/serial_core.h>
1991 +#include <asm/io.h>
1992 +#include <asm/irq.h>
1993 +#include <asm/system.h>
1994 +#include <asm/uaccess.h>
1996 +#include <asm/arch/dma.h>
1997 +#include <asm/arch/system.h>
1998 +#include <asm/arch/pinmux.h>
1999 +#include <asm/arch/hwregs/dma.h>
2000 +#include <asm/arch/hwregs/reg_rdwr.h>
2001 +#include <asm/arch/hwregs/ser_defs.h>
2002 +#include <asm/arch/hwregs/dma_defs.h>
2003 +#include <asm/arch/hwregs/gio_defs.h>
2004 +#include <asm/arch/hwregs/intr_vect_defs.h>
2005 +#include <asm/arch/hwregs/reg_map.h>
2007 +#define UART_NR 5 /* 4 ports + dummy port */
2008 +#define SERIAL_RECV_DESCRIPTORS 8
2010 +/* We only buffer 255 characters here, no need for more tx descriptors. */
2011 +#define SERIAL_TX_DESCRIPTORS 4
2013 +/* Kept for experimental purposes. */
2014 +#define ETRAX_SER_FIFO_SIZE 1
2015 +#define SERIAL_DESCR_BUF_SIZE 256
2016 +#define regi_NULL 0
2017 +#define DMA_WAIT_UNTIL_RESET(inst) \
2019 + reg_dma_rw_stat r; \
2021 + r = REG_RD(dma, (inst), rw_stat); \
2022 + } while (r.mode != regk_dma_rst); \
2025 +/* Macro to set up control lines for a port. */
2026 +#define SETUP_PINS(port) \
2027 + if (serial_cris_ports[port].used) { \
2028 + if (strcmp(CONFIG_ETRAX_SER##port##_DTR_BIT, "")) \
2029 + crisv32_io_get_name(&serial_cris_ports[port].dtr_pin, \
2030 + CONFIG_ETRAX_SER##port##_DTR_BIT); \
2032 + serial_cris_ports[port].dtr_pin = dummy_pin; \
2033 + if (strcmp(CONFIG_ETRAX_SER##port##_DSR_BIT, "")) \
2034 + crisv32_io_get_name(&serial_cris_ports[port].dsr_pin, \
2035 + CONFIG_ETRAX_SER##port##_DSR_BIT); \
2037 + serial_cris_ports[port].dsr_pin = dummy_pin; \
2038 + if (strcmp(CONFIG_ETRAX_SER##port##_RI_BIT, "")) \
2039 + crisv32_io_get_name(&serial_cris_ports[port].ri_pin, \
2040 + CONFIG_ETRAX_SER##port##_RI_BIT); \
2042 + serial_cris_ports[port].ri_pin = dummy_pin; \
2043 + if (strcmp(CONFIG_ETRAX_SER##port##_CD_BIT, "")) \
2044 + crisv32_io_get_name(&serial_cris_ports[port].cd_pin, \
2045 + CONFIG_ETRAX_SER##port##_CD_BIT); \
2047 + serial_cris_ports[port].cd_pin = dummy_pin; \
2050 +/* Set a serial port register if anything has changed. */
2051 +#define MODIFY_REG(instance, reg, var) \
2052 + if (REG_RD_INT(ser, instance, reg) \
2053 + != REG_TYPE_CONV(int, reg_ser_##reg, var)) \
2054 + REG_WR(ser, instance, reg, var);
2057 + * Regarding RS485 operation in crisv32 serial driver.
2058 + * ---------------------------------------------------
2059 + * RS485 can be run in two modes, full duplex using four wires (485FD) and
2060 + * half duplex using two wires (485HD). The default mode of each serial port
2061 + * is configured in the kernel configuration. The available modes are:
2062 + * RS-232, RS-485 half duplex, and RS-485 full duplex.
2064 + * In the 485HD mode the direction of the data bus must be able to switch.
2065 + * The direction of the transceiver is controlled by the RTS signal. Hence
2066 + * the auto_rts function in the ETRAX FS chip is enabled in this mode, which
2067 + * automatically toggles RTS when transmitting. The initial direction of the
2068 + * port is receiving.
2070 + * In the 485FD mode two transceivers will be used, one in each direction.
2071 + * Usually the hardware can handle both 485HD and 485FD, which implies that
2072 + * one of the transceivers can change direction. Consequently that transceiver
2073 + * must be tied to operate in the opposite direction of the other one, setting
2074 + * and keeping RTS to a fixed value do this.
2076 + * There are two special "ioctls" that can configure the ports. These two are
2077 + * kept for backward compatibility with older applications. The effects of using
2078 + * them are described below:
2079 + * The TIOCSERSETRS485:
2080 + * This ioctl sets a serial port in 232 mode to 485HD mode or vice versa. The
2081 + * state of the port is kept when closing the port. Note that this ioctl has no
2082 + * effect on a serial port in the 485FD mode.
2083 + * The TIOCSERWRRS485:
2084 + * This ioctl sets a serial port in 232 mode to 485HD mode and writes the data
2085 + * "included" in the ioctl to the port. The port will then stay in 485HD mode.
2086 + * Using this ioctl on a serial port in the 485HD mode will transmit the data
2087 + * without changing the mode. Using this ioctl on a serial port in 485FD mode
2088 + * will not change the mode and simply send the data using the 485FD mode.
2092 +#define TYPE_485HD 1
2093 +#define TYPE_485FD 2
2095 +struct etrax_recv_buffer {
2096 + struct etrax_recv_buffer *next;
2097 + unsigned short length;
2098 + unsigned char error;
2099 + unsigned char pad;
2101 + unsigned char buffer[0];
2104 +struct uart_cris_port {
2105 + struct uart_port port;
2111 + /* Used to check if port enabled as well by testing for zero. */
2112 + reg_scope_instances regi_ser;
2113 + reg_scope_instances regi_dmain;
2114 + reg_scope_instances regi_dmaout;
2116 + struct crisv32_iopin dtr_pin;
2117 + struct crisv32_iopin dsr_pin;
2118 + struct crisv32_iopin ri_pin;
2119 + struct crisv32_iopin cd_pin;
2121 + struct dma_descr_context tr_context_descr
2122 + __attribute__ ((__aligned__(32)));
2123 + struct dma_descr_data tr_descr[SERIAL_TX_DESCRIPTORS]
2124 + __attribute__ ((__aligned__(32)));
2125 + struct dma_descr_context rec_context_descr
2126 + __attribute__ ((__aligned__(32)));
2127 + struct dma_descr_data rec_descr[SERIAL_RECV_DESCRIPTORS]
2128 + __attribute__ ((__aligned__(32)));
2130 + /* This is the first one in the list the HW is working on now. */
2131 + struct dma_descr_data* first_tx_descr;
2133 + /* This is the last one in the list the HW is working on now. */
2134 + struct dma_descr_data* last_tx_descr;
2136 + /* This is how many characters the HW is working on now. */
2137 + unsigned int tx_pending_chars;
2140 + unsigned int cur_rec_descr;
2141 + struct etrax_recv_buffer *first_recv_buffer;
2142 + struct etrax_recv_buffer *last_recv_buffer;
2144 + unsigned int recv_cnt;
2145 + unsigned int max_recv_cnt;
2147 + /* The time for 1 char, in usecs. */
2148 + unsigned long char_time_usec;
2150 + /* Last tx usec in the jiffies. */
2151 + unsigned long last_tx_active_usec;
2153 + /* Last tx time in jiffies. */
2154 + unsigned long last_tx_active;
2156 + /* Last rx usec in the jiffies. */
2157 + unsigned long last_rx_active_usec;
2159 + /* Last rx time in jiffies. */
2160 + unsigned long last_rx_active;
2162 +#ifdef CONFIG_ETRAX_RS485
2163 + /* RS-485 support, duh. */
2164 + struct rs485_control rs485;
2169 +extern struct uart_driver serial_cris_driver;
2170 +static struct uart_port *console_port;
2171 +static int console_baud = 115200;
2172 +static struct uart_cris_port serial_cris_ports[UART_NR] = {
2174 +#ifdef CONFIG_ETRAX_SERIAL_PORT0
2176 + .irq = SER0_INTR_VECT,
2177 + .regi_ser = regi_ser0,
2179 + * We initialize the dma stuff like this to get a compiler error
2180 + * if a CONFIG is missing
2183 +# ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
2186 +# ifdef CONFIG_ETRAX_SERIAL_PORT0_NO_DMA_IN
2191 +# ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
2194 +# ifdef CONFIG_ETRAX_SERIAL_PORT0_NO_DMA_OUT
2198 +# ifdef CONFIG_ETRAX_RS485
2199 +# ifdef CONFIG_ETRAX_SERIAL_PORT0_TYPE_485HD
2200 + .port_type = TYPE_485HD,
2202 +# ifdef CONFIG_ETRAX_SERIAL_PORT0_TYPE_485FD
2203 + .port_type = TYPE_485FD,
2207 + .regi_ser = regi_NULL,
2208 + .regi_dmain = regi_NULL,
2209 + .regi_dmaout = regi_NULL,
2213 +#ifdef CONFIG_ETRAX_SERIAL_PORT1
2215 + .irq = SER1_INTR_VECT,
2216 + .regi_ser = regi_ser1,
2218 +# ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA5_IN
2221 +# ifdef CONFIG_ETRAX_SERIAL_PORT1_NO_DMA_IN
2226 +# ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA4_OUT
2229 +# ifdef CONFIG_ETRAX_SERIAL_PORT1_NO_DMA_OUT
2233 +# ifdef CONFIG_ETRAX_RS485
2234 +# ifdef CONFIG_ETRAX_SERIAL_PORT1_TYPE_485HD
2235 + .port_type = TYPE_485HD,
2237 +# ifdef CONFIG_ETRAX_SERIAL_PORT1_TYPE_485FD
2238 + .port_type = TYPE_485FD,
2242 + .regi_ser = regi_NULL,
2243 + .regi_dmain = regi_NULL,
2244 + .regi_dmaout = regi_NULL,
2248 +#ifdef CONFIG_ETRAX_SERIAL_PORT2
2250 + .irq = SER2_INTR_VECT,
2251 + .regi_ser = regi_ser2,
2253 +# ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
2256 +# ifdef CONFIG_ETRAX_SERIAL_PORT2_NO_DMA_IN
2261 +# ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
2264 +# ifdef CONFIG_ETRAX_SERIAL_PORT2_NO_DMA_OUT
2268 +# ifdef CONFIG_ETRAX_RS485
2269 +# ifdef CONFIG_ETRAX_SERIAL_PORT2_TYPE_485HD
2270 + .port_type = TYPE_485HD,
2272 +# ifdef CONFIG_ETRAX_SERIAL_PORT2_TYPE_485FD
2273 + .port_type = TYPE_485FD,
2277 + .regi_ser = regi_NULL,
2278 + .regi_dmain = regi_NULL,
2279 + .regi_dmaout = regi_NULL,
2283 +#ifdef CONFIG_ETRAX_SERIAL_PORT3
2285 + .irq = SER3_INTR_VECT,
2286 + .regi_ser = regi_ser3,
2288 +# ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA9_IN
2291 +# ifdef CONFIG_ETRAX_SERIAL_PORT3_NO_DMA_IN
2296 +# ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA8_OUT
2299 +# ifdef CONFIG_ETRAX_SERIAL_PORT3_NO_DMA_OUT
2303 +# ifdef CONFIG_ETRAX_RS485
2304 +# ifdef CONFIG_ETRAX_SERIAL_PORT3_TYPE_485HD
2305 + .port_type = TYPE_485HD,
2307 +# ifdef CONFIG_ETRAX_SERIAL_PORT3_TYPE_485FD
2308 + .port_type = TYPE_485FD,
2312 + .regi_ser = regi_NULL,
2313 + .regi_dmain = regi_NULL,
2314 + .regi_dmaout = regi_NULL,
2318 +#ifdef CONFIG_ETRAX_DEBUG_PORT_NULL
2321 + .regi_ser = regi_NULL
2322 +} /* Dummy console port */
2326 +/* Dummy pin used for unused CD, DSR, DTR and RI signals. */
2327 +static unsigned long io_dummy;
2328 +static struct crisv32_ioport dummy_port =
2335 +static struct crisv32_iopin dummy_pin =
2341 +static int selected_console =
2342 +#if defined(CONFIG_ETRAX_DEBUG_PORT0)
2344 +#elif defined(CONFIG_ETRAX_DEBUG_PORT1)
2346 +#elif defined(CONFIG_ETRAX_DEBUG_PORT2)
2348 +#elif defined(CONFIG_ETRAX_DEBUG_PORT3)
2350 +#else /* CONFIG_ETRAX_DEBUG_PORT_NULL */
2354 +extern void reset_watchdog(void);
2357 + * Interrupts are disabled on entering
2360 +cris_console_write(struct console *co, const char *s, unsigned int count)
2362 + struct uart_cris_port *up;
2364 + reg_ser_r_stat_din stat;
2365 + reg_ser_rw_tr_dma_en tr_dma_en, old;
2367 + up = &serial_cris_ports[selected_console];
2370 + * This function isn't covered by the struct uart_ops, so we
2371 + * have to check manually that the port really is there,
2372 + * configured and live.
2374 + if (!up->regi_ser)
2377 + /* Switch to manual mode. */
2378 + tr_dma_en = old = REG_RD (ser, up->regi_ser, rw_tr_dma_en);
2379 + if (tr_dma_en.en == regk_ser_yes) {
2380 + tr_dma_en.en = regk_ser_no;
2381 + REG_WR(ser, up->regi_ser, rw_tr_dma_en, tr_dma_en);
2385 + for (i = 0; i < count; i++) {
2387 + if (s[i] == '\n') {
2389 + stat = REG_RD (ser, up->regi_ser, r_stat_din);
2390 + } while (!stat.tr_rdy);
2391 + REG_WR_INT (ser, up->regi_ser, rw_dout, '\r');
2393 + /* Wait until transmitter is ready and send. */
2395 + stat = REG_RD (ser, up->regi_ser, r_stat_din);
2396 + } while (!stat.tr_rdy);
2397 + REG_WR_INT (ser, up->regi_ser, rw_dout, s[i]);
2399 + /* Feed watchdog, because this may take looong time. */
2403 + /* Restore mode. */
2404 + if (tr_dma_en.en != old.en)
2405 + REG_WR(ser, up->regi_ser, rw_tr_dma_en, old);
2408 +static void cris_serial_port_init(struct uart_port *port, int line);
2410 +cris_console_setup(struct console *co, char *options)
2412 + struct uart_port *port;
2413 + int baud = 115200;
2418 + if (co->index >= UART_NR)
2421 + selected_console = co->index;
2422 + port = &serial_cris_ports[selected_console].port;
2423 + console_port = port;
2426 + uart_parse_options(options, &baud, &parity, &bits, &flow);
2427 + console_baud = baud;
2428 + cris_serial_port_init(port, selected_console);
2429 + co->index = port->line;
2430 + uart_set_options(port, co, baud, parity, bits, flow);
2435 +static struct tty_driver*
2436 +cris_console_device(struct console* co, int *index)
2438 + struct uart_driver *p = co->data;
2439 + *index = selected_console;
2440 + return p->tty_driver;
2443 +static struct console cris_console = {
2445 + .write = cris_console_write,
2446 + .device = cris_console_device,
2447 + .setup = cris_console_setup,
2448 + .flags = CON_PRINTBUFFER,
2450 + .data = &serial_cris_driver,
2453 +#define SERIAL_CRIS_CONSOLE &cris_console
2455 +struct uart_driver serial_cris_driver = {
2456 + .owner = THIS_MODULE,
2457 + .driver_name = "serial",
2458 + .dev_name = "ttyS",
2459 + .major = TTY_MAJOR,
2462 + .cons = SERIAL_CRIS_CONSOLE,
2465 +static int inline crisv32_serial_get_rts(struct uart_cris_port *up)
2467 + reg_scope_instances regi_ser = up->regi_ser;
2469 + * Return what the user has controlled rts to or
2470 + * what the pin is? (if auto_rts is used it differs during tx)
2472 + reg_ser_r_stat_din rstat = REG_RD(ser, regi_ser, r_stat_din);
2473 + return !(rstat.rts_n == regk_ser_active);
2477 + * A set = 0 means 3.3V on the pin, bitvalue: 0=active, 1=inactive
2480 +static inline void crisv32_serial_set_rts(struct uart_cris_port *up, int set)
2482 + reg_scope_instances regi_ser = up->regi_ser;
2484 +#ifdef CONFIG_ETRAX_RS485
2485 + /* Never toggle RTS if port is in 485 mode. If port is in 485FD mode we
2486 + * do not want to send with the receiver and for 485HD mode auto_rts
2487 + * take care of the RTS for us.
2489 + if (!up->rs485.enabled) {
2493 + unsigned long flags;
2494 + reg_ser_rw_rec_ctrl rec_ctrl;
2496 + local_irq_save(flags);
2497 + rec_ctrl = REG_RD(ser, regi_ser, rw_rec_ctrl);
2499 + rec_ctrl.rts_n = regk_ser_active;
2501 + rec_ctrl.rts_n = regk_ser_inactive;
2502 + REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
2503 + local_irq_restore(flags);
2508 +static int inline crisv32_serial_get_cts(struct uart_cris_port *up)
2510 + reg_scope_instances regi_ser = up->regi_ser;
2511 + reg_ser_r_stat_din rstat = REG_RD(ser, regi_ser, r_stat_din);
2512 + return (rstat.cts_n == regk_ser_active);
2516 + * Send a single character for XON/XOFF purposes. We do it in this separate
2517 + * function instead of the alternative support port.x_char, in the ...start_tx
2518 + * function, so we don't mix up this case with possibly enabling transmission
2519 + * of queued-up data (in case that's disabled after *receiving* an XOFF or
2520 + * negative CTS). This function is used for both DMA and non-DMA case; see HW
2521 + * docs specifically blessing sending characters manually when DMA for
2522 + * transmission is enabled and running. We may be asked to transmit despite
2523 + * the transmitter being disabled by a ..._stop_tx call so we need to enable
2524 + * it temporarily but restore the state afterwards.
2526 + * Beware: I'm not sure how the RS-485 stuff is supposed to work. Using
2527 + * XON/XOFF seems problematic if there are several controllers, but if it's
2528 + * actually RS-422 (multi-drop; one sender and multiple receivers), it might
2529 + * Just Work, so don't bail out just because it looks a little suspicious.
2532 +void serial_cris_send_xchar(struct uart_port *port, char ch)
2534 + struct uart_cris_port *up = (struct uart_cris_port *)port;
2535 + reg_ser_rw_dout dout = { .data = ch };
2536 + reg_ser_rw_ack_intr ack_intr = { .tr_rdy = regk_ser_yes };
2537 + reg_ser_r_stat_din rstat;
2538 + reg_ser_rw_tr_ctrl prev_tr_ctrl, tr_ctrl;
2539 + reg_scope_instances regi_ser = up->regi_ser;
2540 + unsigned long flags;
2543 + * Wait for tr_rdy in case a character is already being output. Make
2544 + * sure we have integrity between the register reads and the writes
2545 + * below, but don't busy-wait with interrupts off and the port lock
2548 + spin_lock_irqsave(&port->lock, flags);
2550 + spin_unlock_irqrestore(&port->lock, flags);
2551 + spin_lock_irqsave(&port->lock, flags);
2552 + prev_tr_ctrl = tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
2553 + rstat = REG_RD(ser, regi_ser, r_stat_din);
2554 + } while (!rstat.tr_rdy);
2557 + * Ack an interrupt if one was just issued for the previous character
2558 + * that was output. This is required for non-DMA as the interrupt is
2559 + * used as the only indicator that the transmitter is ready and it
2560 + * isn't while this x_char is being transmitted.
2562 + REG_WR(ser, regi_ser, rw_ack_intr, ack_intr);
2564 + /* Enable the transmitter in case it was disabled. */
2566 + REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
2569 + * Finally, send the blessed character; nothing should stop it now,
2570 + * except for an xoff-detected state, which we'll handle below.
2572 + REG_WR(ser, regi_ser, rw_dout, dout);
2573 + up->port.icount.tx++;
2575 + /* There might be an xoff state to clear. */
2576 + rstat = REG_RD(ser, up->regi_ser, r_stat_din);
2579 + * Clear any xoff state that *may* have been there to
2580 + * inhibit transmission of the character.
2582 + if (rstat.xoff_detect) {
2583 + reg_ser_rw_xoff_clr xoff_clr = { .clr = 1 };
2584 + REG_WR(ser, regi_ser, rw_xoff_clr, xoff_clr);
2585 + reg_ser_rw_tr_dma_en tr_dma_en
2586 + = REG_RD(ser, regi_ser, rw_tr_dma_en);
2589 + * If we had an xoff state but cleared it, instead sneak in a
2590 + * disabled state for the transmitter, after the character we
2591 + * sent. Thus we keep the port disabled, just as if the xoff
2592 + * state was still in effect (or actually, as if stop_tx had
2593 + * been called, as we stop DMA too).
2595 + prev_tr_ctrl.stop = 1;
2598 + REG_WR(ser, regi_ser, rw_tr_dma_en, tr_dma_en);
2601 + /* Restore "previous" enabled/disabled state of the transmitter. */
2602 + REG_WR(ser, regi_ser, rw_tr_ctrl, prev_tr_ctrl);
2604 + spin_unlock_irqrestore(&port->lock, flags);
2607 +static void transmit_chars_dma(struct uart_cris_port *up);
2610 + * Do not spin_lock_irqsave or disable interrupts by other means here; it's
2611 + * already done by the caller.
2614 +static void serial_cris_start_tx(struct uart_port *port)
2616 + struct uart_cris_port *up = (struct uart_cris_port *)port;
2617 + reg_scope_instances regi_ser = up->regi_ser;
2618 + reg_ser_rw_tr_ctrl tr_ctrl;
2620 + tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
2621 + tr_ctrl.stop = regk_ser_no;
2622 + REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
2623 + if (!up->regi_dmaout) {
2624 + reg_ser_rw_intr_mask intr_mask =
2625 + REG_RD(ser, regi_ser, rw_intr_mask);
2626 + intr_mask.tr_rdy = regk_ser_yes;
2627 + REG_WR(ser, regi_ser, rw_intr_mask, intr_mask);
2630 + * We're called possibly to re-enable transmission after it
2631 + * has been disabled. If so, DMA needs to be re-enabled.
2633 + reg_ser_rw_tr_dma_en tr_dma_en = { .en = 1 };
2634 + REG_WR(ser, regi_ser, rw_tr_dma_en, tr_dma_en);
2635 + transmit_chars_dma(up);
2640 + * This function handles both the DMA and non-DMA case by ordering the
2641 + * transmitter to stop after the current character. We don't need to wait
2642 + * for any such character to be completely transmitted; we do that where it
2643 + * matters, like in serial_cris_set_termios. Don't busy-wait here; see
2644 + * Documentation/serial/driver: this function is called within
2645 + * spin_lock_irq{,save} and thus separate ones would be disastrous (when SMP).
2646 + * There's no documented need to set the txd pin to any particular value;
2647 + * break setting is controlled solely by serial_cris_break_ctl.
2650 +static void serial_cris_stop_tx(struct uart_port *port)
2652 + struct uart_cris_port *up = (struct uart_cris_port *)port;
2653 + reg_scope_instances regi_ser = up->regi_ser;
2654 + reg_ser_rw_tr_ctrl tr_ctrl;
2655 + reg_ser_rw_intr_mask intr_mask;
2656 + reg_ser_rw_tr_dma_en tr_dma_en = {0};
2657 + reg_ser_rw_xoff_clr xoff_clr = {0};
2660 + * For the non-DMA case, we'd get a tr_rdy interrupt that we're not
2661 + * interested in as we're not transmitting any characters. For the
2662 + * DMA case, that interrupt is already turned off, but no reason to
2663 + * waste code on conditionals here.
2665 + intr_mask = REG_RD(ser, regi_ser, rw_intr_mask);
2666 + intr_mask.tr_rdy = regk_ser_no;
2667 + REG_WR(ser, regi_ser, rw_intr_mask, intr_mask);
2669 + tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
2671 + REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
2674 + * Always clear possible hardware xoff-detected state here, no need to
2675 + * unnecessarily consider mctrl settings and when they change. We clear
2676 + * it here rather than in start_tx: both functions are called as the
2677 + * effect of XOFF processing, but start_tx is also called when upper
2678 + * levels tell the driver that there are more characters to send, so
2679 + * avoid adding code there.
2682 + REG_WR(ser, regi_ser, rw_xoff_clr, xoff_clr);
2685 + * Disable transmitter DMA, so that if we're in XON/XOFF, we can send
2686 + * those single characters without also giving go-ahead for queued up
2690 + REG_WR(ser, regi_ser, rw_tr_dma_en, tr_dma_en);
2693 +static void serial_cris_stop_rx(struct uart_port *port)
2695 + struct uart_cris_port *up = (struct uart_cris_port *)port;
2696 + reg_scope_instances regi_ser = up->regi_ser;
2697 + reg_ser_rw_rec_ctrl rec_ctrl = REG_RD(ser, regi_ser, rw_rec_ctrl);
2699 + rec_ctrl.en = regk_ser_no;
2700 + REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
2703 +static void serial_cris_enable_ms(struct uart_port *port)
2707 +static void check_modem_status(struct uart_cris_port *up)
2711 +static unsigned int serial_cris_tx_empty(struct uart_port *port)
2713 + struct uart_cris_port *up = (struct uart_cris_port *)port;
2714 + unsigned long flags;
2716 + reg_ser_r_stat_din rstat = {0};
2718 + spin_lock_irqsave(&up->port.lock, flags);
2719 + if (up->regi_dmaout) {
2721 + * For DMA, before looking at r_stat, we need to check that we
2722 + * either haven't actually started or that end-of-list is
2723 + * reached, else a tr_empty indication is just an internal
2724 + * state. The caller qualifies, if needed, that the
2725 + * port->info.xmit buffer is empty, so we don't need to
2728 + reg_dma_rw_stat status = REG_RD(dma, up->regi_dmaout, rw_stat);
2730 + if (!up->tx_started) {
2735 + if (status.list_state != regk_dma_data_at_eol) {
2741 + rstat = REG_RD(ser, up->regi_ser, r_stat_din);
2742 + ret = rstat.tr_empty ? TIOCSER_TEMT : 0;
2745 + spin_unlock_irqrestore(&up->port.lock, flags);
2748 +static unsigned int serial_cris_get_mctrl(struct uart_port *port)
2750 + struct uart_cris_port *up = (struct uart_cris_port *)port;
2754 + if (crisv32_serial_get_rts(up))
2756 + if (crisv32_io_rd(&up->dtr_pin))
2758 + if (crisv32_io_rd(&up->cd_pin))
2760 + if (crisv32_io_rd(&up->ri_pin))
2762 + if (!crisv32_io_rd(&up->dsr_pin))
2764 + if (crisv32_serial_get_cts(up))
2769 +static void serial_cris_set_mctrl(struct uart_port *port, unsigned int mctrl)
2771 + struct uart_cris_port *up = (struct uart_cris_port *)port;
2773 + crisv32_serial_set_rts(up, mctrl & TIOCM_RTS ? 1 : 0);
2774 + crisv32_io_set(&up->dtr_pin, mctrl & TIOCM_DTR ? 1 : 0);
2775 + crisv32_io_set(&up->ri_pin, mctrl & TIOCM_RNG ? 1 : 0);
2776 + crisv32_io_set(&up->cd_pin, mctrl & TIOCM_CD ? 1 : 0);
2779 +static void serial_cris_break_ctl(struct uart_port *port, int break_state)
2781 + struct uart_cris_port *up = (struct uart_cris_port *)port;
2782 + unsigned long flags;
2783 + reg_ser_rw_tr_ctrl tr_ctrl;
2784 + reg_ser_rw_tr_dma_en tr_dma_en;
2785 + reg_ser_rw_intr_mask intr_mask;
2787 + spin_lock_irqsave(&up->port.lock, flags);
2788 + tr_ctrl = REG_RD(ser, up->regi_ser, rw_tr_ctrl);
2789 + tr_dma_en = REG_RD(ser, up->regi_ser, rw_tr_dma_en);
2790 + intr_mask = REG_RD(ser, up->regi_ser, rw_intr_mask);
2792 + if (break_state != 0) { /* Send break */
2794 + * We need to disable DMA (if used) or tr_rdy interrupts if no
2795 + * DMA. No need to make this conditional on use of DMA;
2796 + * disabling will be a no-op for the other mode.
2798 + intr_mask.tr_rdy = regk_ser_no;
2802 + * Stop transmission and set the txd pin to 0 after the
2803 + * current character. The txd setting will take effect after
2804 + * any current transmission has completed.
2809 + /* Re-enable either transmit DMA or the serial interrupt. */
2810 + if (up->regi_dmaout)
2813 + intr_mask.tr_rdy = regk_ser_yes;
2819 + REG_WR(ser, up->regi_ser, rw_tr_ctrl, tr_ctrl);
2820 + REG_WR(ser, up->regi_ser, rw_tr_dma_en, tr_dma_en);
2821 + REG_WR(ser, up->regi_ser, rw_intr_mask, intr_mask);
2823 + spin_unlock_irqrestore(&up->port.lock, flags);
2827 + * The output DMA channel is free - use it to send as many chars as
2832 +transmit_chars_dma(struct uart_cris_port *up)
2834 + struct dma_descr_data *descr, *pending_descr, *dmapos;
2835 + struct dma_descr_data *last_tx_descr;
2836 + struct circ_buf *xmit = &up->port.info->xmit;
2837 + unsigned int sentl = 0;
2838 + reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
2839 + reg_dma_rw_stat status;
2840 + reg_scope_instances regi_dmaout = up->regi_dmaout;
2841 + unsigned int chars_in_q;
2842 + unsigned int chars_to_send;
2844 + /* Acknowledge dma data descriptor irq, if there was one. */
2845 + REG_WR(dma, regi_dmaout, rw_ack_intr, ack_intr);
2848 + * First get the amount of bytes sent during the last DMA transfer,
2849 + * and update xmit accordingly.
2851 + status = REG_RD(dma, regi_dmaout, rw_stat);
2852 + if (status.list_state == regk_dma_data_at_eol || !up->tx_started)
2853 + dmapos = phys_to_virt((int)up->last_tx_descr->next);
2855 + dmapos = phys_to_virt(REG_RD_INT(dma, regi_dmaout, rw_data));
2857 + pending_descr = up->first_tx_descr;
2858 + while (pending_descr != dmapos) {
2859 + sentl += pending_descr->after - pending_descr->buf;
2860 + pending_descr->after = pending_descr->buf = NULL;
2861 + pending_descr = phys_to_virt((int)pending_descr->next);
2864 + up->first_tx_descr = pending_descr;
2865 + last_tx_descr = up->last_tx_descr;
2867 + /* Update stats. */
2868 + up->port.icount.tx += sentl;
2870 + up->tx_pending_chars -= sentl;
2872 + /* Update xmit buffer. */
2873 + xmit->tail = (xmit->tail + sentl) & (UART_XMIT_SIZE - 1);
2876 + * Find out the largest amount of consecutive bytes we want to send
2879 + chars_in_q = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
2881 + if (chars_in_q == 0)
2882 + /* Tell upper layers that we're now idle. */
2885 + /* Some of those characters are actually pending output. */
2886 + chars_to_send = chars_in_q - up->tx_pending_chars;
2889 + * Clamp the new number of pending chars to the advertised
2892 + if (chars_to_send + up->tx_pending_chars > up->port.fifosize)
2893 + chars_to_send = up->port.fifosize - up->tx_pending_chars;
2895 + /* If we don't want to send any, we're done. */
2896 + if (chars_to_send == 0)
2899 + descr = phys_to_virt((int)last_tx_descr->next);
2902 + * We can't send anything if we could make the condition in
2903 + * the while-loop above (reaping finished descriptors) be met
2904 + * immediately before the first iteration. However, don't
2905 + * mistake the full state for the empty state.
2907 + if ((descr == up->first_tx_descr && up->tx_pending_chars != 0)
2908 + || descr->next == up->first_tx_descr)
2911 + /* Set up the descriptor for output. */
2912 + descr->buf = (void*)virt_to_phys(xmit->buf + xmit->tail
2913 + + up->tx_pending_chars);
2914 + descr->after = descr->buf + chars_to_send;
2916 + descr->out_eop = 0;
2919 + descr->in_eop = 0;
2922 + * Make sure GCC doesn't move this eol clear before the eol set
2926 + last_tx_descr->eol = 0;
2928 + up->last_tx_descr = descr;
2929 + up->tx_pending_chars += chars_to_send;
2931 + if (!up->tx_started) {
2932 + up->tx_started = 1;
2933 + up->tr_context_descr.next = 0;
2934 + up->tr_context_descr.saved_data
2935 + = (dma_descr_data*)virt_to_phys(descr);
2936 + up->tr_context_descr.saved_data_buf = descr->buf;
2937 + DMA_START_CONTEXT(regi_dmaout,
2938 + virt_to_phys(&up->tr_context_descr));
2940 + DMA_CONTINUE_DATA(regi_dmaout);
2942 + /* DMA is now running (hopefully). */
2945 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
2946 + uart_write_wakeup(&up->port);
2950 +transmit_chars_no_dma(struct uart_cris_port *up)
2953 + struct circ_buf *xmit = &up->port.info->xmit;
2955 + reg_scope_instances regi_ser = up->regi_ser;
2956 + reg_ser_r_stat_din rstat;
2957 + reg_ser_rw_ack_intr ack_intr = { .tr_rdy = regk_ser_yes };
2959 + if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
2960 + /* No more to send, so disable the interrupt. */
2961 + reg_ser_rw_intr_mask intr_mask;
2962 + intr_mask = REG_RD(ser, regi_ser, rw_intr_mask);
2963 + intr_mask.tr_rdy = 0;
2964 + intr_mask.tr_empty = 0;
2965 + REG_WR(ser, regi_ser, rw_intr_mask, intr_mask);
2969 + count = ETRAX_SER_FIFO_SIZE;
2971 + reg_ser_rw_dout dout = { .data = xmit->buf[xmit->tail] };
2972 + REG_WR(ser, regi_ser, rw_dout, dout);
2973 + REG_WR(ser, regi_ser, rw_ack_intr, ack_intr);
2974 + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
2975 + up->port.icount.tx++;
2976 + if (xmit->head == xmit->tail)
2978 + rstat = REG_RD(ser, regi_ser, r_stat_din);
2979 + } while ((--count > 0) && rstat.tr_rdy);
2981 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
2982 + uart_write_wakeup(&up->port);
2983 +} /* transmit_chars_no_dma */
2985 +static struct etrax_recv_buffer *
2986 +alloc_recv_buffer(unsigned int size)
2988 + struct etrax_recv_buffer *buffer;
2990 + if (!(buffer = kmalloc(sizeof *buffer + size, GFP_ATOMIC)))
2991 + panic("%s: Could not allocate %d bytes buffer\n",
2992 + __FUNCTION__, size);
2994 + buffer->next = NULL;
2995 + buffer->length = 0;
2996 + buffer->error = TTY_NORMAL;
3002 +append_recv_buffer(struct uart_cris_port *up,
3003 + struct etrax_recv_buffer *buffer)
3005 + unsigned long flags;
3007 + local_irq_save(flags);
3009 + if (!up->first_recv_buffer)
3010 + up->first_recv_buffer = buffer;
3012 + up->last_recv_buffer->next = buffer;
3014 + up->last_recv_buffer = buffer;
3016 + up->recv_cnt += buffer->length;
3017 + if (up->recv_cnt > up->max_recv_cnt)
3018 + up->max_recv_cnt = up->recv_cnt;
3020 + local_irq_restore(flags);
3024 +add_char_and_flag(struct uart_cris_port *up, unsigned char data,
3025 + unsigned char flag)
3027 + struct etrax_recv_buffer *buffer;
3029 + buffer = alloc_recv_buffer(4);
3030 + buffer->length = 1;
3031 + buffer->error = flag;
3032 + buffer->buffer[0] = data;
3034 + append_recv_buffer(up, buffer);
3036 + up->port.icount.rx++;
3042 +flush_to_flip_buffer(struct uart_cris_port *up)
3044 + struct tty_struct *tty;
3045 + struct etrax_recv_buffer *buffer;
3047 + tty = up->port.info->tty;
3048 + if (!up->first_recv_buffer || !tty)
3051 + while ((buffer = up->first_recv_buffer)) {
3052 + unsigned int count = (unsigned int)
3053 + tty_insert_flip_string(tty, buffer->buffer,
3056 + up->recv_cnt -= count;
3058 + if (count == buffer->length) {
3059 + up->first_recv_buffer = buffer->next;
3062 + buffer->length -= count;
3063 + memmove(buffer->buffer, buffer->buffer + count,
3065 + buffer->error = TTY_NORMAL;
3069 + if (!up->first_recv_buffer)
3070 + up->last_recv_buffer = NULL;
3072 + /* This call includes a check for low-latency. */
3073 + tty_flip_buffer_push(tty);
3076 +static unsigned int
3077 +handle_descr_data(struct uart_cris_port *up, struct dma_descr_data *descr,
3078 + unsigned int recvl)
3080 + struct etrax_recv_buffer *buffer
3081 + = phys_to_virt((unsigned long)descr->buf) - sizeof *buffer;
3083 + if (up->recv_cnt + recvl > 65536) {
3084 + printk(KERN_ERR "Too much pending incoming data on %s!"
3085 + " Dropping %u bytes.\n", up->port.info->tty->name,
3090 + buffer->length = recvl;
3092 + append_recv_buffer(up, buffer);
3094 + flush_to_flip_buffer(up);
3096 + buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE);
3097 + descr->buf = (void*)virt_to_phys(buffer->buffer);
3098 + descr->after = descr->buf + SERIAL_DESCR_BUF_SIZE;
3103 +static unsigned int
3104 +handle_all_descr_data(struct uart_cris_port *up)
3106 + struct dma_descr_data *descr
3107 + = &up->rec_descr[(up->cur_rec_descr - 1)
3108 + % SERIAL_RECV_DESCRIPTORS];
3109 + struct dma_descr_data *prev_descr;
3110 + unsigned int recvl;
3111 + unsigned int ret = 0;
3112 + reg_scope_instances regi_dmain = up->regi_dmain;
3115 + prev_descr = descr;
3116 + descr = &up->rec_descr[up->cur_rec_descr];
3118 + if (descr == phys_to_virt(REG_RD(dma, regi_dmain, rw_data)))
3121 + if (++up->cur_rec_descr == SERIAL_RECV_DESCRIPTORS)
3122 + up->cur_rec_descr = 0;
3124 + /* Find out how many bytes were read. */
3125 + recvl = descr->after - descr->buf;
3127 + /* Update stats. */
3128 + up->port.icount.rx += recvl;
3130 + ret += handle_descr_data(up, descr, recvl);
3133 + * Make sure GCC doesn't move this eol clear before the
3137 + prev_descr->eol = 0;
3138 + flush_dma_descr(descr,1); // Cache bug workaround
3139 + flush_dma_descr(prev_descr,0); // Cache bug workaround
3146 +receive_chars_dma(struct uart_cris_port *up)
3148 + reg_ser_r_stat_din rstat;
3149 + reg_dma_rw_ack_intr ack_intr = {0};
3151 + /* Acknowledge both dma_descr and dma_eop irq. */
3152 + ack_intr.data = 1;
3153 + ack_intr.in_eop = 1;
3154 + REG_WR(dma, up->regi_dmain, rw_ack_intr, ack_intr);
3156 + handle_all_descr_data(up);
3158 + /* Read the status register to detect errors. */
3159 + rstat = REG_RD(ser, up->regi_ser, r_stat_din);
3161 + if (rstat.framing_err | rstat.par_err | rstat.orun) {
3163 + * If we got an error, we must reset it by reading the
3164 + * rs_stat_din register and putting the data in the buffer manually.
3166 + reg_ser_rs_stat_din stat_din;
3167 + stat_din = REG_RD(ser, up->regi_ser, rs_stat_din);
3169 + if (stat_din.par_err)
3170 + add_char_and_flag(up, stat_din.data, TTY_PARITY);
3171 + else if (stat_din.orun)
3172 + add_char_and_flag(up, stat_din.data, TTY_OVERRUN);
3173 + else if (stat_din.framing_err)
3174 + add_char_and_flag(up, stat_din.data, TTY_FRAME);
3177 + /* Restart the receiving DMA, in case it got stuck on an EOL. */
3178 + DMA_CONTINUE_DATA(up->regi_dmain);
3181 +void receive_chars_no_dma(struct uart_cris_port *up)
3183 + reg_ser_rs_stat_din stat_din;
3184 + reg_ser_r_stat_din rstat;
3185 + struct tty_struct *tty;
3186 + struct uart_icount *icount;
3187 + int max_count = 16;
3189 + reg_ser_rw_ack_intr ack_intr = { 0 };
3191 + rstat = REG_RD(ser, up->regi_ser, r_stat_din);
3192 + up->last_rx_active_usec = GET_JIFFIES_USEC();
3193 + up->last_rx_active = jiffies;
3194 + icount = &up->port.icount;
3195 + tty = up->port.info->tty;
3198 + stat_din = REG_RD(ser, up->regi_ser, rs_stat_din);
3200 + flag = TTY_NORMAL;
3202 + REG_WR(ser, up->regi_ser, rw_ack_intr, ack_intr);
3205 + if (stat_din.framing_err | stat_din.par_err | stat_din.orun) {
3206 + if (stat_din.data == 0x00 &&
3207 + stat_din.framing_err) {
3208 + /* Most likely a break. */
3211 + } else if (stat_din.par_err) {
3212 + flag = TTY_PARITY;
3214 + } else if (stat_din.orun) {
3215 + flag = TTY_OVERRUN;
3216 + icount->overrun++;
3217 + } else if (stat_din.framing_err) {
3224 + * If this becomes important, we probably *could* handle this
3225 + * gracefully by keeping track of the unhandled character.
3227 + if (!tty_insert_flip_char(tty, stat_din.data, flag))
3228 + panic("%s: No tty buffer space", __FUNCTION__);
3229 + rstat = REG_RD(ser, up->regi_ser, r_stat_din);
3230 + } while (rstat.dav && (max_count-- > 0));
3231 + spin_unlock(&up->port.lock);
3232 + tty_flip_buffer_push(tty);
3233 + spin_lock(&up->port.lock);
3234 +} /* receive_chars_no_dma */
3237 + * DMA output channel interrupt handler.
3238 + * this interrupt is called from DMA2(ser2), DMA8(ser3), DMA6(ser0) or
3239 + * DMA4(ser1) when they have finished a descriptor with the intr flag set.
3243 +dma_tr_interrupt(int irq, void *dev_id, struct pt_regs * regs)
3245 + struct uart_cris_port *up = (struct uart_cris_port *)dev_id;
3246 + reg_dma_r_masked_intr masked_intr;
3247 + reg_scope_instances regi_dmaout;
3250 + spin_lock(&up->port.lock);
3251 + regi_dmaout = up->regi_dmaout;
3252 + if (!regi_dmaout) {
3253 + spin_unlock(&up->port.lock);
3258 + * Check for dma_descr (don't need to check for dma_eop in
3259 + * output DMA for serial).
3261 + masked_intr = REG_RD(dma, regi_dmaout, r_masked_intr);
3263 + if (masked_intr.data) {
3264 + /* We can send a new dma bunch. make it so. */
3267 + * Read jiffies_usec first.
3268 + * We want this time to be as late as possible.
3270 + up->last_tx_active_usec = GET_JIFFIES_USEC();
3271 + up->last_tx_active = jiffies;
3272 + transmit_chars_dma(up);
3275 + check_modem_status(up);
3276 + spin_unlock(&up->port.lock);
3277 + return IRQ_RETVAL(handled);
3280 +/* DMA input channel interrupt handler. */
3283 +dma_rec_interrupt(int irq, void *dev_id, struct pt_regs * regs)
3285 + struct uart_cris_port *up = (struct uart_cris_port *)dev_id;
3286 + reg_dma_r_masked_intr masked_intr;
3287 + reg_scope_instances regi_dmain;
3290 + spin_lock(&up->port.lock);
3291 + regi_dmain = up->regi_dmain;
3292 + if (!regi_dmain) {
3293 + spin_unlock(&up->port.lock);
3297 + /* Check for both dma_eop and dma_descr for the input dma channel. */
3298 + masked_intr = REG_RD(dma, regi_dmain, r_masked_intr);
3299 + if (masked_intr.data || masked_intr.in_eop) {
3300 + /* We have received something. */
3301 + receive_chars_dma(up);
3304 + check_modem_status(up);
3305 + spin_unlock(&up->port.lock);
3306 + return IRQ_RETVAL(handled);
3309 +/* "Normal" serial port interrupt handler - both rx and tx. */
3312 +ser_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3314 + struct uart_cris_port *up = (struct uart_cris_port *)dev_id;
3315 + reg_scope_instances regi_ser;
3318 + spin_lock(&up->port.lock);
3319 + if (up->regi_dmain && up->regi_dmaout) {
3320 + spin_unlock(&up->port.lock);
3324 + regi_ser = up->regi_ser;
3327 + reg_ser_r_masked_intr masked_intr;
3328 + masked_intr = REG_RD(ser, regi_ser, r_masked_intr);
3330 + * Check what interrupts are active before taking
3331 + * actions. If DMA is used the interrupt shouldn't
3334 + if (masked_intr.dav) {
3335 + receive_chars_no_dma(up);
3338 + check_modem_status(up);
3340 + if (masked_intr.tr_rdy) {
3341 + transmit_chars_no_dma(up);
3345 + spin_unlock(&up->port.lock);
3346 + return IRQ_RETVAL(handled);
3347 +} /* ser_interrupt */
3349 +static int start_recv_dma(struct uart_cris_port *up)
3351 + struct dma_descr_data *descr = up->rec_descr;
3352 + struct etrax_recv_buffer *buffer;
3355 + /* Set up the receiving descriptors. */
3356 + for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) {
3357 + buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE);
3358 + descr[i].next = (void*)virt_to_phys(&descr[i+1]);
3359 + descr[i].buf = (void*)virt_to_phys(buffer->buffer);
3360 + descr[i].after = descr[i].buf + SERIAL_DESCR_BUF_SIZE;
3362 + descr[i].out_eop = 0;
3363 + descr[i].intr = 1;
3364 + descr[i].wait = 0;
3365 + descr[i].in_eop = 0;
3370 + /* Link the last descriptor to the first. */
3371 + descr[i-1].next = (void*)virt_to_phys(&descr[0]);
3373 + /* And mark it as end of list. */
3374 + descr[i-1].eol = 1;
3376 + /* Start with the first descriptor in the list. */
3377 + up->cur_rec_descr = 0;
3378 + up->rec_context_descr.next = 0;
3379 + up->rec_context_descr.saved_data
3380 + = (dma_descr_data *)virt_to_phys(&descr[up->cur_rec_descr]);
3381 + up->rec_context_descr.saved_data_buf = descr[up->cur_rec_descr].buf;
3383 + /* Start the DMA. */
3384 + DMA_START_CONTEXT(up->regi_dmain,
3385 + virt_to_phys(&up->rec_context_descr));
3387 + /* Input DMA should be running now. */
3392 +static void start_receive(struct uart_cris_port *up)
3394 + reg_scope_instances regi_dmain = up->regi_dmain;
3396 + start_recv_dma(up);
3401 +static void start_transmitter(struct uart_cris_port *up)
3404 + reg_scope_instances regi_dmaout = up->regi_dmaout;
3405 + if (regi_dmaout) {
3406 + for (i = 0; i < SERIAL_TX_DESCRIPTORS; i++) {
3407 + memset(&up->tr_descr[i], 0, sizeof(up->tr_descr[i]));
3408 + up->tr_descr[i].eol = 1;
3409 + up->tr_descr[i].intr = 1;
3410 + up->tr_descr[i].next = (dma_descr_data *)
3411 + virt_to_phys(&up->tr_descr[i+1]);
3413 + up->tr_descr[i-1].next = (dma_descr_data *)
3414 + virt_to_phys(&up->tr_descr[0]);
3415 + up->first_tx_descr = &up->tr_descr[0];
3418 + * We'll be counting up to up->last_tx_descr->next from
3419 + * up->first_tx_descr when starting DMA, so we should make
3420 + * them the same for the very first round. If instead we'd
3421 + * set last_tx_descr = first_tx_descr, we'd rely on
3422 + * accidentally working code and data as we'd take a pass over
3423 + * the first, unused, descriptor.
3425 + up->last_tx_descr = &up->tr_descr[i-1];
3426 + up->tx_started = 0;
3427 + up->tx_pending_chars = 0;
3431 +static int serial_cris_startup(struct uart_port *port)
3433 + struct uart_cris_port *up = (struct uart_cris_port *)port;
3434 + unsigned long flags;
3435 + reg_intr_vect_rw_mask intr_mask;
3436 + reg_ser_rw_intr_mask ser_intr_mask = {0};
3437 + reg_dma_rw_intr_mask dmain_intr_mask = {0};
3438 + reg_dma_rw_intr_mask dmaout_intr_mask = {0};
3439 + reg_dma_rw_cfg cfg = {.en = 1};
3440 + reg_scope_instances regi_dma;
3442 + spin_lock_irqsave(&up->port.lock, flags);
3444 + intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
3446 + dmain_intr_mask.data = dmain_intr_mask.in_eop = regk_dma_yes;
3447 + dmaout_intr_mask.data = regk_dma_yes;
3448 + if (!up->regi_dmain)
3449 + ser_intr_mask.dav = regk_ser_yes;
3451 + if (port->line == 0) {
3452 + if (request_irq(SER0_INTR_VECT, ser_interrupt,
3453 + IRQF_SHARED | IRQF_DISABLED, "ser0",
3454 + &serial_cris_ports[0]))
3455 + panic("irq ser0");
3456 + /* Enable the ser0 irq in global config. */
3457 + intr_mask.ser0 = 1;
3458 + /* Port ser0 can use dma6 for tx and dma7 for rx. */
3459 +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
3460 + if (request_irq(DMA6_INTR_VECT, dma_tr_interrupt,
3461 + IRQF_DISABLED, "serial 0 dma tr",
3462 + &serial_cris_ports[0]))
3463 + panic("irq ser0txdma");
3464 + crisv32_request_dma(6, "ser0", DMA_PANIC_ON_ERROR, 0,
3466 + /* Enable the dma6 irq in global config. */
3467 + intr_mask.dma6 = 1;
3469 +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
3470 + if (request_irq(DMA7_INTR_VECT, dma_rec_interrupt,
3471 + IRQF_DISABLED, "serial 0 dma rec",
3472 + &serial_cris_ports[0]))
3473 + panic("irq ser0rxdma");
3474 + crisv32_request_dma(7, "ser0", DMA_PANIC_ON_ERROR, 0,
3476 + /* Enable the dma7 irq in global config. */
3477 + intr_mask.dma7 = 1;
3479 + } else if (port->line == 1) {
3480 + if (request_irq(SER1_INTR_VECT, ser_interrupt,
3481 + IRQF_SHARED | IRQF_DISABLED, "ser1",
3482 + &serial_cris_ports[1]))
3483 + panic("irq ser1");
3484 + /* Enable the ser1 irq in global config. */
3485 + intr_mask.ser1 = 1;
3487 + /* Port ser1 can use dma4 for tx and dma5 for rx. */
3488 +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA4_OUT
3489 + if (request_irq(DMA4_INTR_VECT, dma_tr_interrupt,
3490 + IRQF_DISABLED, "serial 1 dma tr",
3491 + &serial_cris_ports[1]))
3492 + panic("irq ser1txdma");
3493 + crisv32_request_dma(4, "ser1", DMA_PANIC_ON_ERROR, 0,
3495 + /* Enable the dma4 irq in global config. */
3496 + intr_mask.dma4 = 1;
3498 +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA5_IN
3499 + if (request_irq(DMA5_INTR_VECT, dma_rec_interrupt,
3500 + IRQF_DISABLED, "serial 1 dma rec",
3501 + &serial_cris_ports[1]))
3502 + panic("irq ser1rxdma");
3503 + crisv32_request_dma(5, "ser1", DMA_PANIC_ON_ERROR, 0,
3505 + /* Enable the dma5 irq in global config. */
3506 + intr_mask.dma5 = 1;
3508 + } else if (port->line == 2) {
3509 + if (request_irq(SER2_INTR_VECT, ser_interrupt,
3510 + IRQF_SHARED | IRQF_DISABLED, "ser2",
3511 + &serial_cris_ports[2]))
3512 + panic("irq ser2");
3513 + /* Enable the ser2 irq in global config. */
3514 + intr_mask.ser2 = 1;
3516 + /* Port ser2 can use dma2 for tx and dma3 for rx. */
3517 +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
3518 + if (request_irq(DMA2_INTR_VECT, dma_tr_interrupt,
3519 + IRQF_DISABLED, "serial 2 dma tr",
3520 + &serial_cris_ports[2]))
3521 + panic("irq ser2txdma");
3522 + crisv32_request_dma(2, "ser2", DMA_PANIC_ON_ERROR, 0,
3524 + /* Enable the dma2 irq in global config. */
3525 + intr_mask.dma2 = 1;
3527 +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
3528 + if (request_irq(DMA3_INTR_VECT, dma_rec_interrupt,
3529 + IRQF_DISABLED, "serial 2 dma rec",
3530 + &serial_cris_ports[2]))
3531 + panic("irq ser2rxdma");
3532 + crisv32_request_dma(3, "ser2", DMA_PANIC_ON_ERROR, 0,
3534 + /* Enable the dma3 irq in global config. */
3535 + intr_mask.dma3 = 1;
3537 + } else if (port->line == 3) {
3538 + if (request_irq(SER3_INTR_VECT, ser_interrupt,
3539 + IRQF_SHARED | IRQF_DISABLED, "ser3",
3540 + &serial_cris_ports[3]))
3541 + panic("irq ser3" );
3542 + /* Enable the ser3 irq in global config. */
3543 + intr_mask.ser3 = 1;
3545 + /* Port ser3 can use dma8 for tx and dma9 for rx. */
3546 +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA8_OUT
3547 + if (request_irq(DMA8_INTR_VECT, dma_tr_interrupt,
3548 + IRQF_DISABLED, "serial 3 dma tr",
3549 + &serial_cris_ports[3]))
3550 + panic("irq ser3txdma");
3551 + crisv32_request_dma(8, "ser3", DMA_PANIC_ON_ERROR, 0,
3553 + /* Enable the dma8 irq in global config. */
3554 + intr_mask.dma8 = 1;
3556 +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA9_IN
3557 + if (request_irq(DMA9_INTR_VECT, dma_rec_interrupt,
3558 + IRQF_DISABLED, "serial 3 dma rec",
3559 + &serial_cris_ports[3]))
3560 + panic("irq ser3rxdma");
3561 + crisv32_request_dma(9, "ser3", DMA_PANIC_ON_ERROR, 0,
 3563 + /* Enable the dma9 irq in global config. */
3564 + intr_mask.dma9 = 1;
3569 + * Reset the DMA channels and make sure their interrupts are cleared.
3572 + regi_dma = up->regi_dmain;
3574 + reg_dma_rw_ack_intr ack_intr = { 0 };
3575 + DMA_RESET(regi_dma);
3576 + /* Wait until reset cycle is complete. */
3577 + DMA_WAIT_UNTIL_RESET(regi_dma);
3578 + REG_WR(dma, regi_dma, rw_cfg, cfg);
3579 + /* Make sure the irqs are cleared. */
3580 + ack_intr.group = 1;
3581 + ack_intr.ctxt = 1;
3582 + ack_intr.data = 1;
3583 + ack_intr.in_eop = 1;
3584 + ack_intr.stream_cmd = 1;
3585 + REG_WR(dma, regi_dma, rw_ack_intr, ack_intr);
3587 + regi_dma = up->regi_dmaout;
3589 + reg_dma_rw_ack_intr ack_intr = { 0 };
3590 + DMA_RESET(regi_dma);
3591 + /* Wait until reset cycle is complete. */
3592 + DMA_WAIT_UNTIL_RESET(regi_dma);
3593 + REG_WR(dma, regi_dma, rw_cfg, cfg);
3594 + /* Make sure the irqs are cleared. */
3595 + ack_intr.group = 1;
3596 + ack_intr.ctxt = 1;
3597 + ack_intr.data = 1;
3598 + ack_intr.in_eop = 1;
3599 + ack_intr.stream_cmd = 1;
3600 + REG_WR(dma, regi_dma, rw_ack_intr, ack_intr);
3603 + REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
3604 + REG_WR(ser, up->regi_ser, rw_intr_mask, ser_intr_mask);
3605 + if (up->regi_dmain)
3606 + REG_WR(dma, up->regi_dmain, rw_intr_mask, dmain_intr_mask);
3607 + if (up->regi_dmaout)
3608 + REG_WR(dma, up->regi_dmaout, rw_intr_mask, dmaout_intr_mask);
3610 + start_receive(up);
3611 + start_transmitter(up);
3613 + serial_cris_set_mctrl(&up->port, up->port.mctrl);
3614 + spin_unlock_irqrestore(&up->port.lock, flags);
3619 +static void serial_cris_shutdown(struct uart_port *port)
3621 + struct uart_cris_port *up = (struct uart_cris_port *)port;
3622 + unsigned long flags;
3623 + reg_intr_vect_rw_mask intr_mask;
3625 + spin_lock_irqsave(&up->port.lock, flags);
3627 + intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
3628 + serial_cris_stop_tx(port);
3629 + serial_cris_stop_rx(port);
3631 + if (port->line == 0) {
3632 + intr_mask.ser0 = 0;
3633 + free_irq(SER0_INTR_VECT, &serial_cris_ports[0]);
3634 +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
3635 + intr_mask.dma6 = 0;
3636 + crisv32_free_dma(6);
3637 + free_irq(DMA6_INTR_VECT, &serial_cris_ports[0]);
3639 +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
3640 + intr_mask.dma7 = 0;
3641 + crisv32_free_dma(7);
3642 + free_irq(DMA7_INTR_VECT, &serial_cris_ports[0]);
3644 + } else if (port->line == 1) {
3645 + intr_mask.ser1 = 0;
3646 + free_irq(SER1_INTR_VECT, &serial_cris_ports[1]);
3647 +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA4_OUT
3648 + intr_mask.dma4 = 0;
3649 + crisv32_free_dma(4);
3650 + free_irq(DMA4_INTR_VECT, &serial_cris_ports[1]);
3652 +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA5_IN
3653 + intr_mask.dma5 = 0;
3654 + crisv32_free_dma(5);
3655 + free_irq(DMA5_INTR_VECT, &serial_cris_ports[1]);
3657 + } else if (port->line == 2) {
3658 + intr_mask.ser2 = 0;
3659 + free_irq(SER2_INTR_VECT, &serial_cris_ports[2]);
3660 +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
3661 + intr_mask.dma2 = 0;
3662 + crisv32_free_dma(2);
3663 + free_irq(DMA2_INTR_VECT, &serial_cris_ports[2]);
3665 +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
3666 + intr_mask.dma3 = 0;
3667 + crisv32_free_dma(3);
3668 + free_irq(DMA3_INTR_VECT, &serial_cris_ports[2]);
3670 + } else if (port->line == 3) {
3671 + intr_mask.ser3 = 0;
3672 + free_irq(SER3_INTR_VECT, &serial_cris_ports[3]);
3673 +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA8_OUT
3674 + intr_mask.dma8 = 0;
3675 + crisv32_free_dma(8);
3676 + free_irq(DMA8_INTR_VECT, &serial_cris_ports[3]);
3678 +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA9_IN
3679 + intr_mask.dma9 = 0;
3680 + crisv32_free_dma(9);
3681 + free_irq(DMA9_INTR_VECT, &serial_cris_ports[3]);
3685 + REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
3687 + serial_cris_set_mctrl(&up->port, up->port.mctrl);
3689 + if (up->regi_dmain) {
3690 + struct etrax_recv_buffer *rb;
3691 + struct etrax_recv_buffer *rb_next;
3693 + struct dma_descr_data *descr;
3696 + * In case of DMA and receive errors, there might be pending
3697 + * receive buffers still linked here and not flushed upwards.
3700 + for (rb = up->first_recv_buffer; rb != NULL; rb = rb_next) {
3701 + rb_next = rb->next;
3704 + up->first_recv_buffer = NULL;
3705 + up->last_recv_buffer = NULL;
3708 + * Also release buffers that were attached to the DMA
3709 + * before we shut down the hardware above.
3711 + for (i = 0, descr = up->rec_descr;
3712 + i < SERIAL_RECV_DESCRIPTORS;
3714 + if (descr[i].buf) {
3715 + rb = phys_to_virt((u32) descr[i].buf)
3718 + descr[i].buf = NULL;
3722 + spin_unlock_irqrestore(&up->port.lock, flags);
3727 +serial_cris_set_termios(struct uart_port *port, struct termios *termios,
3728 + struct termios *old)
3730 + struct uart_cris_port *up = (struct uart_cris_port *)port;
3731 + unsigned long flags;
3732 + reg_ser_rw_xoff xoff;
3733 + reg_ser_rw_xoff_clr xoff_clr = {0};
3734 + reg_ser_rw_tr_ctrl tx_ctrl = {0};
3735 + reg_ser_rw_tr_dma_en tx_dma_en = {0};
3736 + reg_ser_rw_rec_ctrl rx_ctrl = {0};
3737 + reg_ser_rw_tr_baud_div tx_baud_div = {0};
3738 + reg_ser_rw_rec_baud_div rx_baud_div = {0};
3739 + reg_ser_r_stat_din rstat;
3743 + termios->c_cflag == old->c_cflag &&
3744 + termios->c_iflag == old->c_iflag)
3747 + /* Start with default settings and then fill in changes. */
3749 + /* Tx: 8 bit, no/even parity, 1 stop bit, no cts. */
3750 + tx_ctrl.base_freq = regk_ser_f29_493;
3753 +#ifdef CONFIG_ETRAX_RS485
3754 + if (up->rs485.enabled && (up->port_type != TYPE_485FD)) {
3755 + tx_ctrl.auto_rts = regk_ser_yes;
3758 + tx_ctrl.auto_rts = regk_ser_no;
3760 + tx_ctrl.auto_cts = 0;
3761 + /* Rx: 8 bit, no/even parity. */
3762 + if (up->regi_dmain) {
3763 + rx_ctrl.dma_mode = 1;
3764 + rx_ctrl.auto_eop = 1;
3766 + rx_ctrl.dma_err = regk_ser_stop;
3767 + rx_ctrl.sampling = regk_ser_majority;
3768 + rx_ctrl.timeout = 1;
3770 +#ifdef CONFIG_ETRAX_RS485
3771 + if (up->rs485.enabled && (up->port_type != TYPE_485FD)) {
3772 +# ifdef CONFIG_ETRAX_RS485_DISABLE_RECEIVER
3773 + rx_ctrl.half_duplex = regk_ser_yes;
3775 + rx_ctrl.rts_n = up->rs485.rts_after_sent ?
3776 + regk_ser_active : regk_ser_inactive;
3777 + } else if (up->port_type == TYPE_485FD) {
3778 + rx_ctrl.rts_n = regk_ser_active;
3781 + rx_ctrl.rts_n = regk_ser_inactive;
3783 + /* Common for tx and rx: 8N1. */
3784 + tx_ctrl.data_bits = regk_ser_bits8;
3785 + rx_ctrl.data_bits = regk_ser_bits8;
3786 + tx_ctrl.par = regk_ser_even;
3787 + rx_ctrl.par = regk_ser_even;
3788 + tx_ctrl.par_en = regk_ser_no;
3789 + rx_ctrl.par_en = regk_ser_no;
3791 + tx_ctrl.stop_bits = regk_ser_bits1;
3794 + /* Change baud-rate and write it to the hardware. */
3796 + /* baud_clock = base_freq / (divisor*8)
3797 + * divisor = base_freq / (baud_clock * 8)
3798 + * base_freq is either:
3799 + * off, ext, 29.493MHz, 32.000 MHz, 32.768 MHz or 100 MHz
 3800 + * 29.493MHz is used for standard baudrates
3804 + * For the console port we keep the original baudrate here. Not very
3807 + if ((port != console_port) || old)
3808 + baud = uart_get_baud_rate(port, termios, old, 0,
3809 + port->uartclk / 8);
3811 + baud = console_baud;
3813 + tx_baud_div.div = 29493000 / (8 * baud);
3814 + /* Rx uses same as tx. */
3815 + rx_baud_div.div = tx_baud_div.div;
3816 + rx_ctrl.base_freq = tx_ctrl.base_freq;
3818 + if ((termios->c_cflag & CSIZE) == CS7) {
3819 + /* Set 7 bit mode. */
3820 + tx_ctrl.data_bits = regk_ser_bits7;
3821 + rx_ctrl.data_bits = regk_ser_bits7;
3824 + if (termios->c_cflag & CSTOPB) {
3825 + /* Set 2 stop bit mode. */
3826 + tx_ctrl.stop_bits = regk_ser_bits2;
3829 + if (termios->c_cflag & PARENB) {
3830 + /* Enable parity. */
3831 + tx_ctrl.par_en = regk_ser_yes;
3832 + rx_ctrl.par_en = regk_ser_yes;
3835 + if (termios->c_cflag & CMSPAR) {
3836 + if (termios->c_cflag & PARODD) {
3837 + /* Set mark parity if PARODD and CMSPAR. */
3838 + tx_ctrl.par = regk_ser_mark;
3839 + rx_ctrl.par = regk_ser_mark;
3841 + tx_ctrl.par = regk_ser_space;
3842 + rx_ctrl.par = regk_ser_space;
3845 + if (termios->c_cflag & PARODD) {
3846 + /* Set odd parity. */
3847 + tx_ctrl.par = regk_ser_odd;
3848 + rx_ctrl.par = regk_ser_odd;
3852 + if (termios->c_cflag & CRTSCTS) {
3853 + /* Enable automatic CTS handling. */
3854 + tx_ctrl.auto_cts = regk_ser_yes;
3857 + /* Make sure the tx and rx are enabled. */
3858 + tx_ctrl.en = regk_ser_yes;
3859 + rx_ctrl.en = regk_ser_yes;
3862 + * Wait for tr_idle in case a character is being output, so it won't
3863 + * be damaged by the changes we do below. It seems the termios
3864 + * changes "sometimes" (we can't see e.g. a tcsetattr TCSANOW
3865 + * parameter here) should take place no matter what state. However,
3866 + * in case we should wait, we may have a non-empty transmitter state
3867 + * as we tell the upper layers that we're all done when we've passed
3868 + * characters to the hardware, but we don't wait for them being
3869 + * actually shifted out.
3871 + spin_lock_irqsave(&port->lock, flags);
3874 + * None of our interrupts re-enable DMA, so it's thankfully ok to
3875 + * disable it once, outside the loop.
3878 + REG_WR(ser, up->regi_ser, rw_tr_dma_en, tx_dma_en);
3881 + * Make sure we have integrity between the read r_stat status
3882 + * and us writing the registers below, but don't busy-wait
3883 + * with interrupts off. We need to keep the port lock though
3884 + * (if we go SMP), so nobody else writes characters.
3886 + local_irq_restore(flags);
3887 + local_irq_save(flags);
3888 + rstat = REG_RD(ser, up->regi_ser, r_stat_din);
3889 + } while (!rstat.tr_idle);
3891 + /* Actually write the control regs (if modified) to the hardware. */
3893 + uart_update_timeout(port, termios->c_cflag, port->uartclk/8);
3894 + MODIFY_REG(up->regi_ser, rw_rec_baud_div, rx_baud_div);
3895 + MODIFY_REG(up->regi_ser, rw_rec_ctrl, rx_ctrl);
3897 + MODIFY_REG(up->regi_ser, rw_tr_baud_div, tx_baud_div);
3898 + MODIFY_REG(up->regi_ser, rw_tr_ctrl, tx_ctrl);
3900 + tx_dma_en.en = up->regi_dmaout != 0;
3901 + REG_WR(ser, up->regi_ser, rw_tr_dma_en, tx_dma_en);
3903 + xoff = REG_RD(ser, up->regi_ser, rw_xoff);
3905 + if (up->port.info && (up->port.info->tty->termios->c_iflag & IXON)) {
3906 + xoff.chr = STOP_CHAR(up->port.info->tty);
3907 + xoff.automatic = regk_ser_yes;
3909 + xoff.automatic = regk_ser_no;
3911 + MODIFY_REG(up->regi_ser, rw_xoff, xoff);
3914 + * Make sure we don't start in an automatically shut-off state due to
3915 + * a previous early exit.
3918 + REG_WR(ser, up->regi_ser, rw_xoff_clr, xoff_clr);
3920 + serial_cris_set_mctrl(&up->port, up->port.mctrl);
3921 + spin_unlock_irqrestore(&up->port.lock, flags);
3924 +static const char *
3925 +serial_cris_type(struct uart_port *port)
3930 +static void serial_cris_release_port(struct uart_port *port)
3934 +static int serial_cris_request_port(struct uart_port *port)
3939 +static void serial_cris_config_port(struct uart_port *port, int flags)
3941 + struct uart_cris_port *up = (struct uart_cris_port *)port;
3942 + up->port.type = PORT_CRIS;
3945 +#if defined(CONFIG_ETRAX_RS485)
3947 +static void cris_set_rs485_mode(struct uart_cris_port* up) {
3948 + reg_ser_rw_tr_ctrl tr_ctrl;
3949 + reg_ser_rw_rec_ctrl rec_ctrl;
3950 + reg_scope_instances regi_ser = up->regi_ser;
3952 + if (up->port_type == TYPE_485FD)
3953 + /* We do not want to change anything if we are in 485FD mode */
3956 + tr_ctrl = REG_RD(ser, regi_ser, rw_tr_ctrl);
3957 + rec_ctrl = REG_RD(ser, regi_ser, rw_rec_ctrl);
3959 + /* Set port in RS-485 mode */
3960 + if (up->rs485.enabled) {
3961 + tr_ctrl.auto_rts = regk_ser_yes;
3962 + rec_ctrl.rts_n = up->rs485.rts_after_sent ?
3963 + regk_ser_active : regk_ser_inactive;
3964 +#ifdef CONFIG_ETRAX_RS485_DISABLE_RECEIVER
3965 + rec_ctrl.half_duplex = regk_ser_yes;
3968 + /* Set port to RS-232 mode */
3970 + rec_ctrl.rts_n = regk_ser_inactive;
3971 + tr_ctrl.auto_rts = regk_ser_no;
3972 + rec_ctrl.half_duplex = regk_ser_no;
3975 + REG_WR(ser, regi_ser, rw_tr_ctrl, tr_ctrl);
3976 + REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
3979 +/* Enable/disable RS-485 mode on selected port. */
3981 +cris_enable_rs485(struct uart_cris_port* up, struct rs485_control *r)
3983 + if (up->port_type == TYPE_485FD)
 3984 + /* Port in 485FD mode cannot change mode */
3987 + up->rs485.enabled = 0x1 & r->enabled;
3988 + up->rs485.rts_on_send = 0x01 & r->rts_on_send;
3989 + up->rs485.rts_after_sent = 0x01 & r->rts_after_sent;
3990 + up->rs485.delay_rts_before_send = r->delay_rts_before_send;
3992 + cris_set_rs485_mode(up);
3998 +/* Enable RS485 mode on port and send the data. Port will stay
3999 + * in 485 mode after the data has been sent.
4002 +cris_write_rs485(struct uart_cris_port* up, const unsigned char *buf,
4005 + up->rs485.enabled = 1;
4007 + /* Set the port in RS485 mode */
4008 + cris_set_rs485_mode(up);
4010 + /* Send the data */
4011 + count = serial_cris_driver.tty_driver->write(up->port.info->tty, buf, count);
4016 +#endif /* CONFIG_ETRAX_RS485 */
4018 +static int serial_cris_ioctl(struct uart_port *port, unsigned int cmd,
4019 + unsigned long arg)
4021 + struct uart_cris_port *up = (struct uart_cris_port *)port;
4024 +#if defined(CONFIG_ETRAX_RS485)
4025 + case TIOCSERSETRS485: {
4026 + struct rs485_control rs485ctrl;
4027 + if (copy_from_user(&rs485ctrl, (struct rs485_control*) arg,
4028 + sizeof(rs485ctrl)))
4031 + return cris_enable_rs485(up, &rs485ctrl);
4034 + case TIOCSERWRRS485: {
4035 + struct rs485_write rs485wr;
4036 + if (copy_from_user(&rs485wr, (struct rs485_write*)arg,
4040 + return cris_write_rs485(up, rs485wr.outc, rs485wr.outc_size);
4044 + return -ENOIOCTLCMD;
4050 +static const struct uart_ops serial_cris_pops = {
4051 + .tx_empty = serial_cris_tx_empty,
4052 + .set_mctrl = serial_cris_set_mctrl,
4053 + .get_mctrl = serial_cris_get_mctrl,
4054 + .stop_tx = serial_cris_stop_tx,
4055 + .start_tx = serial_cris_start_tx,
4056 + .send_xchar = serial_cris_send_xchar,
4057 + .stop_rx = serial_cris_stop_rx,
4058 + .enable_ms = serial_cris_enable_ms,
4059 + .break_ctl = serial_cris_break_ctl,
4060 + .startup = serial_cris_startup,
4061 + .shutdown = serial_cris_shutdown,
4062 + .set_termios = serial_cris_set_termios,
4063 + .type = serial_cris_type,
4064 + .release_port = serial_cris_release_port,
4065 + .request_port = serial_cris_request_port,
4066 + .config_port = serial_cris_config_port,
4067 + .ioctl = serial_cris_ioctl,
4071 + * It's too easy to break CONFIG_ETRAX_DEBUG_PORT_NULL and the
4072 + * no-config choices by adding and moving code to before a necessary
4073 + * early exit in all functions for the special case of
4074 + * up->regi_ser == 0. This collection of dummy functions lets us
4075 + * avoid that. Maybe there should be a generic table of dummy serial
4079 +static unsigned int serial_cris_tx_empty_dummy(struct uart_port *port)
4081 + return TIOCSER_TEMT;
4084 +static void serial_cris_set_mctrl_dummy(struct uart_port *port,
4085 + unsigned int mctrl)
4089 +static unsigned int serial_cris_get_mctrl_dummy(struct uart_port *port)
4094 +static void serial_cris_stop_tx_dummy(struct uart_port *port)
4098 +static void serial_cris_start_tx_dummy(struct uart_port *port)
4100 + /* Discard outbound characters. */
4101 + struct uart_cris_port *up = (struct uart_cris_port *)port;
4102 + struct circ_buf *xmit = &up->port.info->xmit;
4103 + xmit->tail = xmit->head;
4104 + uart_write_wakeup(port);
4107 +#define serial_cris_stop_rx_dummy serial_cris_stop_tx_dummy
4109 +#define serial_cris_enable_ms_dummy serial_cris_stop_tx_dummy
4111 +static void serial_cris_break_ctl_dummy(struct uart_port *port,
4116 +static int serial_cris_startup_dummy(struct uart_port *port)
4121 +#define serial_cris_shutdown_dummy serial_cris_stop_tx_dummy
4124 +serial_cris_set_termios_dummy(struct uart_port *port, struct termios *termios,
4125 + struct termios *old)
4129 +#define serial_cris_release_port_dummy serial_cris_stop_tx_dummy
4130 +#define serial_cris_request_port_dummy serial_cris_startup_dummy
4132 +static const struct uart_ops serial_cris_dummy_pops = {
4134 + * We *could* save one or two of those with different
4135 + * signature by casting and knowledge of the ABI, but it's
4136 + * just not worth the maintenance headache.
4137 + * For the ones we don't define here, the default (usually meaning
4138 + * "unimplemented") makes sense.
4140 + .tx_empty = serial_cris_tx_empty_dummy,
4141 + .set_mctrl = serial_cris_set_mctrl_dummy,
4142 + .get_mctrl = serial_cris_get_mctrl_dummy,
4143 + .stop_tx = serial_cris_stop_tx_dummy,
4144 + .start_tx = serial_cris_start_tx_dummy,
4145 + .stop_rx = serial_cris_stop_rx_dummy,
4146 + .enable_ms = serial_cris_enable_ms_dummy,
4147 + .break_ctl = serial_cris_break_ctl_dummy,
4148 + .startup = serial_cris_startup_dummy,
4149 + .shutdown = serial_cris_shutdown_dummy,
4150 + .set_termios = serial_cris_set_termios_dummy,
4152 + /* This one we keep the same. */
4153 + .type = serial_cris_type,
4155 + .release_port = serial_cris_release_port_dummy,
4156 + .request_port = serial_cris_request_port_dummy,
4159 + * This one we keep the same too, as long as it doesn't do
4160 + * anything else but to set the type.
4162 + .config_port = serial_cris_config_port,
4165 +static void cris_serial_port_init(struct uart_port *port, int line)
4167 + struct uart_cris_port *up = (struct uart_cris_port *)port;
4168 + static int first = 1;
4170 + if (up->initialized)
4172 + up->initialized = 1;
4173 + port->line = line;
4174 + spin_lock_init(&port->lock);
4176 + up->regi_ser == 0 ? &serial_cris_dummy_pops :
4177 + &serial_cris_pops;
4178 + port->irq = up->irq;
4179 + port->iobase = up->regi_ser ? up->regi_ser : 1;
4180 + port->uartclk = 29493000;
4183 + * We can't fit any more than 255 here (unsigned char), though
4184 + * actually UART_XMIT_SIZE characters could be pending output (if it
4185 + * wasn't for the single test in transmit_chars_dma). At time of this
4186 + * writing, the definition of "fifosize" is here the amount of
4187 + * characters that can be pending output after a start_tx call until
4188 + * tx_empty returns 1: see serial_core.c:uart_wait_until_sent. This
4189 + * matters for timeout calculations unfortunately, but keeping larger
4190 + * amounts at the DMA wouldn't win much so let's just play nice.
4192 + port->fifosize = 255;
4193 + port->flags = UPF_BOOT_AUTOCONF;
4195 +#ifdef CONFIG_ETRAX_RS485
4196 + /* Set sane defaults. */
4197 + up->rs485.rts_on_send = 0;
4198 + up->rs485.rts_after_sent = 1;
4199 + up->rs485.delay_rts_before_send = 0;
4200 + if (up->port_type > TYPE_232)
4201 + up->rs485.enabled = 1;
4203 + up->rs485.enabled = 0;
4208 +#ifdef CONFIG_ETRAX_SERIAL_PORT0
4211 +#ifdef CONFIG_ETRAX_SERIAL_PORT1
4214 +#ifdef CONFIG_ETRAX_SERIAL_PORT2
4217 +#ifdef CONFIG_ETRAX_SERIAL_PORT3
4223 +static int __init serial_cris_init(void)
4226 + reg_ser_rw_rec_ctrl rec_ctrl;
4227 + printk(KERN_INFO "Serial: CRISv32 driver $Revision: 1.78 $ ");
4229 + ret = uart_register_driver(&serial_cris_driver);
4233 + for (i = 0; i < UART_NR; i++) {
4234 + if (serial_cris_ports[i].used) {
4235 +#ifdef CONFIG_ETRAX_RS485
4236 + /* Make sure that the RTS pin stays low when allocating
4237 + * pins for a port in 485 mode.
4239 + if (serial_cris_ports[i].port_type > TYPE_232) {
4240 + rec_ctrl = REG_RD(ser, serial_cris_ports[i].regi_ser, rw_rec_ctrl);
4241 + rec_ctrl.rts_n = regk_ser_active;
4242 + REG_WR(ser, serial_cris_ports[i].regi_ser, rw_rec_ctrl, rec_ctrl);
4245 + switch (serial_cris_ports[i].regi_ser) {
4247 + if (crisv32_pinmux_alloc_fixed(pinmux_ser1)) {
4248 + printk("Failed to allocate pins for ser1, disable port\n");
4249 + serial_cris_ports[i].used = 0;
4254 + if (crisv32_pinmux_alloc_fixed(pinmux_ser2)) {
4255 + printk("Failed to allocate pins for ser2, disable port\n");
4256 + serial_cris_ports[i].used = 0;
4261 + if (crisv32_pinmux_alloc_fixed(pinmux_ser3)) {
4262 + printk("Failed to allocate pins for ser3, disable port\n");
4263 + serial_cris_ports[i].used = 0;
4269 + struct uart_port *port = &serial_cris_ports[i].port;
4270 + cris_console.index = i;
4271 + cris_serial_port_init(port, i);
4272 + uart_add_one_port(&serial_cris_driver, port);
4280 +static void __exit serial_cris_exit(void)
4283 + for (i = 0; i < UART_NR; i++)
4284 + if (serial_cris_ports[i].used) {
4285 + switch (serial_cris_ports[i].regi_ser) {
4287 + crisv32_pinmux_dealloc_fixed(pinmux_ser1);
4290 + crisv32_pinmux_dealloc_fixed(pinmux_ser2);
4293 + crisv32_pinmux_dealloc_fixed(pinmux_ser3);
4296 + uart_remove_one_port(&serial_cris_driver,
4297 + &serial_cris_ports[i].port);
4299 + uart_unregister_driver(&serial_cris_driver);
4302 +module_init(serial_cris_init);
4303 +module_exit(serial_cris_exit);
4304 --- linux-2.6.19.2.orig/drivers/usb/host/hc_crisv10.c 2007-01-10 20:10:37.000000000 +0100
4305 +++ linux-2.6.19.2.dev/drivers/usb/host/hc-crisv10.c 2007-02-26 20:58:29.000000000 +0100
4308 - * usb-host.c: ETRAX 100LX USB Host Controller Driver (HCD)
4310 - * Copyright (c) 2002, 2003 Axis Communications AB.
4311 + * ETRAX 100LX USB Host Controller Driver
4313 + * Copyright (C) 2005, 2006 Axis Communications AB
4315 + * Author: Konrad Eriksson <konrad.eriksson@axis.se>
4319 +#include <linux/module.h>
4320 #include <linux/kernel.h>
4321 -#include <linux/delay.h>
4322 -#include <linux/ioport.h>
4323 -#include <linux/sched.h>
4324 -#include <linux/slab.h>
4325 -#include <linux/errno.h>
4326 -#include <linux/unistd.h>
4327 -#include <linux/interrupt.h>
4328 #include <linux/init.h>
4329 -#include <linux/list.h>
4330 +#include <linux/moduleparam.h>
4331 #include <linux/spinlock.h>
4332 +#include <linux/usb.h>
4333 +#include <linux/platform_device.h>
4335 -#include <asm/uaccess.h>
4337 #include <asm/irq.h>
4338 -#include <asm/dma.h>
4339 -#include <asm/system.h>
4340 -#include <asm/arch/svinto.h>
4341 +#include <asm/arch/dma.h>
4342 +#include <asm/arch/io_interface_mux.h>
4344 -#include <linux/usb.h>
4345 -/* Ugly include because we don't live with the other host drivers. */
4346 -#include <../drivers/usb/core/hcd.h>
4347 -#include <../drivers/usb/core/usb.h>
4349 -#include "hc_crisv10.h"
4350 +#include "../core/hcd.h"
4351 +#include "../core/hub.h"
4352 +#include "hc-crisv10.h"
4353 +#include "hc-cris-dbg.h"
4356 +/***************************************************************************/
4357 +/***************************************************************************/
4358 +/* Host Controller settings */
4359 +/***************************************************************************/
4360 +/***************************************************************************/
4362 +#define VERSION "1.00"
4363 +#define COPYRIGHT "(c) 2005, 2006 Axis Communications AB"
4364 +#define DESCRIPTION "ETRAX 100LX USB Host Controller"
4366 #define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
4367 #define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
4368 #define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
4370 -static const char *usb_hcd_version = "$Revision: 1.2 $";
4373 -#define KERN_DEBUG ""
4376 -#undef USB_DEBUG_RH
4377 -#undef USB_DEBUG_EPID
4378 -#undef USB_DEBUG_SB
4379 -#undef USB_DEBUG_DESC
4380 -#undef USB_DEBUG_URB
4381 -#undef USB_DEBUG_TRACE
4382 -#undef USB_DEBUG_BULK
4383 -#undef USB_DEBUG_CTRL
4384 -#undef USB_DEBUG_INTR
4385 -#undef USB_DEBUG_ISOC
4387 -#ifdef USB_DEBUG_RH
4388 -#define dbg_rh(format, arg...) printk(KERN_DEBUG __FILE__ ": (RH) " format "\n" , ## arg)
4390 -#define dbg_rh(format, arg...) do {} while (0)
4393 -#ifdef USB_DEBUG_EPID
4394 -#define dbg_epid(format, arg...) printk(KERN_DEBUG __FILE__ ": (EPID) " format "\n" , ## arg)
4396 -#define dbg_epid(format, arg...) do {} while (0)
4399 -#ifdef USB_DEBUG_SB
4400 -#define dbg_sb(format, arg...) printk(KERN_DEBUG __FILE__ ": (SB) " format "\n" , ## arg)
4402 -#define dbg_sb(format, arg...) do {} while (0)
4405 -#ifdef USB_DEBUG_CTRL
4406 -#define dbg_ctrl(format, arg...) printk(KERN_DEBUG __FILE__ ": (CTRL) " format "\n" , ## arg)
4408 -#define dbg_ctrl(format, arg...) do {} while (0)
4411 -#ifdef USB_DEBUG_BULK
4412 -#define dbg_bulk(format, arg...) printk(KERN_DEBUG __FILE__ ": (BULK) " format "\n" , ## arg)
4414 -#define dbg_bulk(format, arg...) do {} while (0)
4417 -#ifdef USB_DEBUG_INTR
4418 -#define dbg_intr(format, arg...) printk(KERN_DEBUG __FILE__ ": (INTR) " format "\n" , ## arg)
4420 -#define dbg_intr(format, arg...) do {} while (0)
4423 -#ifdef USB_DEBUG_ISOC
4424 -#define dbg_isoc(format, arg...) printk(KERN_DEBUG __FILE__ ": (ISOC) " format "\n" , ## arg)
4426 -#define dbg_isoc(format, arg...) do {} while (0)
4429 -#ifdef USB_DEBUG_TRACE
4430 -#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
4431 -#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
4433 -#define DBFENTER do {} while (0)
4434 -#define DBFEXIT do {} while (0)
4437 -#define usb_pipeslow(pipe) (((pipe) >> 26) & 1)
4439 -/*-------------------------------------------------------------------
4441 - -------------------------------------------------------------------*/
4443 -static __u8 root_hub_dev_des[] =
4445 - 0x12, /* __u8 bLength; */
4446 - 0x01, /* __u8 bDescriptorType; Device */
4447 - 0x00, /* __le16 bcdUSB; v1.0 */
4449 - 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
4450 - 0x00, /* __u8 bDeviceSubClass; */
4451 - 0x00, /* __u8 bDeviceProtocol; */
4452 - 0x08, /* __u8 bMaxPacketSize0; 8 Bytes */
4453 - 0x00, /* __le16 idVendor; */
4455 - 0x00, /* __le16 idProduct; */
4457 - 0x00, /* __le16 bcdDevice; */
4459 - 0x00, /* __u8 iManufacturer; */
4460 - 0x02, /* __u8 iProduct; */
4461 - 0x01, /* __u8 iSerialNumber; */
4462 - 0x01 /* __u8 bNumConfigurations; */
4465 -/* Configuration descriptor */
4466 -static __u8 root_hub_config_des[] =
4468 - 0x09, /* __u8 bLength; */
4469 - 0x02, /* __u8 bDescriptorType; Configuration */
4470 - 0x19, /* __le16 wTotalLength; */
4472 - 0x01, /* __u8 bNumInterfaces; */
4473 - 0x01, /* __u8 bConfigurationValue; */
4474 - 0x00, /* __u8 iConfiguration; */
4475 - 0x40, /* __u8 bmAttributes; Bit 7: Bus-powered */
4476 - 0x00, /* __u8 MaxPower; */
4479 - 0x09, /* __u8 if_bLength; */
4480 - 0x04, /* __u8 if_bDescriptorType; Interface */
4481 - 0x00, /* __u8 if_bInterfaceNumber; */
4482 - 0x00, /* __u8 if_bAlternateSetting; */
4483 - 0x01, /* __u8 if_bNumEndpoints; */
4484 - 0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
4485 - 0x00, /* __u8 if_bInterfaceSubClass; */
4486 - 0x00, /* __u8 if_bInterfaceProtocol; */
4487 - 0x00, /* __u8 if_iInterface; */
4490 - 0x07, /* __u8 ep_bLength; */
4491 - 0x05, /* __u8 ep_bDescriptorType; Endpoint */
4492 - 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
4493 - 0x03, /* __u8 ep_bmAttributes; Interrupt */
4494 - 0x08, /* __le16 ep_wMaxPacketSize; 8 Bytes */
4496 - 0xff /* __u8 ep_bInterval; 255 ms */
4499 -static __u8 root_hub_hub_des[] =
4501 - 0x09, /* __u8 bLength; */
4502 - 0x29, /* __u8 bDescriptorType; Hub-descriptor */
4503 - 0x02, /* __u8 bNbrPorts; */
4504 - 0x00, /* __u16 wHubCharacteristics; */
4506 - 0x01, /* __u8 bPwrOn2pwrGood; 2ms */
4507 - 0x00, /* __u8 bHubContrCurrent; 0 mA */
4508 - 0x00, /* __u8 DeviceRemovable; *** 7 Ports max *** */
4509 - 0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */
4512 -static DEFINE_TIMER(bulk_start_timer, NULL, 0, 0);
4513 -static DEFINE_TIMER(bulk_eot_timer, NULL, 0, 0);
4515 -/* We want the start timer to expire before the eot timer, because the former might start
4516 - traffic, thus making it unnecessary for the latter to time out. */
4517 -#define BULK_START_TIMER_INTERVAL (HZ/10) /* 100 ms */
4518 -#define BULK_EOT_TIMER_INTERVAL (HZ/10+2) /* 120 ms */
4520 -#define OK(x) len = (x); dbg_rh("OK(%d): line: %d", x, __LINE__); break
4521 -#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
4522 -{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
4524 -#define SLAB_FLAG (in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL)
4525 -#define KMALLOC_FLAG (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
4527 -/* Most helpful debugging aid */
4528 -#define assert(expr) ((void) ((expr) ? 0 : (err("assert failed at line %d",__LINE__))))
4530 -/* Alternative assert define which stops after a failed assert. */
4532 -#define assert(expr) \
4535 - err("assert failed at line %d",__LINE__); \
4541 +/* Number of physical ports in Etrax 100LX */
4542 +#define USB_ROOT_HUB_PORTS 2
4544 -/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it dynamically?
4545 - To adjust it dynamically we would have to get an interrupt when we reach the end
4546 - of the rx descriptor list, or when we get close to the end, and then allocate more
4549 -#define NBR_OF_RX_DESC 512
4550 -#define RX_DESC_BUF_SIZE 1024
4551 -#define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
4552 +const char hc_name[] = "hc-crisv10";
4553 +const char product_desc[] = DESCRIPTION;
4555 /* The number of epids is, among other things, used for pre-allocating
4556 ctrl, bulk and isoc EP descriptors (one for each epid).
4557 @@ -221,4332 +53,4632 @@
4558 #define NBR_OF_EPIDS 32
4560 /* Support interrupt traffic intervals up to 128 ms. */
4561 -#define MAX_INTR_INTERVAL 128
4562 +#define MAX_INTR_INTERVAL 128
4564 -/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP table
4565 - must be "invalid". By this we mean that we shouldn't care about epid attentions
4566 - for this epid, or at least handle them differently from epid attentions for "valid"
4567 - epids. This define determines which one to use (don't change it). */
4568 -#define INVALID_EPID 31
4569 +/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
4570 + table must be "invalid". By this we mean that we shouldn't care about epid
4571 + attentions for this epid, or at least handle them differently from epid
4572 + attentions for "valid" epids. This define determines which one to use
4573 + (don't change it). */
4574 +#define INVALID_EPID 31
4575 /* A special epid for the bulk dummys. */
4576 -#define DUMMY_EPID 30
4578 -/* This is just a software cache for the valid entries in R_USB_EPT_DATA. */
4579 -static __u32 epid_usage_bitmask;
4581 -/* A bitfield to keep information on in/out traffic is needed to uniquely identify
4582 - an endpoint on a device, since the most significant bit which indicates traffic
4583 - direction is lacking in the ep_id field (ETRAX epids can handle both in and
4584 - out traffic on endpoints that are otherwise identical). The USB framework, however,
4585 - relies on them to be handled separately. For example, bulk IN and OUT urbs cannot
4586 - be queued in the same list, since they would block each other. */
4587 -static __u32 epid_out_traffic;
4589 -/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
4590 - Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be cache aligned. */
4591 -static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
4592 -static volatile USB_IN_Desc_t RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
4594 -/* Pointers into RxDescList. */
4595 -static volatile USB_IN_Desc_t *myNextRxDesc;
4596 -static volatile USB_IN_Desc_t *myLastRxDesc;
4597 -static volatile USB_IN_Desc_t *myPrevRxDesc;
4599 -/* EP descriptors must be 32-bit aligned. */
4600 -static volatile USB_EP_Desc_t TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4601 -static volatile USB_EP_Desc_t TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4602 -/* After each enabled bulk EP (IN or OUT) we put two disabled EP descriptors with the eol flag set,
4603 - causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
4604 - gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
4605 - EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
4607 -static volatile USB_EP_Desc_t TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
4609 -static volatile USB_EP_Desc_t TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4610 -static volatile USB_SB_Desc_t TxIsocSB_zout __attribute__ ((aligned (4)));
4612 -static volatile USB_EP_Desc_t TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
4613 -static volatile USB_SB_Desc_t TxIntrSB_zout __attribute__ ((aligned (4)));
4615 -/* A zout transfer makes a memory access at the address of its buf pointer, which means that setting
4616 - this buf pointer to 0 will cause an access to the flash. In addition to this, setting sw_len to 0
4617 - results in a 16/32 bytes (depending on DMA burst size) transfer. Instead, we set it to 1, and point
4618 - it to this buffer. */
4619 -static int zout_buffer[4] __attribute__ ((aligned (4)));
4620 +#define DUMMY_EPID 30
4622 -/* Cache for allocating new EP and SB descriptors. */
4623 -static kmem_cache_t *usb_desc_cache;
4624 +/* Module settings */
4626 -/* Cache for the registers allocated in the top half. */
4627 -static kmem_cache_t *top_half_reg_cache;
4628 +MODULE_DESCRIPTION(DESCRIPTION);
4629 +MODULE_LICENSE("GPL");
4630 +MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
4632 -/* Cache for the data allocated in the isoc descr top half. */
4633 -static kmem_cache_t *isoc_compl_cache;
4635 -static struct usb_bus *etrax_usb_bus;
4636 +/* Module parameters */
4638 -/* This is a circular (double-linked) list of the active urbs for each epid.
4639 - The head is never removed, and new urbs are linked onto the list as
4640 - urb_entry_t elements. Don't reference urb_list directly; use the wrapper
4641 - functions instead. Note that working with these lists might require spinlock
4643 -static struct list_head urb_list[NBR_OF_EPIDS];
4644 +/* 0 = No ports enabled
4645 + 1 = Only port 1 enabled (on board ethernet on devboard)
4646 + 2 = Only port 2 enabled (external connector on devboard)
4647 + 3 = Both ports enabled
4649 +static unsigned int ports = 3;
4650 +module_param(ports, uint, S_IRUGO);
4651 +MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use");
4653 -/* Read about the need and usage of this lock in submit_ctrl_urb. */
4654 -static spinlock_t urb_list_lock;
4656 -/* Used when unlinking asynchronously. */
4657 -static struct list_head urb_unlink_list;
4658 +/***************************************************************************/
4659 +/***************************************************************************/
4660 +/* Shared global variables for this module */
4661 +/***************************************************************************/
4662 +/***************************************************************************/
4664 -/* for returning string descriptors in UTF-16LE */
4665 -static int ascii2utf (char *ascii, __u8 *utf, int utfmax)
4668 +/* EP descriptor lists for non period transfers. Must be 32-bit aligned. */
4669 +static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4671 - for (retval = 0; *ascii && utfmax > 1; utfmax -= 2, retval += 2) {
4672 - *utf++ = *ascii++ & 0x7f;
4677 +static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4679 -static int usb_root_hub_string (int id, int serial, char *type, __u8 *data, int len)
4682 +/* EP descriptor lists for period transfers. Must be 32-bit aligned. */
4683 +static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
4684 +static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));
4686 - // assert (len > (2 * (sizeof (buf) + 1)));
4687 - // assert (strlen (type) <= 8);
4688 +static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4689 +static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));
4693 - *data++ = 4; *data++ = 3; /* 4 bytes data */
4694 - *data++ = 0; *data++ = 0; /* some language id */
4698 - } else if (id == 1) {
4699 - sprintf (buf, "%x", serial);
4701 - // product description
4702 - } else if (id == 2) {
4703 - sprintf (buf, "USB %s Root Hub", type);
4705 - // id 3 == vendor description
4707 - // unsupported IDs --> "stall"
4711 - data [0] = 2 + ascii2utf (buf, data + 2, len - 2);
4715 +static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
4717 -/* Wrappers around the list functions (include/linux/list.h). */
4718 +/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
4719 + causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
4720 + gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
4721 + EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
4723 +static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
4725 -static inline int urb_list_empty(int epid)
4726 +/* List of URB pointers, where each points to the active URB for an epid.
4727 +   For Bulk, Ctrl and Intr this means which URB is currently added to the
4728 +   DMA lists (Isoc URBs are all directly added to DMA lists). As soon as
4729 +   a URB has completed, the queue is examined and the first URB in the queue
4730 +   is removed and moved to the activeUrbList; its state changes to STARTED and
4731 +   its transfer(s) get added to the DMA list (exception: Isoc, where URBs
4732 +   enter state STARTED directly and their transfers are added to DMA lists). */
4733 +static struct urb *activeUrbList[NBR_OF_EPIDS];
4735 +/* Additional software state info for each epid */
4736 +static struct etrax_epid epid_state[NBR_OF_EPIDS];
4738 +/* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
4739 + even if there is new data waiting to be processed */
4740 +static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
4741 +static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);
4743 +/* We want the start timer to expire before the eot timer, because the former
4744 + might start traffic, thus making it unnecessary for the latter to time
4746 +#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
4747 +#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */
4749 +/* Delay before a URB completion happen when it's scheduled to be delayed */
4750 +#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
4752 +/* Simplifying macros for checking software state info of a epid */
4753 +/* ----------------------------------------------------------------------- */
4754 +#define epid_inuse(epid) epid_state[epid].inuse
4755 +#define epid_out_traffic(epid) epid_state[epid].out_traffic
4756 +#define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
4757 +#define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
4760 +/***************************************************************************/
4761 +/***************************************************************************/
4762 +/* DEBUG FUNCTIONS */
4763 +/***************************************************************************/
4764 +/***************************************************************************/
4765 +/* Note that these functions are always available in their "__" variants,
4766 +   for use in error situations. The variants without "__" are controlled by
4767 + the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
4768 +static void __dump_urb(struct urb* purb)
4770 - return list_empty(&urb_list[epid]);
4771 + struct crisv10_urb_priv *urb_priv = purb->hcpriv;
4774 + urb_num = urb_priv->urb_num;
4776 + printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
4777 + printk("dev :0x%08lx\n", (unsigned long)purb->dev);
4778 + printk("pipe :0x%08x\n", purb->pipe);
4779 + printk("status :%d\n", purb->status);
4780 + printk("transfer_flags :0x%08x\n", purb->transfer_flags);
4781 + printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
4782 + printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
4783 + printk("actual_length :%d\n", purb->actual_length);
4784 + printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
4785 + printk("start_frame :%d\n", purb->start_frame);
4786 + printk("number_of_packets :%d\n", purb->number_of_packets);
4787 + printk("interval :%d\n", purb->interval);
4788 + printk("error_count :%d\n", purb->error_count);
4789 + printk("context :0x%08lx\n", (unsigned long)purb->context);
4790 + printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
4793 +static void __dump_in_desc(volatile struct USB_IN_Desc *in)
4795 + printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
4796 + printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
4797 + printk(" command : 0x%04x\n", in->command);
4798 + printk(" next : 0x%08lx\n", in->next);
4799 + printk(" buf : 0x%08lx\n", in->buf);
4800 + printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
4801 + printk(" status : 0x%04x\n\n", in->status);
4804 +static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
4806 + char tt = (sb->command & 0x30) >> 4;
4811 + tt_string = "zout";
4817 + tt_string = "out";
4820 + tt_string = "setup";
4823 + tt_string = "unknown (weird)";
4826 + printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
4827 + printk(" command:0x%04x (", sb->command);
4828 + printk("rem:%d ", (sb->command & 0x3f00) >> 8);
4829 + printk("full:%d ", (sb->command & 0x40) >> 6);
4830 + printk("tt:%d(%s) ", tt, tt_string);
4831 + printk("intr:%d ", (sb->command & 0x8) >> 3);
4832 + printk("eot:%d ", (sb->command & 0x2) >> 1);
4833 + printk("eol:%d)", sb->command & 0x1);
4834 + printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
4835 + printk(" next:0x%08lx", sb->next);
4836 + printk(" buf:0x%08lx\n", sb->buf);
4840 +static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
4842 + printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
4843 + printk(" command:0x%04x (", ep->command);
4844 + printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
4845 + printk("enable:%d ", (ep->command & 0x10) >> 4);
4846 + printk("intr:%d ", (ep->command & 0x8) >> 3);
4847 + printk("eof:%d ", (ep->command & 0x2) >> 1);
4848 + printk("eol:%d)", ep->command & 0x1);
4849 + printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
4850 + printk(" next:0x%08lx", ep->next);
4851 + printk(" sub:0x%08lx\n", ep->sub);
4854 -/* Returns first urb for this epid, or NULL if list is empty. */
4855 -static inline struct urb *urb_list_first(int epid)
4856 +static inline void __dump_ep_list(int pipe_type)
4858 - struct urb *first_urb = 0;
4859 + volatile struct USB_EP_Desc *ep;
4860 + volatile struct USB_EP_Desc *first_ep;
4861 + volatile struct USB_SB_Desc *sb;
4863 + switch (pipe_type)
4866 + first_ep = &TxBulkEPList[0];
4868 + case PIPE_CONTROL:
4869 + first_ep = &TxCtrlEPList[0];
4871 + case PIPE_INTERRUPT:
4872 + first_ep = &TxIntrEPList[0];
4874 + case PIPE_ISOCHRONOUS:
4875 + first_ep = &TxIsocEPList[0];
4878 + warn("Cannot dump unknown traffic type");
4883 + printk("\n\nDumping EP list...\n\n");
4886 + __dump_ep_desc(ep);
4887 + /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
4888 + sb = ep->sub ? phys_to_virt(ep->sub) : 0;
4890 + __dump_sb_desc(sb);
4891 + sb = sb->next ? phys_to_virt(sb->next) : 0;
4893 + ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));
4895 - if (!urb_list_empty(epid)) {
4896 - /* Get the first urb (i.e. head->next). */
4897 - urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
4898 - first_urb = urb_entry->urb;
4901 + } while (ep != first_ep);
4904 -/* Adds an urb_entry last in the list for this epid. */
4905 -static inline void urb_list_add(struct urb *urb, int epid)
4906 +static inline void __dump_ept_data(int epid)
4908 - urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), KMALLOC_FLAG);
4909 - assert(urb_entry);
4910 + unsigned long flags;
4911 + __u32 r_usb_ept_data;
4913 - urb_entry->urb = urb;
4914 - list_add_tail(&urb_entry->list, &urb_list[epid]);
4915 + if (epid < 0 || epid > 31) {
4916 + printk("Cannot dump ept data for invalid epid %d\n", epid);
4920 + local_irq_save(flags);
4921 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
4923 + r_usb_ept_data = *R_USB_EPT_DATA;
4924 + local_irq_restore(flags);
4926 + printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
4927 + if (r_usb_ept_data == 0) {
4928 + /* No need for more detailed printing. */
4931 + printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
4932 + printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
4933 + printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
4934 + printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
4935 + printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
4936 + printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
4937 + printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
4938 + printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
4939 + printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
4940 + printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
4941 + printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
4942 + printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
4945 +static inline void __dump_ept_data_iso(int epid)
4947 + unsigned long flags;
4950 + if (epid < 0 || epid > 31) {
4951 + printk("Cannot dump ept data for invalid epid %d\n", epid);
4955 + local_irq_save(flags);
4956 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
4958 + ept_data = *R_USB_EPT_DATA_ISO;
4959 + local_irq_restore(flags);
4961 + printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid);
4962 + if (ept_data == 0) {
4963 + /* No need for more detailed printing. */
4966 + printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
4968 + printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
4970 + printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
4972 + printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
4974 + printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
4976 + printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
4980 -/* Search through the list for an element that contains this urb. (The list
4981 - is expected to be short and the one we are about to delete will often be
4982 - the first in the list.) */
4983 -static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid)
4984 +static inline void __dump_ept_data_list(void)
4986 - struct list_head *entry;
4987 - struct list_head *tmp;
4988 - urb_entry_t *urb_entry;
4990 - list_for_each_safe(entry, tmp, &urb_list[epid]) {
4991 - urb_entry = list_entry(entry, urb_entry_t, list);
4992 - assert(urb_entry);
4993 - assert(urb_entry->urb);
4995 - if (urb_entry->urb == urb) {
5003 -/* Delete an urb from the list. */
5004 -static inline void urb_list_del(struct urb *urb, int epid)
5006 - urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
5007 - assert(urb_entry);
5008 + printk("Dumping the whole R_USB_EPT_DATA list\n");
5010 - /* Delete entry and free. */
5011 - list_del(&urb_entry->list);
5013 + for (i = 0; i < 32; i++) {
5014 + __dump_ept_data(i);
5018 +static void debug_epid(int epid) {
5021 + if(epid_isoc(epid)) {
5022 + __dump_ept_data_iso(epid);
5024 + __dump_ept_data(epid);
5027 + printk("Bulk:\n");
5028 + for(i = 0; i < 32; i++) {
5029 + if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
5031 + printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
5035 + printk("Ctrl:\n");
5036 + for(i = 0; i < 32; i++) {
5037 + if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
5039 + printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
5043 + printk("Intr:\n");
5044 + for(i = 0; i < MAX_INTR_INTERVAL; i++) {
5045 + if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
5047 + printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
5051 + printk("Isoc:\n");
5052 + for(i = 0; i < 32; i++) {
5053 + if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
5055 + printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
5059 + __dump_ept_data_list();
5060 + __dump_ep_list(PIPE_INTERRUPT);
5066 +char* hcd_status_to_str(__u8 bUsbStatus) {
5067 + static char hcd_status_str[128];
5068 + hcd_status_str[0] = '\0';
5069 + if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
5070 + strcat(hcd_status_str, "ourun ");
5072 + if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
5073 + strcat(hcd_status_str, "perror ");
5075 + if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
5076 + strcat(hcd_status_str, "device_mode ");
5078 + if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
5079 + strcat(hcd_status_str, "host_mode ");
5081 + if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
5082 + strcat(hcd_status_str, "started ");
5084 + if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
5085 + strcat(hcd_status_str, "running ");
5087 + return hcd_status_str;
5091 +char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
5092 + static char sblist_to_str_buff[128];
5093 + char tmp[32], tmp2[32];
5094 + sblist_to_str_buff[0] = '\0';
5095 + while(sb_desc != NULL) {
5096 + switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
5097 + case 0: sprintf(tmp, "zout"); break;
5098 + case 1: sprintf(tmp, "in"); break;
5099 + case 2: sprintf(tmp, "out"); break;
5100 + case 3: sprintf(tmp, "setup"); break;
5102 + sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
5103 + strcat(sblist_to_str_buff, tmp2);
5104 + if(sb_desc->next != 0) {
5105 + sb_desc = phys_to_virt(sb_desc->next);
5110 + return sblist_to_str_buff;
5113 +char* port_status_to_str(__u16 wPortStatus) {
5114 + static char port_status_str[128];
5115 + port_status_str[0] = '\0';
5116 + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
5117 + strcat(port_status_str, "connected ");
5119 + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
5120 + strcat(port_status_str, "enabled ");
5122 + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
5123 + strcat(port_status_str, "suspended ");
5125 + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
5126 + strcat(port_status_str, "reset ");
5128 + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
5129 + strcat(port_status_str, "full-speed ");
5131 + strcat(port_status_str, "low-speed ");
5133 + return port_status_str;
5137 +char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
5138 + static char endpoint_to_str_buff[128];
5140 + int epnum = ed->bEndpointAddress & 0x0F;
5141 + int dir = ed->bEndpointAddress & 0x80;
5142 + int type = ed->bmAttributes & 0x03;
5143 + endpoint_to_str_buff[0] = '\0';
5144 + sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
5147 + sprintf(tmp, " ctrl");
5150 + sprintf(tmp, " isoc");
5153 + sprintf(tmp, " bulk");
5156 + sprintf(tmp, " intr");
5159 + strcat(endpoint_to_str_buff, tmp);
5161 + sprintf(tmp, " in");
5163 + sprintf(tmp, " out");
5165 + strcat(endpoint_to_str_buff, tmp);
5167 + return endpoint_to_str_buff;
5170 +/* Debug helper functions for Transfer Controller */
5171 +char* pipe_to_str(unsigned int pipe) {
5172 + static char pipe_to_str_buff[128];
5174 + sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe));
5175 + sprintf(tmp, " type:%s", str_type(pipe));
5176 + strcat(pipe_to_str_buff, tmp);
5178 + sprintf(tmp, " dev:%d", usb_pipedevice(pipe));
5179 + strcat(pipe_to_str_buff, tmp);
5180 + sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe));
5181 + strcat(pipe_to_str_buff, tmp);
5182 + return pipe_to_str_buff;
5185 -/* Move an urb to the end of the list. */
5186 -static inline void urb_list_move_last(struct urb *urb, int epid)
5188 - urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
5189 - assert(urb_entry);
5191 - list_move_tail(&urb_entry->list, &urb_list[epid]);
5194 -/* Get the next urb in the list. */
5195 -static inline struct urb *urb_list_next(struct urb *urb, int epid)
5197 - urb_entry_t *urb_entry = __urb_list_entry(urb, epid);
5198 +#define USB_DEBUG_DESC 1
5200 - assert(urb_entry);
5201 +#ifdef USB_DEBUG_DESC
5202 +#define dump_in_desc(x) __dump_in_desc(x)
5203 +#define dump_sb_desc(...) __dump_sb_desc(...)
5204 +#define dump_ep_desc(x) __dump_ep_desc(x)
5205 +#define dump_ept_data(x) __dump_ept_data(x)
5207 +#define dump_in_desc(...) do {} while (0)
5208 +#define dump_sb_desc(...) do {} while (0)
5209 +#define dump_ep_desc(...) do {} while (0)
5212 - if (urb_entry->list.next != &urb_list[epid]) {
5213 - struct list_head *elem = urb_entry->list.next;
5214 - urb_entry = list_entry(elem, urb_entry_t, list);
5215 - return urb_entry->urb;
5221 +/* Uncomment this to enable massive function call trace
5222 + #define USB_DEBUG_TRACE */
5224 +#ifdef USB_DEBUG_TRACE
5225 +#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
5226 +#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
5228 +#define DBFENTER do {} while (0)
5229 +#define DBFEXIT do {} while (0)
5232 -/* For debug purposes only. */
5233 -static inline void urb_list_dump(int epid)
5235 - struct list_head *entry;
5236 - struct list_head *tmp;
5237 - urb_entry_t *urb_entry;
5240 - info("Dumping urb list for epid %d", epid);
5242 - list_for_each_safe(entry, tmp, &urb_list[epid]) {
5243 - urb_entry = list_entry(entry, urb_entry_t, list);
5244 - info(" entry %d, urb = 0x%lx", i, (unsigned long)urb_entry->urb);
5247 +#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
5248 +{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
5250 -static void init_rx_buffers(void);
5251 -static int etrax_rh_unlink_urb(struct urb *urb);
5252 -static void etrax_rh_send_irq(struct urb *urb);
5253 -static void etrax_rh_init_int_timer(struct urb *urb);
5254 -static void etrax_rh_int_timer_do(unsigned long ptr);
5256 -static int etrax_usb_setup_epid(struct urb *urb);
5257 -static int etrax_usb_lookup_epid(struct urb *urb);
5258 -static int etrax_usb_allocate_epid(void);
5259 -static void etrax_usb_free_epid(int epid);
5261 -static int etrax_remove_from_sb_list(struct urb *urb);
5263 -static void* etrax_usb_buffer_alloc(struct usb_bus* bus, size_t size,
5264 - unsigned mem_flags, dma_addr_t *dma);
5265 -static void etrax_usb_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma);
5267 -static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid);
5268 -static void etrax_usb_add_to_ctrl_sb_list(struct urb *urb, int epid);
5269 -static void etrax_usb_add_to_intr_sb_list(struct urb *urb, int epid);
5270 -static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid);
5272 -static int etrax_usb_submit_bulk_urb(struct urb *urb);
5273 -static int etrax_usb_submit_ctrl_urb(struct urb *urb);
5274 -static int etrax_usb_submit_intr_urb(struct urb *urb);
5275 -static int etrax_usb_submit_isoc_urb(struct urb *urb);
5277 -static int etrax_usb_submit_urb(struct urb *urb, unsigned mem_flags);
5278 -static int etrax_usb_unlink_urb(struct urb *urb, int status);
5279 -static int etrax_usb_get_frame_number(struct usb_device *usb_dev);
5281 -static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc);
5282 -static irqreturn_t etrax_usb_rx_interrupt(int irq, void *vhc);
5283 -static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc);
5284 -static void etrax_usb_hc_interrupt_bottom_half(void *data);
5286 -static void etrax_usb_isoc_descr_interrupt_bottom_half(void *data);
5289 -/* The following is a list of interrupt handlers for the host controller interrupts we use.
5290 - They are called from etrax_usb_hc_interrupt_bottom_half. */
5291 -static void etrax_usb_hc_isoc_eof_interrupt(void);
5292 -static void etrax_usb_hc_bulk_eot_interrupt(int timer_induced);
5293 -static void etrax_usb_hc_epid_attn_interrupt(usb_interrupt_registers_t *reg);
5294 -static void etrax_usb_hc_port_status_interrupt(usb_interrupt_registers_t *reg);
5295 -static void etrax_usb_hc_ctl_status_interrupt(usb_interrupt_registers_t *reg);
5297 -static int etrax_rh_submit_urb (struct urb *urb);
5299 -/* Forward declaration needed because they are used in the rx interrupt routine. */
5300 -static void etrax_usb_complete_urb(struct urb *urb, int status);
5301 -static void etrax_usb_complete_bulk_urb(struct urb *urb, int status);
5302 -static void etrax_usb_complete_ctrl_urb(struct urb *urb, int status);
5303 -static void etrax_usb_complete_intr_urb(struct urb *urb, int status);
5304 -static void etrax_usb_complete_isoc_urb(struct urb *urb, int status);
5305 +/* Most helpful debugging aid */
5306 +#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
5308 -static int etrax_usb_hc_init(void);
5309 -static void etrax_usb_hc_cleanup(void);
5311 -static struct usb_operations etrax_usb_device_operations =
5313 - .get_frame_number = etrax_usb_get_frame_number,
5314 - .submit_urb = etrax_usb_submit_urb,
5315 - .unlink_urb = etrax_usb_unlink_urb,
5316 - .buffer_alloc = etrax_usb_buffer_alloc,
5317 - .buffer_free = etrax_usb_buffer_free
5319 +/***************************************************************************/
5320 +/***************************************************************************/
5321 +/* Forward declarations */
5322 +/***************************************************************************/
5323 +/***************************************************************************/
5324 +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
5325 +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
5326 +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
5327 +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);
5329 +void rh_port_status_change(__u16[]);
5330 +int rh_clear_port_feature(__u8, __u16);
5331 +int rh_set_port_feature(__u8, __u16);
5332 +static void rh_disable_port(unsigned int port);
5334 +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
5337 +static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
5339 +static void tc_free_epid(struct usb_host_endpoint *ep);
5340 +static int tc_allocate_epid(void);
5341 +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
5342 +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
5345 +static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
5347 +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);
5349 +static inline struct urb *urb_list_first(int epid);
5350 +static inline void urb_list_add(struct urb *urb, int epid,
5352 +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
5353 +static inline void urb_list_del(struct urb *urb, int epid);
5354 +static inline void urb_list_move_last(struct urb *urb, int epid);
5355 +static inline struct urb *urb_list_next(struct urb *urb, int epid);
5357 +int create_sb_for_urb(struct urb *urb, int mem_flags);
5358 +int init_intr_urb(struct urb *urb, int mem_flags);
5360 +static inline void etrax_epid_set(__u8 index, __u32 data);
5361 +static inline void etrax_epid_clear_error(__u8 index);
5362 +static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
5364 +static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout);
5365 +static inline __u32 etrax_epid_get(__u8 index);
5367 +/* We're accessing the same register position in Etrax so
5368 + when we do full access the internal difference doesn't matter */
5369 +#define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
5370 +#define etrax_epid_iso_get(index) etrax_epid_get(index)
5373 +static void tc_dma_process_isoc_urb(struct urb *urb);
5374 +static void tc_dma_process_queue(int epid);
5375 +static void tc_dma_unlink_intr_urb(struct urb *urb);
5376 +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
5377 +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);
5379 +static void tc_bulk_start_timer_func(unsigned long dummy);
5380 +static void tc_bulk_eot_timer_func(unsigned long dummy);
5383 +/*************************************************************/
5384 +/*************************************************************/
5385 +/* Host Controller Driver block                               */
5386 +/*************************************************************/
5387 +/*************************************************************/
5389 +/* HCD operations */
5390 +static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
5391 +static int crisv10_hcd_reset(struct usb_hcd *);
5392 +static int crisv10_hcd_start(struct usb_hcd *);
5393 +static void crisv10_hcd_stop(struct usb_hcd *);
5395 +static int crisv10_hcd_suspend(struct device *, u32, u32);
5396 +static int crisv10_hcd_resume(struct device *, u32);
5397 +#endif /* CONFIG_PM */
5398 +static int crisv10_hcd_get_frame(struct usb_hcd *);
5400 +static int tc_urb_enqueue(struct usb_hcd *, struct usb_host_endpoint *ep, struct urb *, gfp_t mem_flags);
5401 +static int tc_urb_dequeue(struct usb_hcd *, struct urb *);
5402 +static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);
5404 +static int rh_status_data_request(struct usb_hcd *, char *);
5405 +static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);
5408 +static int crisv10_hcd_hub_suspend(struct usb_hcd *);
5409 +static int crisv10_hcd_hub_resume(struct usb_hcd *);
5410 +#endif /* CONFIG_PM */
5411 +#ifdef CONFIG_USB_OTG
5412 +static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
5413 +#endif /* CONFIG_USB_OTG */
5415 +/* host controller driver interface */
5416 +static const struct hc_driver crisv10_hc_driver =
5418 + .description = hc_name,
5419 + .product_desc = product_desc,
5420 + .hcd_priv_size = sizeof(struct crisv10_hcd),
5422 +	/* Attaching IRQ handler manually in probe() */
5423 + /* .irq = crisv10_hcd_irq, */
5425 + .flags = HCD_USB11,
5427 + /* called to init HCD and root hub */
5428 + .reset = crisv10_hcd_reset,
5429 + .start = crisv10_hcd_start,
5431 + /* cleanly make HCD stop writing memory and doing I/O */
5432 + .stop = crisv10_hcd_stop,
5434 + /* return current frame number */
5435 + .get_frame_number = crisv10_hcd_get_frame,
5438 + /* Manage i/o requests via the Transfer Controller */
5439 + .urb_enqueue = tc_urb_enqueue,
5440 + .urb_dequeue = tc_urb_dequeue,
5442 + /* hw synch, freeing endpoint resources that urb_dequeue can't */
5443 + .endpoint_disable = tc_endpoint_disable,
5446 + /* Root Hub support */
5447 + .hub_status_data = rh_status_data_request,
5448 + .hub_control = rh_control_request,
5450 + .hub_suspend = rh_suspend_request,
5451 + .hub_resume = rh_resume_request,
5452 +#endif /* CONFIG_PM */
5453 +#ifdef CONFIG_USB_OTG
5454 + .start_port_reset = crisv10_hcd_start_port_reset,
5455 +#endif /* CONFIG_USB_OTG */
5458 -/* Note that these functions are always available in their "__" variants, for use in
5459 - error situations. The "__" missing variants are controlled by the USB_DEBUG_DESC/
5460 - USB_DEBUG_URB macros. */
5461 -static void __dump_urb(struct urb* purb)
5463 - printk("\nurb :0x%08lx\n", (unsigned long)purb);
5464 - printk("dev :0x%08lx\n", (unsigned long)purb->dev);
5465 - printk("pipe :0x%08x\n", purb->pipe);
5466 - printk("status :%d\n", purb->status);
5467 - printk("transfer_flags :0x%08x\n", purb->transfer_flags);
5468 - printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
5469 - printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
5470 - printk("actual_length :%d\n", purb->actual_length);
5471 - printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
5472 - printk("start_frame :%d\n", purb->start_frame);
5473 - printk("number_of_packets :%d\n", purb->number_of_packets);
5474 - printk("interval :%d\n", purb->interval);
5475 - printk("error_count :%d\n", purb->error_count);
5476 - printk("context :0x%08lx\n", (unsigned long)purb->context);
5477 - printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
5480 -static void __dump_in_desc(volatile USB_IN_Desc_t *in)
5482 - printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
5483 - printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
5484 - printk(" command : 0x%04x\n", in->command);
5485 - printk(" next : 0x%08lx\n", in->next);
5486 - printk(" buf : 0x%08lx\n", in->buf);
5487 - printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
5488 - printk(" status : 0x%04x\n\n", in->status);
5491 + * conversion between pointers to a hcd and the corresponding
5495 -static void __dump_sb_desc(volatile USB_SB_Desc_t *sb)
5496 +static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
5498 - char tt = (sb->command & 0x30) >> 4;
5503 - tt_string = "zout";
5509 - tt_string = "out";
5512 - tt_string = "setup";
5515 - tt_string = "unknown (weird)";
5518 - printk("\n USB_SB_Desc at 0x%08lx\n", (unsigned long)sb);
5519 - printk(" command : 0x%04x\n", sb->command);
5520 - printk(" rem : %d\n", (sb->command & 0x3f00) >> 8);
5521 - printk(" full : %d\n", (sb->command & 0x40) >> 6);
5522 - printk(" tt : %d (%s)\n", tt, tt_string);
5523 - printk(" intr : %d\n", (sb->command & 0x8) >> 3);
5524 - printk(" eot : %d\n", (sb->command & 0x2) >> 1);
5525 - printk(" eol : %d\n", sb->command & 0x1);
5526 - printk(" sw_len : 0x%04x (%d)\n", sb->sw_len, sb->sw_len);
5527 - printk(" next : 0x%08lx\n", sb->next);
5528 - printk(" buf : 0x%08lx\n\n", sb->buf);
5529 + return (struct crisv10_hcd *) hcd->hcd_priv;
5533 -static void __dump_ep_desc(volatile USB_EP_Desc_t *ep)
5534 +static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
5536 - printk("\nUSB_EP_Desc at 0x%08lx\n", (unsigned long)ep);
5537 - printk(" command : 0x%04x\n", ep->command);
5538 - printk(" ep_id : %d\n", (ep->command & 0x1f00) >> 8);
5539 - printk(" enable : %d\n", (ep->command & 0x10) >> 4);
5540 - printk(" intr : %d\n", (ep->command & 0x8) >> 3);
5541 - printk(" eof : %d\n", (ep->command & 0x2) >> 1);
5542 - printk(" eol : %d\n", ep->command & 0x1);
5543 - printk(" hw_len : 0x%04x (%d)\n", ep->hw_len, ep->hw_len);
5544 - printk(" next : 0x%08lx\n", ep->next);
5545 - printk(" sub : 0x%08lx\n\n", ep->sub);
5546 + return container_of((void *) hcd, struct usb_hcd, hcd_priv);
5549 -static inline void __dump_ep_list(int pipe_type)
5550 +/* check if specified port is in use */
5551 +static inline int port_in_use(unsigned int port)
5553 - volatile USB_EP_Desc_t *ep;
5554 - volatile USB_EP_Desc_t *first_ep;
5555 - volatile USB_SB_Desc_t *sb;
5557 - switch (pipe_type)
5560 - first_ep = &TxBulkEPList[0];
5562 - case PIPE_CONTROL:
5563 - first_ep = &TxCtrlEPList[0];
5565 - case PIPE_INTERRUPT:
5566 - first_ep = &TxIntrEPList[0];
5568 - case PIPE_ISOCHRONOUS:
5569 - first_ep = &TxIsocEPList[0];
5572 - warn("Cannot dump unknown traffic type");
5577 - printk("\n\nDumping EP list...\n\n");
5580 - __dump_ep_desc(ep);
5581 - /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
5582 - sb = ep->sub ? phys_to_virt(ep->sub) : 0;
5584 - __dump_sb_desc(sb);
5585 - sb = sb->next ? phys_to_virt(sb->next) : 0;
5587 - ep = (volatile USB_EP_Desc_t *)(phys_to_virt(ep->next));
5589 - } while (ep != first_ep);
5590 + return ports & (1 << port);
5593 -static inline void __dump_ept_data(int epid)
5594 +/* number of ports in use */
5595 +static inline unsigned int num_ports(void)
5597 - unsigned long flags;
5598 - __u32 r_usb_ept_data;
5600 - if (epid < 0 || epid > 31) {
5601 - printk("Cannot dump ept data for invalid epid %d\n", epid);
5605 - save_flags(flags);
5607 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
5609 - r_usb_ept_data = *R_USB_EPT_DATA;
5610 - restore_flags(flags);
5612 - printk("\nR_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
5613 - if (r_usb_ept_data == 0) {
5614 - /* No need for more detailed printing. */
5617 - printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
5618 - printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
5619 - printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
5620 - printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
5621 - printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
5622 - printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
5623 - printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
5624 - printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
5625 - printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
5626 - printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
5627 - printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
5628 - printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
5629 + unsigned int i, num = 0;
5630 + for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
5631 + if (port_in_use(i))
5636 -static inline void __dump_ept_data_list(void)
5637 +/* map hub port number to the port number used internally by the HC */
5638 +static inline unsigned int map_port(unsigned int port)
5642 - printk("Dumping the whole R_USB_EPT_DATA list\n");
5644 - for (i = 0; i < 32; i++) {
5645 - __dump_ept_data(i);
5647 + unsigned int i, num = 0;
5648 + for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
5649 + if (port_in_use(i))
5650 + if (++num == port)
5654 -#ifdef USB_DEBUG_DESC
5655 -#define dump_in_desc(...) __dump_in_desc(...)
5656 -#define dump_sb_desc(...) __dump_sb_desc(...)
5657 -#define dump_ep_desc(...) __dump_ep_desc(...)
5659 -#define dump_in_desc(...) do {} while (0)
5660 -#define dump_sb_desc(...) do {} while (0)
5661 -#define dump_ep_desc(...) do {} while (0)
5664 -#ifdef USB_DEBUG_URB
5665 -#define dump_urb(x) __dump_urb(x)
5667 -#define dump_urb(x) do {} while (0)
5668 +/* size of descriptors in slab cache */
5670 +#define MAX(x, y) ((x) > (y) ? (x) : (y))
5673 -static void init_rx_buffers(void)
5678 +/******************************************************************/
5679 +/* Hardware Interrupt functions */
5680 +/******************************************************************/
5682 +/* Fast interrupt handler for HC */
5683 +static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
5685 + struct usb_hcd *hcd = vcd;
5686 + struct crisv10_irq_reg reg;
5688 + unsigned long flags;
5692 + ASSERT(hcd != NULL);
5695 + /* Turn of other interrupts while handling these sensitive cases */
5696 + local_irq_save(flags);
5698 + /* Read out which interrupts are flagged */
5699 + irq_mask = *R_USB_IRQ_MASK_READ;
5700 + reg.r_usb_irq_mask_read = irq_mask;
5702 + /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
5703 + R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
5704 + clears the ourun and perror fields of R_USB_STATUS. */
5705 + reg.r_usb_status = *R_USB_STATUS;
5707 + /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
5709 + reg.r_usb_epid_attn = *R_USB_EPID_ATTN;
5711 + /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
5712 + port_status interrupt. */
5713 + reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
5714 + reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;
5716 + /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
5717 + /* Note: the lower 11 bits contain the actual frame number, sent with each
5719 + reg.r_usb_fm_number = *R_USB_FM_NUMBER;
5721 + /* Interrupts are handled in order of priority. */
5722 + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
5723 + crisv10_hcd_port_status_irq(&reg);
5725 + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
5726 + crisv10_hcd_epid_attn_irq(&reg);
5728 + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
5729 + crisv10_hcd_ctl_status_irq(&reg);
5731 + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
5732 + crisv10_hcd_isoc_eof_irq(&reg);
5734 + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
5735 + /* Update/restart the bulk start timer since obviously the channel is
5737 + mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
5738 + /* Update/restart the bulk eot timer since we just received a bulk eot
5740 + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
5742 + /* Check for finished bulk transfers on epids */
5743 + check_finished_bulk_tx_epids(hcd, 0);
5745 + local_irq_restore(flags);
5748 + return IRQ_HANDLED;
5752 +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
5753 + struct usb_hcd *hcd = reg->hcd;
5754 + struct crisv10_urb_priv *urb_priv;
5758 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
5759 + if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
5764 + if (epid == DUMMY_EPID || epid == INVALID_EPID) {
5765 + /* We definitely don't care about these ones. Besides, they are
5766 + always disabled, so any possible disabling caused by the
5767 + epid attention interrupt is irrelevant. */
5768 + warn("Got epid_attn for INVALID_EPID or DUMMY_EPID (%d).", epid);
5772 + if(!epid_inuse(epid)) {
5773 + irq_err("Epid attention on epid:%d that isn't in use\n", epid);
5774 + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5779 + /* Note that although there are separate R_USB_EPT_DATA and
5780 + R_USB_EPT_DATA_ISO registers, they are located at the same address and
5781 + are of the same size. In other words, this read should be ok for isoc
5783 + ept_data = etrax_epid_get(epid);
5784 + error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);
5786 + /* Get the active URB for this epid. We blatantly assume
5787 + that only this URB could have caused the epid attention. */
5788 + urb = activeUrbList[epid];
5789 + if (urb == NULL) {
5790 + irq_err("Attention on epid:%d error:%d with no active URB.\n",
5791 + epid, error_code);
5792 + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5797 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
5800 + /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
5801 + if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
5803 + /* Isoc traffic doesn't have error_count_in/error_count_out. */
5804 + if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
5805 + (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
5806 + IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
5807 + /* Check if URB is already marked for late-finish, we can get
5808 + several 3rd error for Intr traffic when a device is unplugged */
5809 + if(urb_priv->later_data == NULL) {
5811 + irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
5812 + str_dir(urb->pipe), str_type(urb->pipe),
5813 + (unsigned int)urb, urb_priv->urb_num);
5815 + tc_finish_urb_later(hcd, urb, -EPROTO);
5818 + } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
5819 + irq_warn("Perror for epid:%d\n", epid);
5820 + printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
5821 + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5825 + if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
5826 + /* invalid ep_id */
5827 + panic("Perror because of invalid epid."
5828 + " Deconfigured too early?");
5830 + /* past eof1, near eof, zout transfer, setup transfer */
5831 + /* Dump the urb and the relevant EP descriptor. */
5832 + panic("Something wrong with DMA descriptor contents."
5833 + " Too much traffic inserted?");
5835 + } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
5836 + /* buffer ourun */
5837 + printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
5838 + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5842 - for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
5843 - RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
5844 - RxDescList[i].command = 0;
5845 - RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
5846 - RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
5847 - RxDescList[i].hw_len = 0;
5848 - RxDescList[i].status = 0;
5850 - /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as USB_IN_Desc
5851 - for the relevant fields.) */
5852 - prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
5853 + panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
5855 + irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
5856 + str_dir(urb->pipe), str_type(urb->pipe));
5857 + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5862 + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
5864 + /* Not really a protocol error, just says that the endpoint gave
5865 + a stall response. Note that error_code cannot be stall for isoc. */
5866 + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
5867 + panic("Isoc traffic cannot stall");
5870 - RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
5871 - RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
5872 - RxDescList[i].next = virt_to_phys(&RxDescList[0]);
5873 - RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
5874 - RxDescList[i].hw_len = 0;
5875 - RxDescList[i].status = 0;
5876 + tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
5877 + str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
5878 + tc_finish_urb(hcd, urb, -EPIPE);
5880 + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
5882 + /* Two devices responded to a transaction request. Must be resolved
5883 + by software. FIXME: Reset ports? */
5884 + panic("Bus error for epid %d."
5885 + " Two devices responded to transaction request\n",
5888 + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
5890 + /* DMA overrun or underrun. */
5891 + irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
5892 + str_dir(urb->pipe), str_type(urb->pipe));
5894 + /* It seems that error_code = buffer_error in
5895 + R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
5896 + are the same error. */
5897 + tc_finish_urb(hcd, urb, -EPROTO);
5899 + irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
5900 + str_dir(urb->pipe), str_type(urb->pipe));
5901 + dump_ept_data(epid);
5908 +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
5910 + __u16 port_reg[USB_ROOT_HUB_PORTS];
5912 + port_reg[0] = reg->r_usb_rh_port_status_1;
5913 + port_reg[1] = reg->r_usb_rh_port_status_2;
5914 + rh_port_status_change(port_reg);
5918 +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
5922 + struct crisv10_urb_priv *urb_priv;
5926 + for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
5928 + /* Only check epids that are in use, is valid and has SB list */
5929 + if (!epid_inuse(epid) || epid == INVALID_EPID ||
5930 + TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
5931 + /* Nothing here to see. */
5934 + ASSERT(epid_isoc(epid));
5936 + /* Get the active URB for this epid (if any). */
5937 + urb = activeUrbList[epid];
5939 + isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
5942 + if(!epid_out_traffic(epid)) {
5943 + /* Sanity check. */
5944 + ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
5946 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
5949 + if (urb_priv->urb_state == NOT_STARTED) {
5950 + /* If ASAP is not set and urb->start_frame is the current frame,
5951 + start the transfer. */
5952 + if (!(urb->transfer_flags & URB_ISO_ASAP) &&
5953 + (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
5954 + /* EP should not be enabled if we're waiting for start_frame */
5955 + ASSERT((TxIsocEPList[epid].command &
5956 + IO_STATE(USB_EP_command, enable, yes)) == 0);
5958 + isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
5959 + TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
5961 + /* This urb is now active. */
5962 + urb_priv->urb_state = STARTED;
5972 +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
5974 + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);
5977 + ASSERT(crisv10_hcd);
5979 + irq_dbg("ctr_status_irq, controller status: %s\n",
5980 + hcd_status_to_str(reg->r_usb_status));
5982 + /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
5983 + list for the corresponding epid? */
5984 + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
5985 + panic("USB controller got ourun.");
5987 + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
5989 + /* Before, etrax_usb_do_intr_recover was called on this epid if it was
5990 + an interrupt pipe. I don't see how re-enabling all EP descriptors
5991 + will help if there was a programming error. */
5992 + panic("USB controller got perror.");
5995 + /* Keep track of USB Controller, if it's running or not */
5996 + if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
5997 + crisv10_hcd->running = 1;
5999 + crisv10_hcd->running = 0;
6002 + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
6003 + /* We should never operate in device mode. */
6004 + panic("USB controller in device mode.");
6007 + /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
6008 + using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
6009 + set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags);
6015 +/******************************************************************/
6016 +/* Host Controller interface functions */
6017 +/******************************************************************/
6019 +static inline void crisv10_ready_wait(void) {
6020 + volatile int timeout = 10000;
6021 + /* Check the busy bit of USB controller in Etrax */
6022 + while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
6024 + if(timeout == 0) {
6025 + warn("Timeout while waiting for USB controller to be idle\n");
6029 +/* reset host controller */
6030 +static int crisv10_hcd_reset(struct usb_hcd *hcd)
6033 + hcd_dbg(hcd, "reset\n");
6036 + /* Reset the USB interface. */
6039 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
6040 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6041 + IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
6048 +/* start host controller */
6049 +static int crisv10_hcd_start(struct usb_hcd *hcd)
6052 + hcd_dbg(hcd, "start\n");
6054 + crisv10_ready_wait();
6056 + /* Start processing of USB traffic. */
6058 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
6059 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6060 + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
6064 + hcd->state = HC_STATE_RUNNING;
6070 +/* stop host controller */
6071 +static void crisv10_hcd_stop(struct usb_hcd *hcd)
6074 + hcd_dbg(hcd, "stop\n");
6075 + crisv10_hcd_reset(hcd);
6079 +/* return the current frame number */
6080 +static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
6084 + return (*R_USB_FM_NUMBER & 0x7ff);
6087 +#ifdef CONFIG_USB_OTG
6089 +static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
6091 + return 0; /* no-op for now */
6094 +#endif /* CONFIG_USB_OTG */
6097 +/******************************************************************/
6098 +/* Root Hub functions */
6099 +/******************************************************************/
6101 +/* root hub status */
6102 +static const struct usb_hub_status rh_hub_status =
6108 +/* root hub descriptor */
6109 +static const u8 rh_hub_descr[] =
6111 + 0x09, /* bDescLength */
6112 + 0x29, /* bDescriptorType */
6113 + USB_ROOT_HUB_PORTS, /* bNbrPorts */
6114 + 0x00, /* wHubCharacteristics */
6116 + 0x01, /* bPwrOn2pwrGood */
6117 + 0x00, /* bHubContrCurrent */
6118 + 0x00, /* DeviceRemovable */
6119 + 0xff /* PortPwrCtrlMask */
6122 +/* Actual holder of root hub status */
6123 +struct crisv10_rh rh;
6125 +/* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
6126 +int rh_init(void) {
6128 + /* Reset port status flags */
6129 + for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
6130 + rh.wPortChange[i] = 0;
6131 + rh.wPortStatusPrev[i] = 0;
6136 +#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
6137 + (1<<USB_PORT_FEAT_ENABLE)|\
6138 + (1<<USB_PORT_FEAT_SUSPEND)|\
6139 + (1<<USB_PORT_FEAT_RESET))
6141 +/* Handle port status change interrupt (called from bottom part interrupt) */
6142 +void rh_port_status_change(__u16 port_reg[]) {
6146 + for(i = 0; i < USB_ROOT_HUB_PORTS; i++) {
6147 + /* Xor out changes since last read, masked for important flags */
6148 + wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
6149 + /* Or changes together with (if any) saved changes */
6150 + rh.wPortChange[i] |= wChange;
6151 + /* Save new status */
6152 + rh.wPortStatusPrev[i] = port_reg[i];
6155 + rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i+1,
6156 + port_status_to_str(wChange),
6157 + port_status_to_str(port_reg[i]));
6162 +/* Construct port status change bitmap for the root hub */
6163 +static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
6165 + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
6170 + * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
6171 + * return bitmap indicating ports with status change
6174 + spin_lock(&crisv10_hcd->lock);
6175 + for (i = 1; i <= crisv10_hcd->num_ports; i++) {
6176 + if (rh.wPortChange[map_port(i)]) {
6178 + rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i,
6179 + port_status_to_str(rh.wPortChange[map_port(i)]),
6180 + port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
6183 + spin_unlock(&crisv10_hcd->lock);
6185 + return *buf == 0 ? 0 : 1;
6188 +/* Handle a control request for the root hub (called from hcd_driver) */
6189 +static int rh_control_request(struct usb_hcd *hcd,
6196 + struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
6201 + switch (typeReq) {
6202 + case GetHubDescriptor:
6203 + rh_dbg("GetHubDescriptor\n");
6204 + len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
6205 + memcpy(buf, rh_hub_descr, len);
6206 + buf[2] = crisv10_hcd->num_ports;
6208 + case GetHubStatus:
6209 + rh_dbg("GetHubStatus\n");
6210 + len = min_t(unsigned int, sizeof rh_hub_status, wLength);
6211 + memcpy(buf, &rh_hub_status, len);
6213 + case GetPortStatus:
6214 + if (!wIndex || wIndex > crisv10_hcd->num_ports)
6216 + rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex,
6217 + port_status_to_str(rh.wPortChange[map_port(wIndex)]),
6218 + port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
6219 + *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
6220 + *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
6222 + case SetHubFeature:
6223 + rh_dbg("SetHubFeature\n");
6224 + case ClearHubFeature:
6225 + rh_dbg("ClearHubFeature\n");
6227 + case C_HUB_OVER_CURRENT:
6228 + case C_HUB_LOCAL_POWER:
6229 + rh_warn("Not implemented hub request:%d \n", typeReq);
6230 + /* not implemented */
6236 + case SetPortFeature:
6237 + if (!wIndex || wIndex > crisv10_hcd->num_ports)
6239 + if(rh_set_port_feature(map_port(wIndex), wValue))
6242 + case ClearPortFeature:
6243 + if (!wIndex || wIndex > crisv10_hcd->num_ports)
6245 + if(rh_clear_port_feature(map_port(wIndex), wValue))
6249 + rh_warn("Unknown hub request: %d\n", typeReq);
6257 +int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
6258 + __u8 bUsbCommand = 0;
6259 + switch(wFeature) {
6260 + case USB_PORT_FEAT_RESET:
6261 + rh_dbg("SetPortFeature: reset\n");
6262 + bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
6265 + case USB_PORT_FEAT_SUSPEND:
6266 + rh_dbg("SetPortFeature: suspend\n");
6267 + bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
6270 + case USB_PORT_FEAT_POWER:
6271 + rh_dbg("SetPortFeature: power\n");
6273 + case USB_PORT_FEAT_C_CONNECTION:
6274 + rh_dbg("SetPortFeature: c_connection\n");
6276 + case USB_PORT_FEAT_C_RESET:
6277 + rh_dbg("SetPortFeature: c_reset\n");
6279 + case USB_PORT_FEAT_C_OVER_CURRENT:
6280 + rh_dbg("SetPortFeature: c_over_current\n");
6284 + /* Select which port via the port_sel field */
6285 + bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);
6287 + /* Make sure the controller isn't busy. */
6288 + crisv10_ready_wait();
6289 + /* Send out the actual command to the USB controller */
6290 + *R_USB_COMMAND = bUsbCommand;
6292 + /* If port reset then also bring USB controller into running state */
6293 + if(wFeature == USB_PORT_FEAT_RESET) {
6294 + /* Wait a while for controller to first become started after port reset */
6295 + udelay(12000); /* 12ms blocking wait */
6297 + /* Make sure the controller isn't busy. */
6298 + crisv10_ready_wait();
6300 + /* If all enabled ports were disabled the host controller goes down into
6301 + started mode, so we need to bring it back into the running state.
6302 + (This is safe even if it's already in the running state.) */
6304 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
6305 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6306 + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
6311 + rh_dbg("SetPortFeature: unknown feature\n");
6317 +int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
6318 + switch(wFeature) {
6319 + case USB_PORT_FEAT_ENABLE:
6320 + rh_dbg("ClearPortFeature: enable\n");
6321 + rh_disable_port(bPort);
6323 + case USB_PORT_FEAT_SUSPEND:
6324 + rh_dbg("ClearPortFeature: suspend\n");
6326 + case USB_PORT_FEAT_POWER:
6327 + rh_dbg("ClearPortFeature: power\n");
6330 + case USB_PORT_FEAT_C_ENABLE:
6331 + rh_dbg("ClearPortFeature: c_enable\n");
6333 + case USB_PORT_FEAT_C_SUSPEND:
6334 + rh_dbg("ClearPortFeature: c_suspend\n");
6336 + case USB_PORT_FEAT_C_CONNECTION:
6337 + rh_dbg("ClearPortFeature: c_connection\n");
6339 + case USB_PORT_FEAT_C_OVER_CURRENT:
6340 + rh_dbg("ClearPortFeature: c_over_current\n");
6342 + case USB_PORT_FEAT_C_RESET:
6343 + rh_dbg("ClearPortFeature: c_reset\n");
6346 + rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
6349 + rh_dbg("ClearPortFeature: unknown feature\n");
6357 +/* Handle a suspend request for the root hub (called from hcd_driver) */
6358 +static int rh_suspend_request(struct usb_hcd *hcd)
6360 + return 0; /* no-op for now */
6363 +/* Handle a resume request for the root hub (called from hcd_driver) */
6364 +static int rh_resume_request(struct usb_hcd *hcd)
6366 + return 0; /* no-op for now */
6368 +#endif /* CONFIG_PM */
6372 +/* Wrapper function for workaround port disable registers in USB controller */
6373 +static void rh_disable_port(unsigned int port) {
6374 + volatile int timeout = 10000;
6375 + volatile char* usb_portx_disable;
6378 + usb_portx_disable = R_USB_PORT1_DISABLE;
6381 + usb_portx_disable = R_USB_PORT2_DISABLE;
6384 + /* Invalid port index */
6387 + /* Set disable flag in special register */
6388 + *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
6389 + /* Wait until not enabled anymore */
6390 + while((rh.wPortStatusPrev[port] &
6391 + IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
6393 + if(timeout == 0) {
6394 + warn("Timeout while waiting for port %d to become disabled\n", port);
6396 + /* clear disable flag in special register */
6397 + *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
6398 + rh_info("Physical port %d disabled\n", port+1);
6402 +/******************************************************************/
6403 +/* Transfer Controller (TC) functions */
6404 +/******************************************************************/
6406 +/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
6408 + To adjust it dynamically we would have to get an interrupt when we reach
6409 + the end of the rx descriptor list, or when we get close to the end, and
6410 + then allocate more descriptors. */
6411 +#define NBR_OF_RX_DESC 512
6412 +#define RX_DESC_BUF_SIZE 1024
6413 +#define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
6415 - myNextRxDesc = &RxDescList[0];
6416 - myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
6417 - myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
6419 - *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
6420 - *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
6421 +/* Local variables for Transfer Controller */
6422 +/* --------------------------------------- */
6426 +/* This is a circular (double-linked) list of the active urbs for each epid.
6427 + The head is never removed, and new urbs are linked onto the list as
6428 + urb_entry_t elements. Don't reference urb_list directly; use the wrapper
6429 + functions instead (which includes spin_locks) */
6430 +static struct list_head urb_list[NBR_OF_EPIDS];
6432 -static void init_tx_bulk_ep(void)
6435 +/* Read about the need and usage of this lock in submit_ctrl_urb. */
6436 +/* Lock for URB lists for each EPID */
6437 +static spinlock_t urb_list_lock;
6440 +/* Lock for EPID array register (R_USB_EPT_x) in Etrax */
6441 +static spinlock_t etrax_epid_lock;
6443 - for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
6444 - CHECK_ALIGN(&TxBulkEPList[i]);
6445 - TxBulkEPList[i].hw_len = 0;
6446 - TxBulkEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
6447 - TxBulkEPList[i].sub = 0;
6448 - TxBulkEPList[i].next = virt_to_phys(&TxBulkEPList[i + 1]);
6450 - /* Initiate two EPs, disabled and with the eol flag set. No need for any
6451 - preserved epid. */
6453 - /* The first one has the intr flag set so we get an interrupt when the DMA
6454 - channel is about to become disabled. */
6455 - CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
6456 - TxBulkDummyEPList[i][0].hw_len = 0;
6457 - TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
6458 - IO_STATE(USB_EP_command, eol, yes) |
6459 - IO_STATE(USB_EP_command, intr, yes));
6460 - TxBulkDummyEPList[i][0].sub = 0;
6461 - TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
6463 - /* The second one. */
6464 - CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
6465 - TxBulkDummyEPList[i][1].hw_len = 0;
6466 - TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
6467 - IO_STATE(USB_EP_command, eol, yes));
6468 - TxBulkDummyEPList[i][1].sub = 0;
6469 - /* The last dummy's next pointer is the same as the current EP's next pointer. */
6470 - TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
6472 +/* Lock for dma8 sub0 handling */
6473 +static spinlock_t etrax_dma8_sub0_lock;
6475 - /* Configure the last one. */
6476 - CHECK_ALIGN(&TxBulkEPList[i]);
6477 - TxBulkEPList[i].hw_len = 0;
6478 - TxBulkEPList[i].command = (IO_STATE(USB_EP_command, eol, yes) |
6479 - IO_FIELD(USB_EP_command, epid, i));
6480 - TxBulkEPList[i].sub = 0;
6481 - TxBulkEPList[i].next = virt_to_phys(&TxBulkEPList[0]);
6483 - /* No need configuring dummy EPs for the last one as it will never be used for
6484 - bulk traffic (i == INVALD_EPID at this point). */
6486 - /* Set up to start on the last EP so we will enable it when inserting traffic
6487 - for the first time (imitating the situation where the DMA has stopped
6488 - because there was no more traffic). */
6489 - *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
6490 - /* No point in starting the bulk channel yet.
6491 - *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
6494 +/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
6495 + Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be
6497 +static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
6498 +static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
6500 -static void init_tx_ctrl_ep(void)
6503 +/* Pointers into RxDescList. */
6504 +static volatile struct USB_IN_Desc *myNextRxDesc;
6505 +static volatile struct USB_IN_Desc *myLastRxDesc;
6508 +/* A zout transfer makes a memory access at the address of its buf pointer,
6509 + which means that setting this buf pointer to 0 will cause an access to the
6510 + flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
6511 + (depending on DMA burst size) transfer.
6512 + Instead, we set it to 1, and point it to this buffer. */
6513 +static int zout_buffer[4] __attribute__ ((aligned (4)));
6515 - for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
6516 - CHECK_ALIGN(&TxCtrlEPList[i]);
6517 - TxCtrlEPList[i].hw_len = 0;
6518 - TxCtrlEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
6519 - TxCtrlEPList[i].sub = 0;
6520 - TxCtrlEPList[i].next = virt_to_phys(&TxCtrlEPList[i + 1]);
6522 +/* Cache for allocating new EP and SB descriptors. */
6523 +static kmem_cache_t *usb_desc_cache;
6525 - CHECK_ALIGN(&TxCtrlEPList[i]);
6526 - TxCtrlEPList[i].hw_len = 0;
6527 - TxCtrlEPList[i].command = (IO_STATE(USB_EP_command, eol, yes) |
6528 - IO_FIELD(USB_EP_command, epid, i));
6529 +/* Cache for the data allocated in the isoc descr top half. */
6530 +static kmem_cache_t *isoc_compl_cache;
6532 - TxCtrlEPList[i].sub = 0;
6533 - TxCtrlEPList[i].next = virt_to_phys(&TxCtrlEPList[0]);
6534 +/* Cache for the data allocated when delayed finishing of URBs */
6535 +static kmem_cache_t *later_data_cache;
6537 - *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[0]);
6538 - *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
6541 +/* Counter to keep track of how many Isoc EPs we have set up. Used to enable
6542 + and disable iso_eof interrupt. We only need these interrupts when we have
6543 + Isoc data endpoints (consumes CPU cycles).
6544 + FIXME: This could be more fine-grained, so this interrupt is only enabled
6545 + when we have an In Isoc URB without the URB_ISO_ASAP flag queued. */
6546 +static int isoc_epid_counter;
6548 +/* Protecting wrapper functions for R_USB_EPT_x */
6549 +/* -------------------------------------------- */
6550 +static inline void etrax_epid_set(__u8 index, __u32 data) {
6551 + unsigned long flags;
6552 + spin_lock_irqsave(&etrax_epid_lock, flags);
6553 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6555 + *R_USB_EPT_DATA = data;
6556 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
6559 +static inline void etrax_epid_clear_error(__u8 index) {
6560 + unsigned long flags;
6561 + spin_lock_irqsave(&etrax_epid_lock, flags);
6562 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6564 + *R_USB_EPT_DATA &=
6565 + ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
6566 + IO_MASK(R_USB_EPT_DATA, error_count_out) |
6567 + IO_MASK(R_USB_EPT_DATA, error_code));
6568 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
6571 +static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
6573 + unsigned long flags;
6574 + spin_lock_irqsave(&etrax_epid_lock, flags);
6575 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6578 + *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
6579 + *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
6581 + *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
6582 + *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
6584 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
6587 +static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
6588 + unsigned long flags;
6590 + spin_lock_irqsave(&etrax_epid_lock, flags);
6591 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6594 + toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
6596 + toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
6598 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
6603 +static inline __u32 etrax_epid_get(__u8 index) {
6604 + unsigned long flags;
6606 + spin_lock_irqsave(&etrax_epid_lock, flags);
6607 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6609 + data = *R_USB_EPT_DATA;
6610 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
6617 +/* Main functions for Transfer Controller */
6618 +/* -------------------------------------- */
6620 +/* Init structs, memories and lists used by Transfer Controller */
6621 +int tc_init(struct usb_hcd *hcd) {
6623 + /* Clear software state info for all epids */
6624 + memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
6626 + /* Set Invalid and Dummy as being in use and disabled */
6627 + epid_state[INVALID_EPID].inuse = 1;
6628 + epid_state[DUMMY_EPID].inuse = 1;
6629 + epid_state[INVALID_EPID].disabled = 1;
6630 + epid_state[DUMMY_EPID].disabled = 1;
6632 + /* Clear counter for how many Isoc epids we have set up */
6633 + isoc_epid_counter = 0;
6635 + /* Initialize the urb list by initiating a head for each list.
6636 + Also reset the list holding the active URB for each epid */
6637 + for (i = 0; i < NBR_OF_EPIDS; i++) {
6638 + INIT_LIST_HEAD(&urb_list[i]);
6639 + activeUrbList[i] = NULL;
6642 + /* Init lock for URB lists */
6643 + spin_lock_init(&urb_list_lock);
6644 + /* Init lock for Etrax R_USB_EPT register */
6645 + spin_lock_init(&etrax_epid_lock);
6646 + /* Init lock for Etrax dma8 sub0 handling */
6647 + spin_lock_init(&etrax_dma8_sub0_lock);
6649 + /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
6651 + /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
6652 + allocate SB descriptors from this cache. This is ok since
6653 + sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
6654 + usb_desc_cache = kmem_cache_create("usb_desc_cache",
6655 + sizeof(struct USB_EP_Desc), 0,
6656 + SLAB_HWCACHE_ALIGN, 0, 0);
6657 + if(usb_desc_cache == NULL) {
6661 + /* Create slab cache for speedy allocation of memory for isoc bottom-half
6662 + interrupt handling */
6663 + isoc_compl_cache =
6664 + kmem_cache_create("isoc_compl_cache",
6665 + sizeof(struct crisv10_isoc_complete_data),
6666 + 0, SLAB_HWCACHE_ALIGN, 0, 0);
6667 + if(isoc_compl_cache == NULL) {
6671 + /* Create slab cache for speedy allocation of memory for later URB finish
6673 + later_data_cache =
6674 + kmem_cache_create("later_data_cache",
6675 + sizeof(struct urb_later_data),
6676 + 0, SLAB_HWCACHE_ALIGN, 0, 0);
6677 + if(later_data_cache == NULL) {
6682 + /* Initiate the bulk start timer. */
6683 + init_timer(&bulk_start_timer);
6684 + bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
6685 + bulk_start_timer.function = tc_bulk_start_timer_func;
6686 + add_timer(&bulk_start_timer);
6689 + /* Initiate the bulk eot timer. */
6690 + init_timer(&bulk_eot_timer);
6691 + bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
6692 + bulk_eot_timer.function = tc_bulk_eot_timer_func;
6693 + bulk_eot_timer.data = (unsigned long)hcd;
6694 + add_timer(&bulk_eot_timer);
6699 +/* Uninitialize all resources used by Transfer Controller */
6700 +void tc_destroy(void) {
6702 + /* Destroy all slab cache */
6703 + kmem_cache_destroy(usb_desc_cache);
6704 + kmem_cache_destroy(isoc_compl_cache);
6705 + kmem_cache_destroy(later_data_cache);
6707 + /* Remove timers */
6708 + del_timer(&bulk_start_timer);
6709 + del_timer(&bulk_eot_timer);
6712 +static void restart_dma8_sub0(void) {
6713 + unsigned long flags;
6714 + spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
6715 + /* Verify that the dma is not running */
6716 + if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
6717 + struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
6718 + while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
6719 + ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
6721 + /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID.
6722 + * ep->next is already a physical address; no need for a virt_to_phys. */
6723 + *R_DMA_CH8_SUB0_EP = ep->next;
6724 + /* Restart the DMA */
6725 + *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
6727 + spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
6730 +/* queue an URB with the transfer controller (called from hcd_driver) */
6731 +static int tc_urb_enqueue(struct usb_hcd *hcd,
6732 + struct usb_host_endpoint *ep,
6734 + gfp_t mem_flags) {
6739 + unsigned long flags;
6740 + struct crisv10_urb_priv *urb_priv;
6741 + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
6744 + if(!(crisv10_hcd->running)) {
6745 + /* The USB Controller is not running, probably because no device is
6746 + attached. No point in enqueueing URBs then */
6747 + tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
6748 + (unsigned int)urb);
6752 + maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
6753 + /* Special case check for In Isoc transfers. Specification states that each
6754 + In Isoc transfer consists of one packet and therefore it should fit into
6755 + the transfer-buffer of an URB.
6756 + We do the check here to be sure (an invalid scenario can be produced with
6757 + parameters to the usbtest suite) */
6758 + if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
6759 + (urb->transfer_buffer_length < maxpacket)) {
6760 + tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
6764 + /* Check if there is enough bandwidth for periodic transfer */
6765 + if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
6766 + /* only check (and later claim) if not already claimed */
6767 + if (urb->bandwidth == 0) {
6768 + bustime = usb_check_bandwidth(urb->dev, urb);
6769 + if (bustime < 0) {
6770 + tc_err("Not enough periodic bandwidth\n");
6776 + /* Check if there is a epid for URBs destination, if not this function
6778 + epid = tc_setup_epid(ep, urb, mem_flags);
6780 + tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
6785 + if(urb == activeUrbList[epid]) {
6786 + tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb);
6790 + if(urb_list_entry(urb, epid)) {
6791 + tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb);
6795 + /* If we have actively flagged the endpoint as disabled then refuse submission */
6796 + if(epid_state[epid].disabled) {
6800 + /* Allocate and init HC-private data for URB */
6801 + if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
6805 + urb_priv = urb->hcpriv;
6807 + tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
6808 + (unsigned int)urb, urb_priv->urb_num, epid,
6809 + pipe_to_str(urb->pipe), urb->transfer_buffer_length);
6811 + /* Create and link SBs required for this URB */
6812 + retval = create_sb_for_urb(urb, mem_flags);
6814 + tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
6815 + urb_priv->urb_num);
6816 + urb_priv_free(hcd, urb);
6821 + /* Init intr EP pool if this URB is a INTR transfer. This pool is later
6822 + used when inserting EPs in the TxIntrEPList. We do the alloc here
6823 + so we can't run out of memory later */
6824 + if(usb_pipeint(urb->pipe)) {
6825 + retval = init_intr_urb(urb, mem_flags);
6827 + tc_warn("Failed to init Intr URB\n");
6828 + urb_priv_free(hcd, urb);
6834 + /* Disable other access when inserting USB */
6835 + local_irq_save(flags);
6837 + /* Claim bandwidth, if needed */
6839 + usb_claim_bandwidth(urb->dev, urb, bustime, 0);
6842 + /* Add URB to EP queue */
6843 + urb_list_add(urb, epid, mem_flags);
6845 + if(usb_pipeisoc(urb->pipe)) {
6846 + /* Special processing of Isoc URBs. */
6847 + tc_dma_process_isoc_urb(urb);
6849 + /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
6850 + tc_dma_process_queue(epid);
6853 + local_irq_restore(flags);
6859 +/* remove an URB from the transfer controller queues (called from hcd_driver)*/
6860 +static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) {
6861 + struct crisv10_urb_priv *urb_priv;
6862 + unsigned long flags;
6866 + /* Disable interrupts here since a descriptor interrupt for the isoc epid
6867 + will modify the sb list. This could possibly be done more granular, but
6868 + urb_dequeue should not be used frequently anyway.
6870 + local_irq_save(flags);
6872 + urb_priv = urb->hcpriv;
6875 + /* This happens if a device driver calls unlink on an urb that
6876 + was never submitted (lazy driver) or if the urb was completed
6877 + while dequeue was being called. */
6878 + tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb);
6879 + local_irq_restore(flags);
6882 + epid = urb_priv->epid;
6884 + tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
6885 + (urb == activeUrbList[epid]) ? "active" : "queued",
6886 + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
6887 + str_type(urb->pipe), epid, urb->status,
6888 + (urb_priv->later_data) ? "later-sched" : "");
6890 + /* For Bulk, Ctrl and Intr are only one URB active at a time. So any URB
6891 + that isn't active can be dequeued by just removing it from the queue */
6892 + if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
6893 + usb_pipeint(urb->pipe)) {
6895 + /* Check if URB hasn't gone further than the queue */
6896 + if(urb != activeUrbList[epid]) {
6897 + ASSERT(urb_priv->later_data == NULL);
6898 + tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
6899 + " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
6900 + str_dir(urb->pipe), str_type(urb->pipe), epid);
6902 + /* Finish the URB with error status from USB core */
6903 + tc_finish_urb(hcd, urb, urb->status);
6904 + local_irq_restore(flags);
6909 + /* Set URB status to Unlink for handling when interrupt comes. */
6910 + urb_priv->urb_state = UNLINK;
6912 + /* Differentiate dequeuing of Bulk and Ctrl from Isoc and Intr */
6913 + switch(usb_pipetype(urb->pipe)) {
6915 + /* Check if EP still is enabled */
6916 + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
6917 + /* The EP was enabled, disable it. */
6918 + TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
6920 + /* Kicking dummy list out of the party. */
6921 + TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
6923 + case PIPE_CONTROL:
6924 + /* Check if EP still is enabled */
6925 + if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
6926 + /* The EP was enabled, disable it. */
6927 + TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
6930 + case PIPE_ISOCHRONOUS:
6931 + /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
6932 + finish_isoc_urb(). Because there might be the case when an URB is dequeued
6933 + but there are other valid URBs waiting */
6935 + /* Check if In Isoc EP still is enabled */
6936 + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
6937 + /* The EP was enabled, disable it. */
6938 + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
6941 + case PIPE_INTERRUPT:
6942 + /* Special care is taken for interrupt URBs. EPs are unlinked in
6949 + /* Asynchronous unlink, finish the URB later from scheduled or other
6950 + event (data finished, error) */
6951 + tc_finish_urb_later(hcd, urb, urb->status);
6953 + local_irq_restore(flags);
6959 +static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
6960 + volatile int timeout = 10000;
6962 + struct crisv10_urb_priv* urb_priv;
6963 + unsigned long flags;
6965 + volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
6966 + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
6967 + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
6969 + int type = epid_state[epid].type;
6971 + /* Setting this flag will cause enqueue() to return -ENOENT for new
6972 + submissions on this endpoint and finish_urb() won't process the queue further */
6973 + epid_state[epid].disabled = 1;
6977 + /* Check if EP still is enabled */
6978 + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
6979 + /* The EP was enabled, disable it. */
6980 + TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
6981 + tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
6983 + /* Do busy-wait until DMA not using this EP descriptor anymore */
6984 + while((*R_DMA_CH8_SUB0_EP ==
6985 + virt_to_phys(&TxBulkEPList[epid])) &&
6987 + if(timeout == 0) {
6988 + warn("Timeout while waiting for DMA-TX-Bulk to leave EP for"
6989 + " epid:%d\n", epid);
6994 + case PIPE_CONTROL:
6995 + /* Check if EP still is enabled */
6996 + if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
6997 + /* The EP was enabled, disable it. */
6998 + TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
6999 + tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
7001 + /* Do busy-wait until DMA not using this EP descriptor anymore */
7002 + while((*R_DMA_CH8_SUB1_EP ==
7003 + virt_to_phys(&TxCtrlEPList[epid])) &&
7005 + if(timeout == 0) {
7006 + warn("Timeout while waiting for DMA-TX-Ctrl to leave EP for"
7007 + " epid:%d\n", epid);
7012 + case PIPE_INTERRUPT:
7013 + local_irq_save(flags);
7014 + /* Disable all Intr EPs belonging to epid */
7015 + first_ep = &TxIntrEPList[0];
7016 + curr_ep = first_ep;
7018 + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
7019 + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
7021 + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
7023 + curr_ep = phys_to_virt(curr_ep->next);
7024 + } while (curr_ep != first_ep);
7026 + local_irq_restore(flags);
7029 + case PIPE_ISOCHRONOUS:
7030 + /* Check if EP still is enabled */
7031 + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7032 + tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
7033 + /* The EP was enabled, disable it. */
7034 + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7036 + while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
7038 + if(timeout == 0) {
7039 + warn("Timeout while waiting for DMA-TX-Isoc to leave EP for"
7040 + " epid:%d\n", epid);
7046 + local_irq_save(flags);
7048 + /* Finish if there is active URB for this endpoint */
7049 + if(activeUrbList[epid] != NULL) {
7050 + urb = activeUrbList[epid];
7051 + urb_priv = urb->hcpriv;
7053 + tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
7054 + (urb == activeUrbList[epid]) ? "active" : "queued",
7055 + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7056 + str_type(urb->pipe), epid, urb->status,
7057 + (urb_priv->later_data) ? "later-sched" : "");
7059 + tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
7060 + ASSERT(activeUrbList[epid] == NULL);
7063 + /* Finish any queued URBs for this endpoint. There won't be any resubmissions
7064 + because epid_disabled causes enqueue() to fail for this endpoint */
7065 + while((urb = urb_list_first(epid)) != NULL) {
7066 + urb_priv = urb->hcpriv;
7069 + tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
7070 + (urb == activeUrbList[epid]) ? "active" : "queued",
7071 + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7072 + str_type(urb->pipe), epid, urb->status,
7073 + (urb_priv->later_data) ? "later-sched" : "");
7075 + tc_finish_urb(hcd, urb, -ENOENT);
7077 + epid_state[epid].disabled = 0;
7078 + local_irq_restore(flags);
7081 +/* free resources associated with an endpoint (called from hcd_driver) */
7082 +static void tc_endpoint_disable(struct usb_hcd *hcd,
7083 + struct usb_host_endpoint *ep) {
7085 + /* Only free epid if it has been allocated. We get two endpoint_disable
7086 + requests for ctrl endpoints so ignore the second one */
7087 + if(ep->hcpriv != NULL) {
7088 + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7089 + int epid = ep_priv->epid;
7090 + tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
7091 + (unsigned int)ep, (unsigned int)ep->hcpriv,
7092 + endpoint_to_str(&(ep->desc)), epid);
7094 + tc_sync_finish_epid(hcd, epid);
7096 + ASSERT(activeUrbList[epid] == NULL);
7097 + ASSERT(list_empty(&urb_list[epid]));
7101 + tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
7102 + (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
7107 +static void tc_finish_urb_later_proc(void *data) {
7108 + unsigned long flags;
7109 + struct urb_later_data* uld = (struct urb_later_data*)data;
7110 + local_irq_save(flags);
7111 + if(uld->urb == NULL) {
7112 + late_dbg("Later finish of URB = NULL (allready finished)\n");
7114 + struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
7116 + if(urb_priv->urb_num == uld->urb_num) {
7117 + late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
7118 + urb_priv->urb_num);
7119 + if(uld->status != uld->urb->status) {
7120 + errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
7121 + uld->urb->status, uld->status);
7123 + if(uld != urb_priv->later_data) {
7124 + panic("Scheduled uld not same as URBs uld\n");
7126 + tc_finish_urb(uld->hcd, uld->urb, uld->status);
7128 + late_warn("Ignoring later finish of URB:0x%x[%d]"
7129 + ", urb_num doesn't match current URB:0x%x[%d]",
7130 + (unsigned int)(uld->urb), uld->urb_num,
7131 + (unsigned int)(uld->urb), urb_priv->urb_num);
7134 + local_irq_restore(flags);
7135 + kmem_cache_free(later_data_cache, uld);
7138 +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
7140 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7141 + struct urb_later_data* uld;
7145 + if(urb_priv->later_data != NULL) {
7146 + /* Later-finish already scheduled for this URB, just update status to
7147 + return when finishing later */
7148 + errno_dbg("Later-finish schedule change URB status:%d with new"
7149 + " status:%d\n", urb_priv->later_data->status, status);
7151 + urb_priv->later_data->status = status;
7155 + uld = kmem_cache_alloc(later_data_cache, SLAB_ATOMIC);
7160 + uld->urb_num = urb_priv->urb_num;
7161 + uld->status = status;
7163 + INIT_WORK(&uld->ws, tc_finish_urb_later_proc, uld);
7164 + urb_priv->later_data = uld;
7166 + /* Schedule the finishing of the URB to happen later */
7167 + schedule_delayed_work(&uld->ws, LATER_TIMER_DELAY);
7170 +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
7173 +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
7174 + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
7175 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7181 + ASSERT(urb_priv != NULL);
7182 + epid = urb_priv->epid;
7183 + urb_num = urb_priv->urb_num;
7185 + if(urb != activeUrbList[epid]) {
7186 + if(urb_list_entry(urb, epid)) {
7187 + /* Remove this URB from the list. Only happens when URBs are finished
7188 + before having been processed (dequeuing) */
7189 + urb_list_del(urb, epid);
7191 + tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
7192 + " epid:%d\n", (unsigned int)urb, urb_num, epid);
7196 + /* Cancel any pending later-finish of this URB */
7197 + if(urb_priv->later_data) {
7198 + urb_priv->later_data->urb = NULL;
7201 + /* For an IN pipe, we always set the actual length, regardless of whether
7202 + there was an error or not (which means the device driver can use the data
7203 + if it wants to). */
7204 + if(usb_pipein(urb->pipe)) {
7205 + urb->actual_length = urb_priv->rx_offset;
7207 + /* Set actual_length for OUT urbs also; the USB mass storage driver seems
7209 + if (status == 0 && urb->status == -EINPROGRESS) {
7210 + urb->actual_length = urb->transfer_buffer_length;
7212 + /* We wouldn't know of any partial writes if there was an error. */
7213 + urb->actual_length = 0;
7218 + /* URB status mangling */
7219 + if(urb->status == -EINPROGRESS) {
7220 + /* The USB core hasn't changed the status, let's set our finish status */
7221 + urb->status = status;
7223 + if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
7224 + usb_pipein(urb->pipe) &&
7225 + (urb->actual_length != urb->transfer_buffer_length)) {
7226 + /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
7227 + max length) is to be treated as an error. */
7228 + errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
7229 + " data:%d\n", (unsigned int)urb, urb_num,
7230 + urb->actual_length);
7231 + urb->status = -EREMOTEIO;
7234 + if(urb_priv->urb_state == UNLINK) {
7235 + /* URB has been requested to be unlinked asynchronously */
7236 + urb->status = -ECONNRESET;
7237 + errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
7238 + (unsigned int)urb, urb_num, urb->status);
7241 + /* The USB Core wants to signal some error via the URB, pass it through */
7244 + /* use completely different finish function for Isoc URBs */
7245 + if(usb_pipeisoc(urb->pipe)) {
7246 + tc_finish_isoc_urb(hcd, urb, status);
7250 + /* Do special unlinking of EPs for Intr traffic */
7251 + if(usb_pipeint(urb->pipe)) {
7252 + tc_dma_unlink_intr_urb(urb);
7255 + /* Release allocated bandwidth for periodic transfers */
7256 + if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
7257 + usb_release_bandwidth(urb->dev, urb, 0);
7259 + /* This URB is active on EP */
7260 + if(urb == activeUrbList[epid]) {
7261 + /* We need to fiddle with the toggle bits because the hardware doesn't do
7263 + toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
7264 + usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
7265 + usb_pipeout(urb->pipe), toggle);
7267 + /* Checks for Ctrl and Bulk EPs */
7268 + switch(usb_pipetype(urb->pipe)) {
7270 + /* Check that the Bulk EP really is disabled before finishing active URB */
7271 + ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
7272 + IO_STATE(USB_EP_command, enable, no));
7273 + /* Disable sub-pointer for EP to avoid next tx_interrupt() to
7274 + process Bulk EP. */
7275 + TxBulkEPList[epid].sub = 0;
7276 + /* No need to wait for the DMA before changing the next pointer.
7277 + The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
7278 + the last one (INVALID_EPID) for actual traffic. */
7279 + TxBulkEPList[epid].next =
7280 + virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
7282 + case PIPE_CONTROL:
7283 + /* Check that the Ctrl EP really is disabled before finishing active URB */
7284 + ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
7285 + IO_STATE(USB_EP_command, enable, no));
7286 + /* Disable sub-pointer for EP to avoid next tx_interrupt() to
7287 + process Ctrl EP. */
7288 + TxCtrlEPList[epid].sub = 0;
7293 + /* Free HC-private URB data*/
7294 + urb_priv_free(hcd, urb);
7297 + errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
7298 + (unsigned int)urb, urb_num, str_dir(urb->pipe),
7299 + str_type(urb->pipe), urb->actual_length, urb->status);
7301 + tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
7302 + (unsigned int)urb, urb_num, str_dir(urb->pipe),
7303 + str_type(urb->pipe), urb->actual_length, urb->status);
7306 + /* If we just finished an active URB, clear active pointer. */
7307 + if (urb == activeUrbList[epid]) {
7308 + /* Make URB not active on EP anymore */
7309 + activeUrbList[epid] = NULL;
7311 + if(urb->status == 0) {
7312 + /* URB finished successfully, process queue to see if there are any more
7313 + URBs waiting before we call completion function.*/
7314 + if(crisv10_hcd->running) {
7315 + /* Only process queue if USB controller is running */
7316 + tc_dma_process_queue(epid);
7318 + tc_warn("No processing of queue for epid:%d, USB Controller not"
7319 + " running\n", epid);
7324 + /* Hand the URB from HCD to its USB device driver, using its completion
7326 + usb_hcd_giveback_urb (hcd, urb);
7328 + /* Check the queue once more if the URB returned with error, because we
7329 + didn't do it before the completion function because the specification
7330 + states that the queue should not restart until all it's unlinked
7331 + URBs have been fully retired, with the completion functions run */
7332 + if(crisv10_hcd->running) {
7333 + /* Only process queue if USB controller is running */
7334 + tc_dma_process_queue(epid);
7336 + tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
7343 +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
7345 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7347 + volatile int timeout = 10000;
7350 + epid = urb_priv->epid;
7352 + ASSERT(usb_pipeisoc(urb->pipe));
7354 + /* Set that all isoc packets have status and length set before
7355 + completing the urb. */
7356 + for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
7357 + urb->iso_frame_desc[i].actual_length = 0;
7358 + urb->iso_frame_desc[i].status = -EPROTO;
7361 + /* Check if the URB is currently active (done or error) */
7362 + if(urb == activeUrbList[epid]) {
7363 + /* Check if there are another In Isoc URB queued for this epid */
7364 + if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
7365 + /* Move it from queue to active and mark it started so Isoc transfers
7366 + won't be interrupted.
7367 + All Isoc URBs data transfers are already added to DMA lists so we
7368 + don't have to insert anything in DMA lists here. */
7369 + activeUrbList[epid] = urb_list_first(epid);
7370 + ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
7372 + urb_list_del(activeUrbList[epid], epid);
7375 + errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
7376 + " status:%d, new waiting URB:0x%x[%d]\n",
7377 + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7378 + str_type(urb->pipe), urb_priv->isoc_packet_counter,
7379 + urb->number_of_packets, urb->status,
7380 + (unsigned int)activeUrbList[epid],
7381 + ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
7384 + } else { /* No other URB queued for this epid */
7386 + errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
7387 + " status:%d, no new URB waiting\n",
7388 + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7389 + str_type(urb->pipe), urb_priv->isoc_packet_counter,
7390 + urb->number_of_packets, urb->status);
7393 + /* Check if EP is still enabled, then shut it down. */
7394 + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7395 + isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);
7397 + /* Should only occur for In Isoc EPs where SB isn't consumed. */
7398 + ASSERT(usb_pipein(urb->pipe));
7400 + /* Disable it and wait for it to stop */
7401 + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7403 + /* Ah, the luxury of busy-wait. */
7404 + while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
7406 + if(timeout == 0) {
7407 + warn("Timeout while waiting for DMA-TX-Isoc to leave EP for epid:%d\n", epid);
7411 + /* Unlink SB to say that epid is finished. */
7412 + TxIsocEPList[epid].sub = 0;
7413 + TxIsocEPList[epid].hw_len = 0;
7415 + /* No URB active for EP anymore */
7416 + activeUrbList[epid] = NULL;
7418 + } else { /* Finishing of not active URB (queued up with SBs though) */
7419 + isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
7420 + " SB queued but not active\n",
7421 + (unsigned int)urb, str_dir(urb->pipe),
7422 + urb_priv->isoc_packet_counter, urb->number_of_packets,
7424 + if(usb_pipeout(urb->pipe)) {
7425 + /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
7426 + struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;
7428 + iter_sb = TxIsocEPList[epid].sub ?
7429 + phys_to_virt(TxIsocEPList[epid].sub) : 0;
7432 + /* SB that is linked before this URBs first SB */
7433 + while (iter_sb && (iter_sb != urb_priv->first_sb)) {
7434 + prev_sb = iter_sb;
7435 + iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
7438 + if (iter_sb == 0) {
7439 + /* Unlink of the URB currently being transmitted. */
7441 + iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
7444 + while (iter_sb && (iter_sb != urb_priv->last_sb)) {
7445 + iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
7449 + next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
7451 + /* This should only happen if the DMA has completed
7452 + processing the SB list for this EP while interrupts
7454 + isoc_dbg("Isoc urb not found, already sent?\n");
7458 + prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
7460 + TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
7465 + /* Free HC-private URB data*/
7466 + urb_priv_free(hcd, urb);
7468 + usb_release_bandwidth(urb->dev, urb, 0);
7470 + /* Hand the URB from HCD to its USB device driver, using its completion
7472 + usb_hcd_giveback_urb (hcd, urb);
7475 +static __u32 urb_num = 0;
7477 +/* allocate and initialize URB private data */
7478 +static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
7480 + struct crisv10_urb_priv *urb_priv;
7482 + urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
7485 + memset(urb_priv, 0, sizeof *urb_priv);
7487 + urb_priv->epid = epid;
7488 + urb_priv->urb_state = NOT_STARTED;
7490 + urb->hcpriv = urb_priv;
7491 + /* Assign URB a sequence number, and increment counter */
7492 + urb_priv->urb_num = urb_num;
7497 +/* free URB private data */
7498 +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
7500 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7501 + ASSERT(urb_priv != 0);
7503 + /* Check it has any SBs linked that needs to be freed*/
7504 + if(urb_priv->first_sb != NULL) {
7505 + struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
7507 + first_sb = urb_priv->first_sb;
7508 + last_sb = urb_priv->last_sb;
7510 + while(first_sb != last_sb) {
7511 + next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
7512 + kmem_cache_free(usb_desc_cache, first_sb);
7513 + first_sb = next_sb;
7516 + kmem_cache_free(usb_desc_cache, last_sb);
7520 + /* Check if it has any EPs in its Intr pool that also needs to be freed */
7521 + if(urb_priv->intr_ep_pool_length > 0) {
7522 + for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
7523 + kfree(urb_priv->intr_ep_pool[i]);
7526 + tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
7527 + urb_priv->intr_ep_pool_length, (unsigned int)urb);
7532 + urb->hcpriv = NULL;
7535 +static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
7536 + struct crisv10_ep_priv *ep_priv;
7538 + ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
7541 + memset(ep_priv, 0, sizeof *ep_priv);
7543 + ep->hcpriv = ep_priv;
7547 +static void ep_priv_free(struct usb_host_endpoint *ep) {
7548 + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7551 + ep->hcpriv = NULL;
7554 +/* EPID handling functions, managing EP-list in Etrax through wrappers */
7555 +/* ------------------------------------------------------------------- */
7557 +/* Sets up a new EPID for an endpoint or returns existing if found */
7558 +static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
7561 + char devnum, endpoint, out_traffic, slow;
7564 + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7568 + /* Check if a valid epid already is setup for this endpoint */
7569 + if(ep_priv != NULL) {
7570 + return ep_priv->epid;
7573 + /* We must find and initiate a new epid for this urb. */
7574 + epid = tc_allocate_epid();
7577 + /* Failed to allocate a new epid. */
7582 + /* We now have a new epid to use. Claim it. */
7583 + epid_state[epid].inuse = 1;
7585 + /* Init private data for new endpoint */
7586 + if(ep_priv_create(ep, mem_flags) != 0) {
7589 + ep_priv = ep->hcpriv;
7590 + ep_priv->epid = epid;
7592 + devnum = usb_pipedevice(urb->pipe);
7593 + endpoint = usb_pipeendpoint(urb->pipe);
7594 + slow = (urb->dev->speed == USB_SPEED_LOW);
7595 + maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
7597 + if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
7598 + /* We want both IN and OUT control traffic to be put on the same
7602 + out_traffic = usb_pipeout(urb->pipe);
7605 + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
7606 + epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
7607 + /* FIXME: Change any to the actual port? */
7608 + IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
7609 + IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
7610 + IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
7611 + IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
7612 + etrax_epid_iso_set(epid, epid_data);
7614 + epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
7615 + IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
7616 + /* FIXME: Change any to the actual port? */
7617 + IO_STATE(R_USB_EPT_DATA, port, any) |
7618 + IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
7619 + IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
7620 + IO_FIELD(R_USB_EPT_DATA, dev, devnum);
7621 + etrax_epid_set(epid, epid_data);
7624 + epid_state[epid].out_traffic = out_traffic;
7625 + epid_state[epid].type = usb_pipetype(urb->pipe);
7627 + tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
7628 + (unsigned int)ep, epid, devnum, endpoint, maxlen,
7629 + str_type(urb->pipe), out_traffic ? "out" : "in",
7630 + slow ? "low" : "full");
7632 + /* Enable Isoc eof interrupt if we set up the first Isoc epid */
7633 + if(usb_pipeisoc(urb->pipe)) {
7634 + isoc_epid_counter++;
7635 + if(isoc_epid_counter == 1) {
7636 + isoc_warn("Enabled Isoc eof interrupt\n");
7637 + *R_USB_IRQ_MASK_SET |= IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
7645 +static void tc_free_epid(struct usb_host_endpoint *ep) {
7646 + unsigned long flags;
7647 + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7649 + volatile int timeout = 10000;
7653 + if (ep_priv == NULL) {
7654 + tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
7659 + epid = ep_priv->epid;
7661 + /* Disable Isoc eof interrupt if we free the last Isoc epid */
7662 + if(epid_isoc(epid)) {
7663 + ASSERT(isoc_epid_counter > 0);
7664 + isoc_epid_counter--;
7665 + if(isoc_epid_counter == 0) {
7666 + *R_USB_IRQ_MASK_SET &= ~IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
7667 + isoc_warn("Disabled Isoc eof interrupt\n");
7671 +  /* Take lock manually instead of in epid_x_x wrappers,
7672 +     because we need to poll here */
7673 + spin_lock_irqsave(&etrax_epid_lock, flags);
7675 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
7677 + while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
7679 + if(timeout == 0) {
7680 + warn("Timeout while waiting for epid:%d to drop hold\n", epid);
7682 + /* This will, among other things, set the valid field to 0. */
7683 + *R_USB_EPT_DATA = 0;
7684 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
7686 + /* Free resource in software state info list */
7687 + epid_state[epid].inuse = 0;
7689 + /* Free private endpoint data */
7695 +static int tc_allocate_epid(void) {
7698 + for (i = 0; i < NBR_OF_EPIDS; i++) {
7699 + if (!epid_inuse(i)) {
7705 + tc_warn("Found no free epids\n");
7711 -static void init_tx_intr_ep(void)
7714 +/* Wrappers around the list functions (include/linux/list.h). */
7715 +/* ---------------------------------------------------------- */
7716 +static inline int __urb_list_empty(int epid) {
7718 + retval = list_empty(&urb_list[epid]);
7723 +/* Returns first urb for this epid, or NULL if list is empty. */
7724 +static inline struct urb *urb_list_first(int epid) {
7725 + unsigned long flags;
7726 + struct urb *first_urb = 0;
7727 + spin_lock_irqsave(&urb_list_lock, flags);
7728 + if (!__urb_list_empty(epid)) {
7729 + /* Get the first urb (i.e. head->next). */
7730 + urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
7731 + first_urb = urb_entry->urb;
7733 + spin_unlock_irqrestore(&urb_list_lock, flags);
7737 - /* Read comment at zout_buffer declaration for an explanation to this. */
7738 - TxIntrSB_zout.sw_len = 1;
7739 - TxIntrSB_zout.next = 0;
7740 - TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
7741 - TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
7742 - IO_STATE(USB_SB_command, tt, zout) |
7743 - IO_STATE(USB_SB_command, full, yes) |
7744 - IO_STATE(USB_SB_command, eot, yes) |
7745 - IO_STATE(USB_SB_command, eol, yes));
7747 - for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
7748 - CHECK_ALIGN(&TxIntrEPList[i]);
7749 - TxIntrEPList[i].hw_len = 0;
7750 - TxIntrEPList[i].command =
7751 - (IO_STATE(USB_EP_command, eof, yes) |
7752 - IO_STATE(USB_EP_command, enable, yes) |
7753 - IO_FIELD(USB_EP_command, epid, INVALID_EPID));
7754 - TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
7755 - TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
7757 +/* Adds an urb_entry last in the list for this epid. */
7758 +static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
7759 + unsigned long flags;
7760 + urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
7761 + ASSERT(urb_entry);
7763 + urb_entry->urb = urb;
7764 + spin_lock_irqsave(&urb_list_lock, flags);
7765 + list_add_tail(&urb_entry->list, &urb_list[epid]);
7766 + spin_unlock_irqrestore(&urb_list_lock, flags);
7769 - CHECK_ALIGN(&TxIntrEPList[i]);
7770 - TxIntrEPList[i].hw_len = 0;
7771 - TxIntrEPList[i].command =
7772 - (IO_STATE(USB_EP_command, eof, yes) |
7773 - IO_STATE(USB_EP_command, eol, yes) |
7774 - IO_STATE(USB_EP_command, enable, yes) |
7775 - IO_FIELD(USB_EP_command, epid, INVALID_EPID));
7776 - TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
7777 - TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
7779 - *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
7780 - *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
7782 +/* Search through the list for an element that contains this urb. (The list
7783 + is expected to be short and the one we are about to delete will often be
7784 + the first in the list.)
7785 + Should be protected by spin_locks in calling function */
7786 +static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
7787 + struct list_head *entry;
7788 + struct list_head *tmp;
7789 + urb_entry_t *urb_entry;
7791 + list_for_each_safe(entry, tmp, &urb_list[epid]) {
7792 + urb_entry = list_entry(entry, urb_entry_t, list);
7793 + ASSERT(urb_entry);
7794 + ASSERT(urb_entry->urb);
7796 + if (urb_entry->urb == urb) {
7803 +/* Same function as above but for global use. Protects list by spinlock */
7804 +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
7805 + unsigned long flags;
7806 + urb_entry_t *urb_entry;
7807 + spin_lock_irqsave(&urb_list_lock, flags);
7808 + urb_entry = __urb_list_entry(urb, epid);
7809 + spin_unlock_irqrestore(&urb_list_lock, flags);
7810 + return (urb_entry);
7813 -static void init_tx_isoc_ep(void)
7816 +/* Delete an urb from the list. */
7817 +static inline void urb_list_del(struct urb *urb, int epid) {
7818 + unsigned long flags;
7819 + urb_entry_t *urb_entry;
7821 + /* Delete entry and free. */
7822 + spin_lock_irqsave(&urb_list_lock, flags);
7823 + urb_entry = __urb_list_entry(urb, epid);
7824 + ASSERT(urb_entry);
7826 + list_del(&urb_entry->list);
7827 + spin_unlock_irqrestore(&urb_list_lock, flags);
7832 +/* Move an urb to the end of the list. */
7833 +static inline void urb_list_move_last(struct urb *urb, int epid) {
7834 + unsigned long flags;
7835 + urb_entry_t *urb_entry;
7837 + spin_lock_irqsave(&urb_list_lock, flags);
7838 + urb_entry = __urb_list_entry(urb, epid);
7839 + ASSERT(urb_entry);
7841 + list_del(&urb_entry->list);
7842 + list_add_tail(&urb_entry->list, &urb_list[epid]);
7843 + spin_unlock_irqrestore(&urb_list_lock, flags);
7846 - /* Read comment at zout_buffer declaration for an explanation to this. */
7847 - TxIsocSB_zout.sw_len = 1;
7848 - TxIsocSB_zout.next = 0;
7849 - TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
7850 - TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
7851 - IO_STATE(USB_SB_command, tt, zout) |
7852 - IO_STATE(USB_SB_command, full, yes) |
7853 - IO_STATE(USB_SB_command, eot, yes) |
7854 - IO_STATE(USB_SB_command, eol, yes));
7856 - /* The last isochronous EP descriptor is a dummy. */
7858 - for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
7859 - CHECK_ALIGN(&TxIsocEPList[i]);
7860 - TxIsocEPList[i].hw_len = 0;
7861 - TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
7862 - TxIsocEPList[i].sub = 0;
7863 - TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
7864 +/* Get the next urb in the list. */
7865 +static inline struct urb *urb_list_next(struct urb *urb, int epid) {
7866 + unsigned long flags;
7867 + urb_entry_t *urb_entry;
7869 + spin_lock_irqsave(&urb_list_lock, flags);
7870 + urb_entry = __urb_list_entry(urb, epid);
7871 + ASSERT(urb_entry);
7873 + if (urb_entry->list.next != &urb_list[epid]) {
7874 + struct list_head *elem = urb_entry->list.next;
7875 + urb_entry = list_entry(elem, urb_entry_t, list);
7876 + spin_unlock_irqrestore(&urb_list_lock, flags);
7877 + return urb_entry->urb;
7879 + spin_unlock_irqrestore(&urb_list_lock, flags);
7884 +struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
7886 + struct USB_EP_Desc *ep_desc;
7887 + ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
7888 + if(ep_desc == NULL)
7890 + memset(ep_desc, 0, sizeof(struct USB_EP_Desc));
7892 + ep_desc->hw_len = 0;
7893 + ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
7894 + IO_STATE(USB_EP_command, enable, yes));
7895 + if(sb_desc == NULL) {
7898 + ep_desc->sub = virt_to_phys(sb_desc);
7908 +#define CMD_EOL IO_STATE(USB_SB_command, eol, yes)
7909 +#define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
7910 +#define CMD_FULL IO_STATE(USB_SB_command, full, yes)
7912 +/* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
7913 + SBs. Also used by create_sb_in() to avoid same allocation procedure at two
7915 +struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data,
7916 + int datalen, int mem_flags) {
7917 + struct USB_SB_Desc *sb_desc;
7918 + sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
7919 + if(sb_desc == NULL)
7921 + memset(sb_desc, 0, sizeof(struct USB_SB_Desc));
7923 + sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) |
7924 + IO_STATE(USB_SB_command, eot, yes);
7926 + sb_desc->sw_len = datalen;
7927 + if(data != NULL) {
7928 + sb_desc->buf = virt_to_phys(data);
7932 + if(sb_prev != NULL) {
7933 + sb_prev->next = virt_to_phys(sb_desc);
7938 +/* Creates a copy of an existing SB by allocating space for it and copying
7940 +struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) {
7941 + struct USB_SB_Desc *sb_desc;
7942 + sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
7943 + if(sb_desc == NULL)
7946 + memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc));
7950 +/* A specific create_sb function for creation of in SBs. This is due to
7951 + that datalen in In SBs shows how many packets we are expecting. It also
7952 +   sets up the rem field to show how many bytes we expect in the last packet
7953 + if it's not a full one */
7954 +struct USB_SB_Desc* create_sb_in(struct USB_SB_Desc* sb_prev, int datalen,
7955 + int maxlen, int mem_flags) {
7956 + struct USB_SB_Desc *sb_desc;
7957 + sb_desc = create_sb(sb_prev, TT_IN, NULL,
7958 + datalen ? (datalen - 1) / maxlen + 1 : 0, mem_flags);
7959 + if(sb_desc == NULL)
7961 + sb_desc->command |= IO_FIELD(USB_SB_command, rem, datalen % maxlen);
7965 +void set_sb_cmds(struct USB_SB_Desc *sb_desc, __u16 flags) {
7966 + sb_desc->command |= flags;
7969 +int create_sb_for_urb(struct urb *urb, int mem_flags) {
7970 + int is_out = !usb_pipein(urb->pipe);
7971 + int type = usb_pipetype(urb->pipe);
7972 + int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out);
7973 + int buf_len = urb->transfer_buffer_length;
7974 + void *buf = buf_len > 0 ? urb->transfer_buffer : NULL;
7975 + struct USB_SB_Desc *sb_desc = NULL;
7977 + struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
7978 + ASSERT(urb_priv != NULL);
7981 + case PIPE_CONTROL:
7983 + sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags);
7984 + if(sb_desc == NULL)
7986 + set_sb_cmds(sb_desc, CMD_FULL);
7988 + /* Attach first SB to URB */
7989 + urb_priv->first_sb = sb_desc;
7991 + if (is_out) { /* Out Control URB */
7992 + /* If this Control OUT transfer has an optional data stage we add
7993 + an OUT token before the mandatory IN (status) token */
7994 + if ((buf_len > 0) && buf) {
7995 + sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags);
7996 + if(sb_desc == NULL)
7998 + set_sb_cmds(sb_desc, CMD_FULL);
8001 + /* Status stage */
8002 + /* The data length has to be exactly 1. This is due to a requirement
8003 + of the USB specification that a host must be prepared to receive
8004 + data in the status phase */
8005 + sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
8006 + if(sb_desc == NULL)
8008 + } else { /* In control URB */
8010 + sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags);
8011 + if(sb_desc == NULL)
8014 + /* Status stage */
8015 + /* Read comment at zout_buffer declaration for an explanation to this. */
8016 + sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
8017 + if(sb_desc == NULL)
8019 + /* Set descriptor interrupt flag for in URBs so we can finish URB after
8020 + zout-packet has been sent */
8021 + set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL);
8023 + /* Set end-of-list flag in last SB */
8024 + set_sb_cmds(sb_desc, CMD_EOL);
8025 + /* Attach last SB to URB */
8026 + urb_priv->last_sb = sb_desc;
8030 + if (is_out) { /* Out Bulk URB */
8031 + sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
8032 + if(sb_desc == NULL)
8034 + /* The full field is set to yes, even if we don't actually check that
8035 + this is a full-length transfer (i.e., that transfer_buffer_length %
8037 + Setting full prevents the USB controller from sending an empty packet
8038 + in that case. However, if URB_ZERO_PACKET was set we want that. */
8039 + if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
8040 + set_sb_cmds(sb_desc, CMD_FULL);
8042 + } else { /* In Bulk URB */
8043 + sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
8044 + if(sb_desc == NULL)
8047 + /* Set end-of-list flag for last SB */
8048 + set_sb_cmds(sb_desc, CMD_EOL);
8050 + /* Attach SB to URB */
8051 + urb_priv->first_sb = sb_desc;
8052 + urb_priv->last_sb = sb_desc;
8055 + case PIPE_INTERRUPT:
8056 + if(is_out) { /* Out Intr URB */
8057 + sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
8058 + if(sb_desc == NULL)
8061 + /* The full field is set to yes, even if we don't actually check that
8062 + this is a full-length transfer (i.e., that transfer_buffer_length %
8064 + Setting full prevents the USB controller from sending an empty packet
8065 + in that case. However, if URB_ZERO_PACKET was set we want that. */
8066 + if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
8067 + set_sb_cmds(sb_desc, CMD_FULL);
8069 +      /* Only generate TX interrupt if it's an Out URB */
8070 + set_sb_cmds(sb_desc, CMD_INTR);
8072 + } else { /* In Intr URB */
8073 + sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
8074 + if(sb_desc == NULL)
8077 + /* Set end-of-list flag for last SB */
8078 + set_sb_cmds(sb_desc, CMD_EOL);
8080 + /* Attach SB to URB */
8081 + urb_priv->first_sb = sb_desc;
8082 + urb_priv->last_sb = sb_desc;
8085 + case PIPE_ISOCHRONOUS:
8086 + if(is_out) { /* Out Isoc URB */
8088 + if(urb->number_of_packets == 0) {
8089 + tc_err("Can't create SBs for Isoc URB with zero packets\n");
8092 + /* Create one SB descriptor for each packet and link them together. */
8093 + for(i = 0; i < urb->number_of_packets; i++) {
8094 + if (urb->iso_frame_desc[i].length > 0) {
8096 + sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer +
8097 + urb->iso_frame_desc[i].offset,
8098 + urb->iso_frame_desc[i].length, mem_flags);
8099 + if(sb_desc == NULL)
8102 + /* Check if it's a full length packet */
8103 + if (urb->iso_frame_desc[i].length ==
8104 + usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
8105 + set_sb_cmds(sb_desc, CMD_FULL);
8108 + } else { /* zero length packet */
8109 + sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
8110 + if(sb_desc == NULL)
8112 + set_sb_cmds(sb_desc, CMD_FULL);
8114 + /* Attach first SB descriptor to URB */
8116 + urb_priv->first_sb = sb_desc;
8119 + /* Set interrupt and end-of-list flags in last SB */
8120 + set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL);
8121 + /* Attach last SB descriptor to URB */
8122 + urb_priv->last_sb = sb_desc;
8123 + tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
8124 + urb->number_of_packets, (unsigned int)urb);
8125 + } else { /* In Isoc URB */
8126 + /* Actual number of packets is not relevant for periodic in traffic as
8127 + long as it is more than zero. Set to 1 always. */
8128 + sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
8129 + if(sb_desc == NULL)
8131 + /* Set end-of-list flags for SB */
8132 + set_sb_cmds(sb_desc, CMD_EOL);
8134 + /* Attach SB to URB */
8135 + urb_priv->first_sb = sb_desc;
8136 + urb_priv->last_sb = sb_desc;
8140 + tc_err("Unknown pipe-type\n");
8147 +int init_intr_urb(struct urb *urb, int mem_flags) {
8148 + struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
8149 + struct USB_EP_Desc* ep_desc;
8154 + ASSERT(urb_priv != NULL);
8155 + ASSERT(usb_pipeint(urb->pipe));
8156 + /* We can't support interval longer than amount of eof descriptors in
8158 + if(urb->interval > MAX_INTR_INTERVAL) {
8159 + tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval,
8160 + MAX_INTR_INTERVAL);
8164 + /* We assume that the SB descriptors already have been setup */
8165 + ASSERT(urb_priv->first_sb != NULL);
8167 +  /* Round off the interval to 2^n; it is obvious that this code favours
8168 + smaller numbers, but that is actually a good thing */
8169 + /* FIXME: The "rounding error" for larger intervals will be quite
8170 + large. For in traffic this shouldn't be a problem since it will only
8171 + mean that we "poll" more often. */
8172 + interval = urb->interval;
8173 + for (i = 0; interval; i++) {
8174 + interval = interval >> 1;
8176 + urb_priv->interval = 1 << (i - 1);
8178 + /* We can only have max interval for Out Interrupt due to that we can only
8179 + handle one linked in EP for a certain epid in the Intr descr array at the
8180 + time. The USB Controller in the Etrax 100LX continues to process Intr EPs
8181 + so we have no way of knowing which one that caused the actual transfer if
8182 + we have several linked in. */
8183 + if(usb_pipeout(urb->pipe)) {
8184 + urb_priv->interval = MAX_INTR_INTERVAL;
8187 + /* Calculate amount of EPs needed */
8188 + ep_count = MAX_INTR_INTERVAL / urb_priv->interval;
8190 + for(i = 0; i < ep_count; i++) {
8191 + ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags);
8192 + if(ep_desc == NULL) {
8193 + /* Free any descriptors that we may have allocated before failure */
8196 + kfree(urb_priv->intr_ep_pool[i]);
8200 + urb_priv->intr_ep_pool[i] = ep_desc;
8202 + urb_priv->intr_ep_pool_length = ep_count;
8206 +/* DMA RX/TX functions */
8207 +/* ----------------------- */
8209 +static void tc_dma_init_rx_list(void) {
8212 + /* Setup descriptor list except last one */
8213 + for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
8214 + RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
8215 + RxDescList[i].command = 0;
8216 + RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
8217 + RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
8218 + RxDescList[i].hw_len = 0;
8219 + RxDescList[i].status = 0;
8221 + /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
8222 + USB_IN_Desc for the relevant fields.) */
8223 + prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
8226 + /* Special handling of last descriptor */
8227 + RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
8228 + RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
8229 + RxDescList[i].next = virt_to_phys(&RxDescList[0]);
8230 + RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
8231 + RxDescList[i].hw_len = 0;
8232 + RxDescList[i].status = 0;
8234 + /* Setup list pointers that show progress in list */
8235 + myNextRxDesc = &RxDescList[0];
8236 + myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
8238 + flush_etrax_cache();
8239 + /* Point DMA to first descriptor in list and start it */
8240 + *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
8241 + *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
8245 +static void tc_dma_init_tx_bulk_list(void) {
8247 + volatile struct USB_EP_Desc *epDescr;
8249 + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
8250 + epDescr = &(TxBulkEPList[i]);
8251 + CHECK_ALIGN(epDescr);
8252 + epDescr->hw_len = 0;
8253 + epDescr->command = IO_FIELD(USB_EP_command, epid, i);
8255 + epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]);
8257 + /* Initiate two EPs, disabled and with the eol flag set. No need for any
8258 + preserved epid. */
8260 + /* The first one has the intr flag set so we get an interrupt when the DMA
8261 + channel is about to become disabled. */
8262 + CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
8263 + TxBulkDummyEPList[i][0].hw_len = 0;
8264 + TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
8265 + IO_STATE(USB_EP_command, eol, yes) |
8266 + IO_STATE(USB_EP_command, intr, yes));
8267 + TxBulkDummyEPList[i][0].sub = 0;
8268 + TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
8270 + /* The second one. */
8271 + CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
8272 + TxBulkDummyEPList[i][1].hw_len = 0;
8273 + TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
8274 + IO_STATE(USB_EP_command, eol, yes));
8275 + TxBulkDummyEPList[i][1].sub = 0;
8276 + /* The last dummy's next pointer is the same as the current EP's next pointer. */
8277 + TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
8280 + /* Special handling of last descr in list, make list circular */
8281 + epDescr = &TxBulkEPList[i];
8282 + CHECK_ALIGN(epDescr);
8283 + epDescr->hw_len = 0;
8284 + epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
8285 + IO_FIELD(USB_EP_command, epid, i);
8287 + epDescr->next = virt_to_phys(&TxBulkEPList[0]);
8289 + /* Init DMA sub-channel pointers to last item in each list */
8290 + *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
8291 + /* No point in starting the bulk channel yet.
8292 + *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
8295 +static void tc_dma_init_tx_ctrl_list(void) {
8297 + volatile struct USB_EP_Desc *epDescr;
8299 + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
8300 + epDescr = &(TxCtrlEPList[i]);
8301 + CHECK_ALIGN(epDescr);
8302 + epDescr->hw_len = 0;
8303 + epDescr->command = IO_FIELD(USB_EP_command, epid, i);
8305 + epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]);
8307 + /* Special handling of last descr in list, make list circular */
8308 + epDescr = &TxCtrlEPList[i];
8309 + CHECK_ALIGN(epDescr);
8310 + epDescr->hw_len = 0;
8311 + epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
8312 + IO_FIELD(USB_EP_command, epid, i);
8314 + epDescr->next = virt_to_phys(&TxCtrlEPList[0]);
8316 + /* Init DMA sub-channel pointers to last item in each list */
8317 + *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]);
8318 + /* No point in starting the ctrl channel yet.
8319 + *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
8323 +static void tc_dma_init_tx_intr_list(void) {
8326 + TxIntrSB_zout.sw_len = 1;
8327 + TxIntrSB_zout.next = 0;
8328 + TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
8329 + TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
8330 + IO_STATE(USB_SB_command, tt, zout) |
8331 + IO_STATE(USB_SB_command, full, yes) |
8332 + IO_STATE(USB_SB_command, eot, yes) |
8333 + IO_STATE(USB_SB_command, eol, yes));
8335 + for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
8336 + CHECK_ALIGN(&TxIntrEPList[i]);
8337 + TxIntrEPList[i].hw_len = 0;
8338 + TxIntrEPList[i].command =
8339 + (IO_STATE(USB_EP_command, eof, yes) |
8340 + IO_STATE(USB_EP_command, enable, yes) |
8341 + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
8342 + TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
8343 + TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
8346 + /* Special handling of last descr in list, make list circular */
8347 + CHECK_ALIGN(&TxIntrEPList[i]);
8348 + TxIntrEPList[i].hw_len = 0;
8349 + TxIntrEPList[i].command =
8350 + (IO_STATE(USB_EP_command, eof, yes) |
8351 + IO_STATE(USB_EP_command, eol, yes) |
8352 + IO_STATE(USB_EP_command, enable, yes) |
8353 + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
8354 + TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
8355 + TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
8357 + intr_dbg("Initiated Intr EP descriptor list\n");
8360 + /* Connect DMA 8 sub-channel 2 to first in list */
8361 + *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
8364 +static void tc_dma_init_tx_isoc_list(void) {
8369 + /* Read comment at zout_buffer declaration for an explanation to this. */
8370 + TxIsocSB_zout.sw_len = 1;
8371 + TxIsocSB_zout.next = 0;
8372 + TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
8373 + TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
8374 + IO_STATE(USB_SB_command, tt, zout) |
8375 + IO_STATE(USB_SB_command, full, yes) |
8376 + IO_STATE(USB_SB_command, eot, yes) |
8377 + IO_STATE(USB_SB_command, eol, yes));
8379 + /* The last isochronous EP descriptor is a dummy. */
8380 + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
8381 + CHECK_ALIGN(&TxIsocEPList[i]);
8382 + TxIsocEPList[i].hw_len = 0;
8383 + TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
8384 + TxIsocEPList[i].sub = 0;
8385 + TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
8388 + CHECK_ALIGN(&TxIsocEPList[i]);
8389 + TxIsocEPList[i].hw_len = 0;
8391 + /* Must enable the last EP descr to get eof interrupt. */
8392 + TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
8393 + IO_STATE(USB_EP_command, eof, yes) |
8394 + IO_STATE(USB_EP_command, eol, yes) |
8395 + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
8396 + TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
8397 + TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
8399 + *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
8400 + *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
8403 +static int tc_dma_init(struct usb_hcd *hcd) {
8404 + tc_dma_init_rx_list();
8405 + tc_dma_init_tx_bulk_list();
8406 + tc_dma_init_tx_ctrl_list();
8407 + tc_dma_init_tx_intr_list();
8408 + tc_dma_init_tx_isoc_list();
8410 + if (cris_request_dma(USB_TX_DMA_NBR,
8411 + "ETRAX 100LX built-in USB (Tx)",
8412 + DMA_VERBOSE_ON_ERROR,
8414 + err("Could not allocate DMA ch 8 for USB");
8418 + if (cris_request_dma(USB_RX_DMA_NBR,
8419 + "ETRAX 100LX built-in USB (Rx)",
8420 + DMA_VERBOSE_ON_ERROR,
8422 + err("Could not allocate DMA ch 9 for USB");
8426 + *R_IRQ_MASK2_SET =
8427 + /* Note that these interrupts are not used. */
8428 + IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
8429 + /* Sub channel 1 (ctrl) descr. interrupts are used. */
8430 + IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
8431 + IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
8432 + /* Sub channel 3 (isoc) descr. interrupts are used. */
8433 + IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
8435 + /* Note that the dma9_descr interrupt is not used. */
8436 + *R_IRQ_MASK2_SET =
8437 + IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
8438 + IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
8440 + if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0,
8441 + "ETRAX 100LX built-in USB (Rx)", hcd)) {
8442 + err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
8446 + if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0,
8447 + "ETRAX 100LX built-in USB (Tx)", hcd)) {
8448 + err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
8455 +static void tc_dma_destroy(void) {
8456 + free_irq(ETRAX_USB_RX_IRQ, NULL);
8457 + free_irq(ETRAX_USB_TX_IRQ, NULL);
8459 + cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)");
8460 + cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)");
8464 +static void tc_dma_link_intr_urb(struct urb *urb);
8466 +/* Handle processing of Bulk, Ctrl and Intr queues */
8467 +static void tc_dma_process_queue(int epid) {
8469 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8470 + unsigned long flags;
8473 + if(epid_state[epid].disabled) {
8474 + /* Don't process any URBs on a disabled endpoint */
8478 + /* Do not disturb us while fiddling with EPs and epids */
8479 + local_irq_save(flags);
8481 + /* For bulk, Ctrl and Intr can we only have one URB active at a time for
8483 + if(activeUrbList[epid] != NULL) {
8484 + /* An URB is already active on EP, skip checking queue */
8485 + local_irq_restore(flags);
8489 + urb = urb_list_first(epid);
8491 + /* No URB waiting in EP queue. Nothing do to */
8492 + local_irq_restore(flags);
8496 + urb_priv = urb->hcpriv;
8497 + ASSERT(urb_priv != NULL);
8498 + ASSERT(urb_priv->urb_state == NOT_STARTED);
8499 + ASSERT(!usb_pipeisoc(urb->pipe));
8501 + /* Remove this URB from the queue and move it to active */
8502 + activeUrbList[epid] = urb;
8503 + urb_list_del(urb, epid);
8505 + urb_priv->urb_state = STARTED;
8507 + /* Reset error counters (regardless of which direction this traffic is). */
8508 + etrax_epid_clear_error(epid);
8510 + /* Special handling of Intr EP lists */
8511 + if(usb_pipeint(urb->pipe)) {
8512 + tc_dma_link_intr_urb(urb);
8513 + local_irq_restore(flags);
8517 + /* Software must preset the toggle bits for Bulk and Ctrl */
8518 + if(usb_pipecontrol(urb->pipe)) {
8519 + /* Toggle bits are initialized only during setup transaction in a
8521 + etrax_epid_set_toggle(epid, 0, 0);
8522 + etrax_epid_set_toggle(epid, 1, 0);
8524 + toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
8525 + usb_pipeout(urb->pipe));
8526 + etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle);
8529 + tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
8530 + (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid,
8531 + sblist_to_str(urb_priv->first_sb));
8533 + /* We start the DMA sub channel without checking if it's running or not,
8535 + 1) If it's already running, issuing the start command is a nop.
8536 + 2) We avoid a test-and-set race condition. */
8537 + switch(usb_pipetype(urb->pipe)) {
8539 + /* Assert that the EP descriptor is disabled. */
8540 + ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
8542 + /* Set up and enable the EP descriptor. */
8543 + TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8544 + TxBulkEPList[epid].hw_len = 0;
8545 + TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
8547 + /* Check if the dummy list is already with us (if several urbs were queued). */
8548 + if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) {
8549 + tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d",
8550 + (unsigned long)urb, epid);
8552 + /* We don't need to check if the DMA is at this EP or not before changing the
8553 + next pointer, since we will do it in one 32-bit write (EP descriptors are
8554 + 32-bit aligned). */
8555 + TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
8558 + restart_dma8_sub0();
8560 + /* Update/restart the bulk start timer since we just started the channel.*/
8561 + mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
8562 + /* Update/restart the bulk eot timer since we just inserted traffic. */
8563 + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
8565 + case PIPE_CONTROL:
8566 + /* Assert that the EP descriptor is disabled. */
8567 + ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
8569 + /* Set up and enable the EP descriptor. */
8570 + TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8571 + TxCtrlEPList[epid].hw_len = 0;
8572 + TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
8574 + *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
8577 + local_irq_restore(flags);
8580 +static void tc_dma_link_intr_urb(struct urb *urb) {
8581 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8582 + volatile struct USB_EP_Desc *tmp_ep;
8583 + struct USB_EP_Desc *ep_desc;
8587 + ASSERT(urb_priv != NULL);
8588 + epid = urb_priv->epid;
8589 + ASSERT(urb_priv->interval > 0);
8590 + ASSERT(urb_priv->intr_ep_pool_length > 0);
8592 + tmp_ep = &TxIntrEPList[0];
8594 + /* Only insert one EP descriptor in list for Out Intr URBs.
8595 + We can only handle Out Intr with interval of 128ms because
8596 + it's not possible to insert several Out Intr EPs because they
8597 + are not consumed by the DMA. */
8598 + if(usb_pipeout(urb->pipe)) {
8599 + ep_desc = urb_priv->intr_ep_pool[0];
8601 + ep_desc->next = tmp_ep->next;
8602 + tmp_ep->next = virt_to_phys(ep_desc);
8605 + /* Loop through Intr EP descriptor list and insert EP for URB at
8606 + specified interval */
8608 + /* Each EP descriptor with the eof flag set signals a new frame */
8609 + if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
8610 + /* Insert an EP from the URB's EP pool at the correct interval */
8611 + if ((i % urb_priv->interval) == 0) {
8612 + ep_desc = urb_priv->intr_ep_pool[pool_idx];
8614 + ep_desc->next = tmp_ep->next;
8615 + tmp_ep->next = virt_to_phys(ep_desc);
8617 + ASSERT(pool_idx <= urb_priv->intr_ep_pool_length);
8621 + tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next);
8622 + } while(tmp_ep != &TxIntrEPList[0]);
8625 + intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid,
8626 + sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx);
8628 + /* We start the DMA sub channel without checking if it's running or not,
8630 + 1) If it's already running, issuing the start command is a nop.
8631 + 2) We avoid a test-and-set race condition. */
8632 + *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
8635 +static void tc_dma_process_isoc_urb(struct urb *urb) {
8636 + unsigned long flags;
8637 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8640 + /* Do not disturb us while fiddling with EPs and epids */
8641 + local_irq_save(flags);
8644 + ASSERT(urb_priv->first_sb);
8645 + epid = urb_priv->epid;
8647 + if(activeUrbList[epid] == NULL) {
8648 + /* EP is idle, so make this URB active */
8649 + activeUrbList[epid] = urb;
8650 + urb_list_del(urb, epid);
8651 + ASSERT(TxIsocEPList[epid].sub == 0);
8652 + ASSERT(!(TxIsocEPList[epid].command &
8653 + IO_STATE(USB_EP_command, enable, yes)));
8655 + /* Differentiate between In and Out Isoc. Because In SBs are not consumed*/
8656 + if(usb_pipein(urb->pipe)) {
8657 + /* Each EP for In Isoc will have only one SB descriptor, setup when
8658 + submitting the first active urb. We do it here by copying from URBs
8659 + pre-allocated SB. */
8660 + memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb,
8661 + sizeof(TxIsocSBList[epid]));
8662 + TxIsocEPList[epid].hw_len = 0;
8663 + TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid]));
8665 + /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
8666 + TxIsocEPList[epid].hw_len = 0;
8667 + TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8669 + isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
8670 + " last_sb::0x%x\n",
8671 + (unsigned int)urb, urb_priv->urb_num, epid,
8672 + (unsigned int)(urb_priv->first_sb),
8673 + (unsigned int)(urb_priv->last_sb));
8676 + if (urb->transfer_flags & URB_ISO_ASAP) {
8677 + /* The isoc transfer should be started as soon as possible. The
8678 + start_frame field is a return value if URB_ISO_ASAP was set. Comparing
8679 + R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
8680 + token is sent 2 frames later. I'm not sure how this affects usage of
8681 + the start_frame field by the device driver, or how it affects things
8682 + when USB_ISO_ASAP is not set, so therefore there's no compensation for
8683 + the 2 frame "lag" here. */
8684 + urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
8685 + TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
8686 + urb_priv->urb_state = STARTED;
8687 + isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
8688 + urb->start_frame);
8690 + /* Not started yet. */
8691 + urb_priv->urb_state = NOT_STARTED;
8692 + isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
8693 + (unsigned int)urb);
8697 + /* An URB is already active on the EP. Leave URB in queue and let
8698 + finish_isoc_urb process it after current active URB */
8699 + ASSERT(TxIsocEPList[epid].sub != 0);
8701 + if(usb_pipein(urb->pipe)) {
8702 + /* Because there already is an active In URB on this epid we do nothing
8703 + and the finish_isoc_urb() function will handle switching to next URB*/
8705 + } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */
8706 + struct USB_SB_Desc *temp_sb_desc;
8708 + /* Set state STARTED to all Out Isoc URBs added to SB list because we
8709 + don't know how many of them are finished before descr interrupt */
8710 + urb_priv->urb_state = STARTED;
8712 + /* Find end of current SB list by looking for the SB with the eol flag set */
8713 + temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
8714 + while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
8715 + IO_STATE(USB_SB_command, eol, yes)) {
8716 + ASSERT(temp_sb_desc->next);
8717 + temp_sb_desc = phys_to_virt(temp_sb_desc->next);
8720 + isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
8721 + " sub:0x%x eol:0x%x\n",
8722 + (unsigned int)urb, urb_priv->urb_num,
8723 + (unsigned int)(urb_priv->first_sb),
8724 + (unsigned int)(urb_priv->last_sb), epid,
8725 + (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
8726 + (unsigned int)temp_sb_desc);
8728 + /* Next pointer must be set before eol is removed. */
8729 + temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
8730 + /* Clear the previous end of list flag since there is a new in the
8731 + added SB descriptor list. */
8732 + temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
8734 + if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
8736 + /* 8.8.5 in Designer's Reference says we should check for and correct
8737 + any errors in the EP here. That should not be necessary if
8738 + epid_attn is handled correctly, so we assume all is ok. */
8739 + epid_data = etrax_epid_iso_get(epid);
8740 + if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) !=
8741 + IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
8742 + isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
8743 + " URB:0x%x[%d]\n",
8744 + IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid,
8745 + (unsigned int)urb, urb_priv->urb_num);
8748 + /* The SB list was exhausted. */
8749 + if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
8750 + /* The new sublist did not get processed before the EP was
8751 + disabled. Setup the EP again. */
8753 + if(virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) {
8754 + isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted"
8755 + ", restarting from this URBs SB:0x%x\n",
8756 + epid, (unsigned int)temp_sb_desc,
8757 + (unsigned int)(urb_priv->first_sb));
8758 + TxIsocEPList[epid].hw_len = 0;
8759 + TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8760 + urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
8761 + /* Enable the EP again so data gets processed this time */
8762 + TxIsocEPList[epid].command |=
8763 + IO_STATE(USB_EP_command, enable, yes);
8766 + /* The EP has been disabled but not at the end of this URB (god knows
8767 + where). This should generate an epid_attn so we should not be
8769 + isoc_warn("EP was disabled on sb:0x%x before SB list for"
8770 + " URB:0x%x[%d] got processed\n",
8771 + (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
8772 + (unsigned int)urb, urb_priv->urb_num);
8775 + /* This might happen if we are slow on this function and isn't
8777 + isoc_dbg("EP was disabled and finished with SBs from appended"
8778 + " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num);
8784 + /* Start the DMA sub channel */
8785 + *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
8787 + local_irq_restore(flags);
8790 +static void tc_dma_unlink_intr_urb(struct urb *urb) {
8791 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8792 + volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
8793 + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
8794 + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
8795 + volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from
8798 + volatile int timeout = 10000;
8801 + /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
8804 + ASSERT(urb_priv->intr_ep_pool_length > 0);
8805 + epid = urb_priv->epid;
8807 + /* First disable all Intr EPs belonging to epid for this URB */
8808 + first_ep = &TxIntrEPList[0];
8809 + curr_ep = first_ep;
8811 + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
8812 + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
8814 + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
8816 + curr_ep = phys_to_virt(curr_ep->next);
8817 + } while (curr_ep != first_ep);
8820 + /* Now unlink all EPs belonging to this epid from Descr list */
8821 + first_ep = &TxIntrEPList[0];
8822 + curr_ep = first_ep;
8824 + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
8825 + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
8826 + /* This is the one we should unlink. */
8827 + unlink_ep = next_ep;
8829 + /* Actually unlink the EP from the DMA list. */
8830 + curr_ep->next = unlink_ep->next;
8832 + /* Wait until the DMA is no longer at this descriptor. */
8833 + while((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) &&
8835 + if(timeout == 0) {
8836 + warn("Timeout while waiting for DMA-TX-Intr to leave unlink EP\n");
8841 + curr_ep = phys_to_virt(curr_ep->next);
8842 + } while (curr_ep != first_ep);
8844 + if(count != urb_priv->intr_ep_pool_length) {
8845 + intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count,
8846 + urb_priv->intr_ep_pool_length, (unsigned int)urb,
8847 + urb_priv->urb_num);
8849 + intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count,
8850 + urb_priv->intr_ep_pool_length, (unsigned int)urb);
8854 +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
8856 + unsigned long flags;
8859 + struct crisv10_urb_priv * urb_priv;
8862 + /* Protect TxEPList */
8863 + local_irq_save(flags);
8865 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
8866 + /* A finished EP descriptor is disabled and has a valid sub pointer */
8867 + if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
8868 + (TxBulkEPList[epid].sub != 0)) {
8870 + /* Get the active URB for this epid */
8871 + urb = activeUrbList[epid];
8872 + /* Sanity checks */
8874 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
8877 + /* Only handle finished out Bulk EPs here,
8878 + and let RX interrupt take care of the rest */
8879 + if(!epid_out_traffic(epid)) {
8884 + tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
8885 + epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
8886 + urb_priv->urb_num);
8888 + tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
8889 + epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
8890 + urb_priv->urb_num);
8893 + if(urb_priv->urb_state == UNLINK) {
8894 + /* This Bulk URB is requested to be unlinked, that means that the EP
8895 + has been disabled and we might not have sent all data */
8896 + tc_finish_urb(hcd, urb, urb->status);
8900 + ASSERT(urb_priv->urb_state == STARTED);
8901 + if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
8902 + tc_err("Endpoint got disabled before reaching last sb\n");
8905 + epid_data = etrax_epid_get(epid);
8906 + if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
8907 + IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
8908 + /* This means that the endpoint has no error, is disabled
8909 + and had inserted traffic, i.e. transfer successfully completed. */
8910 + tc_finish_urb(hcd, urb, 0);
8912 + /* Shouldn't happen. We expect errors to be caught by epid
8914 + tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
8915 + epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
8918 + tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid);
8922 + local_irq_restore(flags);
8925 +static void check_finished_ctrl_tx_epids(struct usb_hcd *hcd) {
8926 + unsigned long flags;
8929 + struct crisv10_urb_priv * urb_priv;
8932 + /* Protect TxEPList */
8933 + local_irq_save(flags);
8935 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
8936 + if(epid == DUMMY_EPID)
8939 + /* A finished EP descriptor is disabled and has a valid sub pointer */
8940 + if (!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
8941 + (TxCtrlEPList[epid].sub != 0)) {
8943 + /* Get the active URB for this epid */
8944 + urb = activeUrbList[epid];
8947 + tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid);
8951 + /* Sanity checks */
8952 + ASSERT(usb_pipein(urb->pipe));
8953 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
8955 + if (phys_to_virt(TxCtrlEPList[epid].sub) != urb_priv->last_sb) {
8956 + tc_err("Endpoint got disabled before reaching last sb\n");
8959 + epid_data = etrax_epid_get(epid);
8960 + if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
8961 + IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
8962 + /* This means that the endpoint has no error, is disabled
8963 + and had inserted traffic, i.e. transfer successfully completed. */
8965 + /* Check if RX-interrupt for In Ctrl has been processed before
8966 + finishing the URB */
8967 + if(urb_priv->ctrl_rx_done) {
8968 + tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
8969 + (unsigned int)urb, urb_priv->urb_num);
8970 + tc_finish_urb(hcd, urb, 0);
8972 + /* If we get a zout descriptor interrupt before RX was done for an
8973 + In Ctrl transfer, then we flag that and it will be finished
8974 + in the RX-Interrupt */
8975 + urb_priv->ctrl_zout_done = 1;
8976 + tc_dbg("Got zout descr interrupt before RX interrupt\n");
8979 + /* Shouldn't happen. We expect errors to be caught by epid
8981 + tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid, (unsigned int)urb, urb_priv->urb_num, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
8982 + __dump_ep_desc(&(TxCtrlEPList[epid]));
8983 + __dump_ept_data(epid);
8987 + local_irq_restore(flags);
8990 +/* This function goes through all epids that are setup for Out Isoc transfers
8991 + and marks (isoc_out_done) all queued URBs that the DMA has finished
8993 + No URB completion is done here to make interrupt routine return quickly.
8994 + URBs are completed later with help of complete_isoc_bottom_half() that
8995 + is scheduled when this function is finished. */
8996 +static void check_finished_isoc_tx_epids(void) {
8997 + unsigned long flags;
9000 + struct crisv10_urb_priv * urb_priv;
9001 + struct USB_SB_Desc* sb_desc;
9004 + /* Protect TxIsocEPList */
9005 + local_irq_save(flags);
9007 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9008 + if (TxIsocEPList[epid].sub == 0 || epid == INVALID_EPID ||
9009 + !epid_out_traffic(epid)) {
9010 + /* Nothing here to see. */
9013 + ASSERT(epid_inuse(epid));
9014 + ASSERT(epid_isoc(epid));
9016 + sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
9017 + /* Find the last descriptor of the currently active URB for this ep.
9018 + This is the first descriptor in the sub list marked for a descriptor
9020 + while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
9021 + sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
9025 + isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
9026 + epid, (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
9027 + (unsigned int)sb_desc);
9029 + urb = activeUrbList[epid];
9031 + isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid);
9036 + while(urb && !epid_done) {
9037 + /* Sanity check. */
9038 + ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
9039 + ASSERT(usb_pipeout(urb->pipe));
9041 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9043 + ASSERT(urb_priv->urb_state == STARTED ||
9044 + urb_priv->urb_state == UNLINK);
9046 + if (sb_desc != urb_priv->last_sb) {
9047 + /* This urb has been sent. */
9048 + urb_priv->isoc_out_done = 1;
9050 + } else { /* Found URB that has last_sb as the interrupt reason */
9052 + /* Check if EP has been disabled, meaning that all transfers are done*/
9053 + if(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
9054 + ASSERT((sb_desc->command & IO_MASK(USB_SB_command, eol)) ==
9055 + IO_STATE(USB_SB_command, eol, yes));
9056 + ASSERT(sb_desc->next == 0);
9057 + urb_priv->isoc_out_done = 1;
9059 + isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
9060 + (unsigned int)urb, urb_priv->urb_num);
9062 + /* Stop looking any further in queue */
9067 + if(urb == activeUrbList[epid]) {
9068 + urb = urb_list_first(epid);
9070 + urb = urb_list_next(urb, epid);
9073 + } /* END: while(urb && !epid_done) */
9076 + local_irq_restore(flags);
9080 +/* This is where the Out Isoc URBs are really completed. This function is
9081 + scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
9082 + are done. This functions completes all URBs earlier marked with
9083 + isoc_out_done by fast interrupt routine check_finished_isoc_tx_epids() */
9085 +static void complete_isoc_bottom_half(void *data) {
9086 + struct crisv10_isoc_complete_data *comp_data;
9087 + struct usb_iso_packet_descriptor *packet;
9088 + struct crisv10_urb_priv * urb_priv;
9089 + unsigned long flags;
9095 + comp_data = (struct crisv10_isoc_complete_data*)data;
9097 + local_irq_save(flags);
9099 + for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
9100 + if(!epid_inuse(epid) || !epid_isoc(epid) || !epid_out_traffic(epid) || epid == DUMMY_EPID) {
9101 + /* Only check valid Out Isoc epids */
9105 + isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid,
9106 + (unsigned int)phys_to_virt(TxIsocEPList[epid].sub));
9108 + /* The descriptor interrupt handler has marked all transmitted Out Isoc
9109 + URBs with isoc_out_done. Now we traverse all epids and for all that
9110 + have out Isoc traffic we traverse its URB list and complete the
9111 + transmitted URBs. */
9113 + while (!epid_done) {
9115 + /* Get the active urb (if any) */
9116 + urb = activeUrbList[epid];
9118 + isoc_dbg("No active URB on epid:%d anymore\n", epid);
9123 + /* Sanity check. */
9124 + ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
9125 + ASSERT(usb_pipeout(urb->pipe));
9127 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9130 + if (!(urb_priv->isoc_out_done)) {
9131 + /* We have reached a URB that isn't flagged done yet, stop traversing. */
9132 + isoc_dbg("Stoped traversing Out Isoc URBs on epid:%d"
9133 + " before not yet flaged URB:0x%x[%d]\n",
9134 + epid, (unsigned int)urb, urb_priv->urb_num);
9139 + /* This urb has been sent. */
9140 + isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n",
9141 + (unsigned int)urb, urb_priv->urb_num);
9143 + /* Set ok on transferred packets for this URB and finish it */
9144 + for (i = 0; i < urb->number_of_packets; i++) {
9145 + packet = &urb->iso_frame_desc[i];
9146 + packet->status = 0;
9147 + packet->actual_length = packet->length;
9149 + urb_priv->isoc_packet_counter = urb->number_of_packets;
9150 + tc_finish_urb(comp_data->hcd, urb, 0);
9152 + } /* END: while(!epid_done) */
9153 + } /* END: for(epid...) */
9155 + local_irq_restore(flags);
9156 + kmem_cache_free(isoc_compl_cache, comp_data);
9160 +static void check_finished_intr_tx_epids(struct usb_hcd *hcd) {
9161 + unsigned long flags;
9164 + struct crisv10_urb_priv * urb_priv;
9165 + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
9166 + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
9168 + /* Protect TxintrEPList */
9169 + local_irq_save(flags);
9171 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9172 + if(!epid_inuse(epid) || !epid_intr(epid) || !epid_out_traffic(epid)) {
9173 + /* Nothing to see on this epid. Only check valid Out Intr epids */
9177 + urb = activeUrbList[epid];
9179 + intr_warn("Found Out Intr epid:%d with no active URB\n", epid);
9183 + /* Sanity check. */
9184 + ASSERT(usb_pipetype(urb->pipe) == PIPE_INTERRUPT);
9185 + ASSERT(usb_pipeout(urb->pipe));
9187 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9190 + /* Go through EPs between first and second sof-EP. It's here Out Intr EPs
9192 + curr_ep = &TxIntrEPList[0];
9194 + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
9195 + if(next_ep == urb_priv->intr_ep_pool[0]) {
9196 + /* We found the Out Intr EP for this epid */
9198 + /* Disable it so it doesn't get processed again */
9199 + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
9201 + /* Finish the active Out Intr URB with status OK */
9202 + tc_finish_urb(hcd, urb, 0);
9204 + curr_ep = phys_to_virt(curr_ep->next);
9205 + } while (curr_ep != &TxIntrEPList[1]);
9208 + local_irq_restore(flags);
9211 +/* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */
9212 +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc) {
9213 + struct usb_hcd *hcd = (struct usb_hcd*)vhc;
9216 + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
9217 + /* Clear this interrupt */
9218 + *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
9219 + restart_dma8_sub0();
9222 + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
9223 + /* Clear this interrupt */
9224 + *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
9225 + check_finished_ctrl_tx_epids(hcd);
9228 + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
9229 + /* Clear this interrupt */
9230 + *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
9231 + check_finished_intr_tx_epids(hcd);
9234 + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
9235 + struct crisv10_isoc_complete_data* comp_data;
9237 + /* Flag done Out Isoc for later completion */
9238 + check_finished_isoc_tx_epids();
9240 + /* Clear this interrupt */
9241 + *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
9242 + /* Schedule bottom half of Out Isoc completion function. This function
9243 + finishes the URBs marked with isoc_out_done */
9244 + comp_data = (struct crisv10_isoc_complete_data*)
9245 + kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC);
9246 + ASSERT(comp_data != NULL);
9247 + comp_data ->hcd = hcd;
9249 + INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half, comp_data);
9250 + schedule_work(&comp_data->usb_bh);
9253 + return IRQ_HANDLED;
9256 +/* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */
9257 +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc) {
9258 + unsigned long flags;
9260 + struct usb_hcd *hcd = (struct usb_hcd*)vhc;
9261 + struct crisv10_urb_priv *urb_priv;
9267 + /* Clear this interrupt. */
9268 + *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);
9270 + /* Custom clear interrupt for this interrupt */
9271 + /* The reason we cli here is that we call the driver's callback functions. */
9272 + local_irq_save(flags);
9274 + /* Note that this while loop assumes that all packets span only
9275 + one rx descriptor. */
9276 + while(myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
9277 + epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
9278 + /* Get the active URB for this epid */
9279 + urb = activeUrbList[epid];
9281 + ASSERT(epid_inuse(epid));
9283 + dma_err("No urb for epid %d in rx interrupt\n", epid);
9287 + /* Check if any errors on epid */
9289 + if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
9290 + __u32 r_usb_ept_data;
9292 + if (usb_pipeisoc(urb->pipe)) {
9293 + r_usb_ept_data = etrax_epid_iso_get(epid);
9294 + if((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
9295 + (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
9296 + (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
9297 + /* Not an error, just a failure to receive an expected iso
9298 + in packet in this frame. This is not documented
9299 + in the designers reference. Continue processing.
9301 + } else real_error = 1;
9302 + } else real_error = 1;
9306 + dma_err("Error in RX descr on epid:%d for URB 0x%x",
9307 + epid, (unsigned int)urb);
9308 + dump_ept_data(epid);
9309 + dump_in_desc(myNextRxDesc);
9313 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9315 + ASSERT(urb_priv->urb_state == STARTED ||
9316 + urb_priv->urb_state == UNLINK);
9318 + if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
9319 + (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
9320 + (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
9322 + /* We get nodata for empty data transactions, and the rx descriptor's
9323 + hw_len field is not valid in that case. No data to copy in other
9325 + if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
9326 + /* No data to copy */
9329 + dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
9330 + (unsigned int)urb, epid, myNextRxDesc->hw_len,
9331 + urb_priv->rx_offset);
9333 + /* Only copy data if URB isn't flagged to be unlinked */
9334 + if(urb_priv->urb_state != UNLINK) {
9335 + /* Make sure the data fits in the buffer. */
9336 + if(urb_priv->rx_offset + myNextRxDesc->hw_len
9337 + <= urb->transfer_buffer_length) {
9339 + /* Copy the data to URBs buffer */
9340 + memcpy(urb->transfer_buffer + urb_priv->rx_offset,
9341 + phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
9342 + urb_priv->rx_offset += myNextRxDesc->hw_len;
9344 + /* Signal overflow when returning URB */
9345 + urb->status = -EOVERFLOW;
9346 + tc_finish_urb_later(hcd, urb, urb->status);
9351 + /* Check if it was the last packet in the transfer */
9352 + if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
9353 + /* Special handling for In Ctrl URBs. */
9354 + if(usb_pipecontrol(urb->pipe) && usb_pipein(urb->pipe) &&
9355 + !(urb_priv->ctrl_zout_done)) {
9356 + /* Flag that RX part of Ctrl transfer is done. Because zout descr
9357 + interrupt hasn't happened yet will the URB be finished in the
9359 + urb_priv->ctrl_rx_done = 1;
9360 + tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
9361 + " for zout\n", (unsigned int)urb);
9363 + tc_finish_urb(hcd, urb, 0);
9366 + } else { /* ISOC RX */
9368 + isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
9369 + epid, (unsigned int)urb);
9372 + struct usb_iso_packet_descriptor *packet;
9374 + if (urb_priv->urb_state == UNLINK) {
9375 + isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
9377 + } else if (urb_priv->urb_state == NOT_STARTED) {
9378 + isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
9382 + packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
9384 + packet->status = 0;
9386 + if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
9387 + /* We get nodata for empty data transactions, and the rx descriptor's
9388 + hw_len field is not valid in that case. We copy 0 bytes however to
9390 + packet->actual_length = 0;
9392 + packet->actual_length = myNextRxDesc->hw_len;
9393 + /* Make sure the data fits in the buffer. */
9394 + ASSERT(packet->actual_length <= packet->length);
9395 + memcpy(urb->transfer_buffer + packet->offset,
9396 + phys_to_virt(myNextRxDesc->buf), packet->actual_length);
9397 + if(packet->actual_length > 0)
9398 + isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
9399 + packet->actual_length, urb_priv->isoc_packet_counter,
9400 + (unsigned int)urb, urb_priv->urb_num);
9403 + /* Increment the packet counter. */
9404 + urb_priv->isoc_packet_counter++;
9406 + /* Note that we don't care about the eot field in the rx descriptor's
9407 + status. It will always be set for isoc traffic. */
9408 + if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
9409 + /* Complete the urb with status OK. */
9410 + tc_finish_urb(hcd, urb, 0);
9415 + myNextRxDesc->status = 0;
9416 + myNextRxDesc->command |= IO_MASK(USB_IN_command, eol);
9417 + myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
9418 + myLastRxDesc = myNextRxDesc;
9419 + myNextRxDesc = phys_to_virt(myNextRxDesc->next);
9420 + flush_etrax_cache();
9421 + *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, restart);
9424 + local_irq_restore(flags);
9426 + return IRQ_HANDLED;
9429 +static void tc_bulk_start_timer_func(unsigned long dummy) {
9430 + /* We might enable an EP descriptor behind the current DMA position when
9431 + it's about to decide that there are no more bulk traffic and it should
9432 + stop the bulk channel.
9433 + Therefore we periodically check if the bulk channel is stopped and there
9434 + is an enabled bulk EP descriptor, in which case we start the bulk
9437 + if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
9440 + timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");
9442 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9443 + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
9444 + timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
9446 + restart_dma8_sub0();
9448 + /* Restart the bulk eot timer since we just started the bulk channel.*/
9449 + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
9451 + /* No need to search any further. */
9456 + timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
9460 +static void tc_bulk_eot_timer_func(unsigned long dummy) {
9461 + struct usb_hcd *hcd = (struct usb_hcd*)dummy;
9463 + /* Because of a race condition in the top half, we might miss a bulk eot.
9464 + This timer "simulates" a bulk eot if we don't get one for a while,
9465 + hopefully correcting the situation. */
9466 + timer_dbg("bulk_eot_timer timed out.\n");
9467 + check_finished_bulk_tx_epids(hcd, 1);
9471 +/*************************************************************/
9472 +/*************************************************************/
9473 +/* Device driver block */
9474 +/*************************************************************/
9475 +/*************************************************************/
9477 +/* Forward declarations for device driver functions */
9478 +static int devdrv_hcd_probe(struct device *);
9479 +static int devdrv_hcd_remove(struct device *);
9481 +static int devdrv_hcd_suspend(struct device *, u32, u32);
9482 +static int devdrv_hcd_resume(struct device *, u32);
9483 +#endif /* CONFIG_PM */
9486 +static struct platform_device *devdrv_hc_platform_device;
9488 +/* device driver interface */
9489 +static struct device_driver devdrv_hc_device_driver = {
9490 + .name = (char *) hc_name,
9491 + .bus = &platform_bus_type,
9493 + .probe = devdrv_hcd_probe,
9494 + .remove = devdrv_hcd_remove,
9497 + .suspend = devdrv_hcd_suspend,
9498 + .resume = devdrv_hcd_resume,
9499 +#endif /* CONFIG_PM */
9502 - CHECK_ALIGN(&TxIsocEPList[i]);
9503 - TxIsocEPList[i].hw_len = 0;
9505 - /* Must enable the last EP descr to get eof interrupt. */
9506 - TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
9507 - IO_STATE(USB_EP_command, eof, yes) |
9508 - IO_STATE(USB_EP_command, eol, yes) |
9509 - IO_FIELD(USB_EP_command, epid, INVALID_EPID));
9510 - TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
9511 - TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
9513 - *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
9514 - *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
9519 -static void etrax_usb_unlink_intr_urb(struct urb *urb)
9520 +/* initialize the host controller and driver */
9521 +static int __init_or_module devdrv_hcd_probe(struct device *dev)
9523 - volatile USB_EP_Desc_t *first_ep; /* First EP in the list. */
9524 - volatile USB_EP_Desc_t *curr_ep; /* Current EP, the iterator. */
9525 - volatile USB_EP_Desc_t *next_ep; /* The EP after current. */
9526 - volatile USB_EP_Desc_t *unlink_ep; /* The one we should remove from the list. */
9530 - /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the List". */
9534 - epid = ((etrax_urb_priv_t *)urb->hcpriv)->epid;
9536 - first_ep = &TxIntrEPList[0];
9537 - curr_ep = first_ep;
9540 - /* Note that this loop removes all EP descriptors with this epid. This assumes
9541 - that all EP descriptors belong to the one and only urb for this epid. */
9544 - next_ep = (USB_EP_Desc_t *)phys_to_virt(curr_ep->next);
9546 - if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
9548 - dbg_intr("Found EP to unlink for epid %d", epid);
9550 - /* This is the one we should unlink. */
9551 - unlink_ep = next_ep;
9553 - /* Actually unlink the EP from the DMA list. */
9554 - curr_ep->next = unlink_ep->next;
9556 - /* Wait until the DMA is no longer at this descriptor. */
9557 - while (*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep));
9558 + struct usb_hcd *hcd;
9559 + struct crisv10_hcd *crisv10_hcd;
9562 + /* Check DMA burst length */
9563 + if(IO_EXTRACT(R_BUS_CONFIG, dma_burst, *R_BUS_CONFIG) !=
9564 + IO_STATE(R_BUS_CONFIG, dma_burst, burst32)) {
9565 + devdrv_err("Invalid DMA burst length in Etrax 100LX,"
9566 + " needs to be 32\n");
9570 + hcd = usb_create_hcd(&crisv10_hc_driver, dev, dev->bus_id);
9574 + crisv10_hcd = hcd_to_crisv10_hcd(hcd);
9575 + spin_lock_init(&crisv10_hcd->lock);
9576 + crisv10_hcd->num_ports = num_ports();
9577 + crisv10_hcd->running = 0;
9579 + dev_set_drvdata(dev, crisv10_hcd);
9581 + devdrv_dbg("ETRAX USB IRQs HC:%d RX:%d TX:%d\n", ETRAX_USB_HC_IRQ,
9582 + ETRAX_USB_RX_IRQ, ETRAX_USB_TX_IRQ);
9584 + /* Print out chip version read from registers */
9585 + int rev_maj = *R_USB_REVISION & IO_MASK(R_USB_REVISION, major);
9586 + int rev_min = *R_USB_REVISION & IO_MASK(R_USB_REVISION, minor);
9587 + if(rev_min == 0) {
9588 + devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj);
9590 + devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj, rev_min);
9593 + devdrv_info("Bulk timer interval, start:%d eot:%d\n",
9594 + BULK_START_TIMER_INTERVAL,
9595 + BULK_EOT_TIMER_INTERVAL);
9598 + /* Init root hub data structures */
9600 + devdrv_err("Failed init data for Root Hub\n");
9604 + if(port_in_use(0)) {
9605 + if (cris_request_io_interface(if_usb_1, "ETRAX100LX USB-HCD")) {
9606 + printk(KERN_CRIT "usb-host: request IO interface usb1 failed");
9610 + devdrv_info("Claimed interface for USB physical port 1\n");
9612 + if(port_in_use(1)) {
9613 + if (cris_request_io_interface(if_usb_2, "ETRAX100LX USB-HCD")) {
9614 + /* Free first interface if second failed to be claimed */
9615 + if(port_in_use(0)) {
9616 + cris_free_io_interface(if_usb_1);
9618 + printk(KERN_CRIT "usb-host: request IO interface usb2 failed");
9622 + devdrv_info("Claimed interface for USB physical port 2\n");
9625 + /* Init transfer controller structs and locks */
9626 + if((retval = tc_init(hcd)) != 0) {
9630 + /* Attach interrupt functions for DMA and init DMA controller */
9631 + if((retval = tc_dma_init(hcd)) != 0) {
9635 + /* Attach the top IRQ handler for USB controller interrupts */
9636 + if (request_irq(ETRAX_USB_HC_IRQ, crisv10_hcd_top_irq, 0,
9637 + "ETRAX 100LX built-in USB (HC)", hcd)) {
9638 + err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
9643 + /* iso_eof is only enabled when isoc traffic is running. */
9644 + *R_USB_IRQ_MASK_SET =
9645 + /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
9646 + IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
9647 + IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
9648 + IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
9649 + IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
9652 + crisv10_ready_wait();
9653 + /* Reset the USB interface. */
9655 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
9656 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
9657 + IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
9659 + /* Designer's Reference, p. 8 - 10 says we should initiate R_USB_FM_PSTART to
9660 + 0x2A30 (10800), to guarantee that control traffic gets 10% of the
9661 + bandwidth, and periodic transfer may allocate the rest (90%).
9662 + This doesn't work though.
9663 + The value 11960 is chosen to be just after the SOF token, with a couple
9664 + of bit times extra for possible bit stuffing. */
9665 + *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
9667 + crisv10_ready_wait();
9668 + /* Configure the USB interface as a host controller. */
9670 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
9671 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
9672 + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
9675 + /* Check so controller not busy before enabling ports */
9676 + crisv10_ready_wait();
9678 + /* Enable selected USB ports */
9679 + if(port_in_use(0)) {
9680 + *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
9682 + *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
9684 + if(port_in_use(1)) {
9685 + *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
9687 + *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
9690 + crisv10_ready_wait();
9691 + /* Start processing of USB traffic. */
9693 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
9694 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
9695 + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
9697 + /* Do not continue probing initialization before USB interface is done */
9698 + crisv10_ready_wait();
9700 + /* Register our Host Controller with the USB Core
9701 + * Finish the remaining parts of generic HCD initialization: allocate the
9702 + * buffers of consistent memory, register the bus
9703 + * and call the driver's reset() and start() routines. */
9704 + retval = usb_add_hcd(hcd, ETRAX_USB_HC_IRQ, IRQF_DISABLED);
9705 + if (retval != 0) {
9706 + devdrv_err("Failed registering HCD driver\n");
9713 + devdrv_hcd_remove(dev);
9718 +/* cleanup after the host controller and driver */
9719 +static int __init_or_module devdrv_hcd_remove(struct device *dev)
9721 + struct crisv10_hcd *crisv10_hcd = dev_get_drvdata(dev);
9722 + struct usb_hcd *hcd;
9726 + hcd = crisv10_hcd_to_hcd(crisv10_hcd);
9729 + /* Stop USB Controller in Etrax 100LX */
9730 + crisv10_hcd_reset(hcd);
9732 + usb_remove_hcd(hcd);
9733 + devdrv_dbg("Removed HCD from USB Core\n");
9735 + /* Free USB Controller IRQ */
9736 + free_irq(ETRAX_USB_HC_IRQ, NULL);
9738 + /* Free resources */
9743 + if(port_in_use(0)) {
9744 + cris_free_io_interface(if_usb_1);
9746 + if(port_in_use(1)) {
9747 + cris_free_io_interface(if_usb_2);
9750 + devdrv_dbg("Freed all claimed resources\n");
9758 +static int devdrv_hcd_suspend(struct usb_hcd *hcd, u32 state, u32 level)
9760 + return 0; /* no-op for now */
9763 +static int devdrv_hcd_resume(struct usb_hcd *hcd, u32 level)
9765 + return 0; /* no-op for now */
9768 +#endif /* CONFIG_PM */
9772 +/*************************************************************/
9773 +/*************************************************************/
9775 +/*************************************************************/
9776 +/*************************************************************/
9778 +/* register driver */
9779 +static int __init module_hcd_init(void)
9782 + if (usb_disabled())
9785 + /* Here we select enabled ports by following defines created from
9787 +#ifndef CONFIG_ETRAX_USB_HOST_PORT1
9790 +#ifndef CONFIG_ETRAX_USB_HOST_PORT2
9794 - /* Now we are free to remove it and its SB descriptor.
9795 - Note that it is assumed here that there is only one sb in the
9796 - sb list for this ep. */
9797 - kmem_cache_free(usb_desc_cache, phys_to_virt(unlink_ep->sub));
9798 - kmem_cache_free(usb_desc_cache, (USB_EP_Desc_t *)unlink_ep);
9800 + printk(KERN_INFO "%s version "VERSION" "COPYRIGHT"\n", product_desc);
9802 - curr_ep = phys_to_virt(curr_ep->next);
9803 + devdrv_hc_platform_device =
9804 + platform_device_register_simple((char *) hc_name, 0, NULL, 0);
9806 - } while (curr_ep != first_ep);
9807 - urb->hcpriv = NULL;
9808 + if (IS_ERR(devdrv_hc_platform_device))
9809 + return PTR_ERR(devdrv_hc_platform_device);
9810 + return driver_register(&devdrv_hc_device_driver);
9812 + * Note that we do not set the DMA mask for the device,
9813 + * i.e. we pretend that we will use PIO, since no specific
9814 + * allocation routines are needed for DMA buffers. This will
9815 + * cause the HCD buffer allocation routines to fall back to
9820 -void etrax_usb_do_intr_recover(int epid)
9822 - USB_EP_Desc_t *first_ep, *tmp_ep;
9823 +/* unregister driver */
9824 +static void __exit module_hcd_exit(void)
9826 + driver_unregister(&devdrv_hc_device_driver);
9831 - first_ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB2_EP);
9832 - tmp_ep = first_ep;
9834 - /* What this does is simply to walk the list of interrupt
9835 - ep descriptors and enable those that are disabled. */
9838 - if (IO_EXTRACT(USB_EP_command, epid, tmp_ep->command) == epid &&
9839 - !(tmp_ep->command & IO_MASK(USB_EP_command, enable))) {
9840 - tmp_ep->command |= IO_STATE(USB_EP_command, enable, yes);
9843 - tmp_ep = (USB_EP_Desc_t *)phys_to_virt(tmp_ep->next);
9845 - } while (tmp_ep != first_ep);
9851 -static int etrax_rh_unlink_urb (struct urb *urb)
9857 - hc = urb->dev->bus->hcpriv;
9859 - if (hc->rh.urb == urb) {
9861 - del_timer(&hc->rh.rh_int_timer);
9868 -static void etrax_rh_send_irq(struct urb *urb)
9871 - etrax_hc_t *hc = urb->dev->bus->hcpriv;
9875 - dbg_rh("R_USB_FM_NUMBER : 0x%08X", *R_USB_FM_NUMBER);
9876 - dbg_rh("R_USB_FM_REMAINING: 0x%08X", *R_USB_FM_REMAINING);
9879 - data |= (hc->rh.wPortChange_1) ? (1 << 1) : 0;
9880 - data |= (hc->rh.wPortChange_2) ? (1 << 2) : 0;
9882 - *((__u16 *)urb->transfer_buffer) = cpu_to_le16(data);
9883 - /* FIXME: Why is actual_length set to 1 when data is 2 bytes?
9884 - Since only 1 byte is used, why not declare data as __u8? */
9885 - urb->actual_length = 1;
9888 - if (hc->rh.send && urb->complete) {
9889 - dbg_rh("wPortChange_1: 0x%04X", hc->rh.wPortChange_1);
9890 - dbg_rh("wPortChange_2: 0x%04X", hc->rh.wPortChange_2);
9892 - urb->complete(urb, NULL);
9898 -static void etrax_rh_init_int_timer(struct urb *urb)
9904 - hc = urb->dev->bus->hcpriv;
9905 - hc->rh.interval = urb->interval;
9906 - init_timer(&hc->rh.rh_int_timer);
9907 - hc->rh.rh_int_timer.function = etrax_rh_int_timer_do;
9908 - hc->rh.rh_int_timer.data = (unsigned long)urb;
9909 - /* FIXME: Is the jiffies resolution enough? All intervals < 10 ms will be mapped
9910 - to 0, and the rest to the nearest lower 10 ms. */
9911 - hc->rh.rh_int_timer.expires = jiffies + ((HZ * hc->rh.interval) / 1000);
9912 - add_timer(&hc->rh.rh_int_timer);
9917 -static void etrax_rh_int_timer_do(unsigned long ptr)
9924 - urb = (struct urb*)ptr;
9925 - hc = urb->dev->bus->hcpriv;
9927 - if (hc->rh.send) {
9928 - etrax_rh_send_irq(urb);
9934 -static int etrax_usb_setup_epid(struct urb *urb)
9937 - char devnum, endpoint, out_traffic, slow;
9939 - unsigned long flags;
9943 - epid = etrax_usb_lookup_epid(urb);
9944 - if ((epid != -1)){
9945 - /* An epid that fits this urb has been found. */
9950 - /* We must find and initiate a new epid for this urb. */
9951 - epid = etrax_usb_allocate_epid();
9954 - /* Failed to allocate a new epid. */
9959 - /* We now have a new epid to use. Initiate it. */
9960 - set_bit(epid, (void *)&epid_usage_bitmask);
9962 - devnum = usb_pipedevice(urb->pipe);
9963 - endpoint = usb_pipeendpoint(urb->pipe);
9964 - slow = usb_pipeslow(urb->pipe);
9965 - maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
9966 - if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
9967 - /* We want both IN and OUT control traffic to be put on the same EP/SB list. */
9970 - out_traffic = usb_pipeout(urb->pipe);
9973 - save_flags(flags);
9976 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
9979 - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
9980 - *R_USB_EPT_DATA_ISO = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
9981 - /* FIXME: Change any to the actual port? */
9982 - IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
9983 - IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
9984 - IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
9985 - IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
9987 - *R_USB_EPT_DATA = IO_STATE(R_USB_EPT_DATA, valid, yes) |
9988 - IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
9989 - /* FIXME: Change any to the actual port? */
9990 - IO_STATE(R_USB_EPT_DATA, port, any) |
9991 - IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
9992 - IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
9993 - IO_FIELD(R_USB_EPT_DATA, dev, devnum);
9996 - restore_flags(flags);
9998 - if (out_traffic) {
9999 - set_bit(epid, (void *)&epid_out_traffic);
10001 - clear_bit(epid, (void *)&epid_out_traffic);
10004 - dbg_epid("Setting up epid %d with devnum %d, endpoint %d and max_len %d (%s)",
10005 - epid, devnum, endpoint, maxlen, out_traffic ? "OUT" : "IN");
10011 -static void etrax_usb_free_epid(int epid)
10013 - unsigned long flags;
10017 - if (!test_bit(epid, (void *)&epid_usage_bitmask)) {
10018 - warn("Trying to free unused epid %d", epid);
10023 - save_flags(flags);
10026 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
10028 - while (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold));
10029 - /* This will, among other things, set the valid field to 0. */
10030 - *R_USB_EPT_DATA = 0;
10031 - restore_flags(flags);
10033 - clear_bit(epid, (void *)&epid_usage_bitmask);
10036 - dbg_epid("Freed epid %d", epid);
10041 -static int etrax_usb_lookup_epid(struct urb *urb)
10045 - char devnum, endpoint, slow, out_traffic;
10047 - unsigned long flags;
10051 - devnum = usb_pipedevice(urb->pipe);
10052 - endpoint = usb_pipeendpoint(urb->pipe);
10053 - slow = usb_pipeslow(urb->pipe);
10054 - maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
10055 - if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
10056 - /* We want both IN and OUT control traffic to be put on the same EP/SB list. */
10059 - out_traffic = usb_pipeout(urb->pipe);
10062 - /* Step through att epids. */
10063 - for (i = 0; i < NBR_OF_EPIDS; i++) {
10064 - if (test_bit(i, (void *)&epid_usage_bitmask) &&
10065 - test_bit(i, (void *)&epid_out_traffic) == out_traffic) {
10067 - save_flags(flags);
10069 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, i);
10072 - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10073 - data = *R_USB_EPT_DATA_ISO;
10074 - restore_flags(flags);
10076 - if ((IO_MASK(R_USB_EPT_DATA_ISO, valid) & data) &&
10077 - (IO_EXTRACT(R_USB_EPT_DATA_ISO, dev, data) == devnum) &&
10078 - (IO_EXTRACT(R_USB_EPT_DATA_ISO, ep, data) == endpoint) &&
10079 - (IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len, data) == maxlen)) {
10080 - dbg_epid("Found epid %d for devnum %d, endpoint %d (%s)",
10081 - i, devnum, endpoint, out_traffic ? "OUT" : "IN");
10086 - data = *R_USB_EPT_DATA;
10087 - restore_flags(flags);
10089 - if ((IO_MASK(R_USB_EPT_DATA, valid) & data) &&
10090 - (IO_EXTRACT(R_USB_EPT_DATA, dev, data) == devnum) &&
10091 - (IO_EXTRACT(R_USB_EPT_DATA, ep, data) == endpoint) &&
10092 - (IO_EXTRACT(R_USB_EPT_DATA, low_speed, data) == slow) &&
10093 - (IO_EXTRACT(R_USB_EPT_DATA, max_len, data) == maxlen)) {
10094 - dbg_epid("Found epid %d for devnum %d, endpoint %d (%s)",
10095 - i, devnum, endpoint, out_traffic ? "OUT" : "IN");
10107 -static int etrax_usb_allocate_epid(void)
10113 - for (i = 0; i < NBR_OF_EPIDS; i++) {
10114 - if (!test_bit(i, (void *)&epid_usage_bitmask)) {
10115 - dbg_epid("Found free epid %d", i);
10121 - dbg_epid("Found no free epids");
10126 -static int etrax_usb_submit_urb(struct urb *urb, unsigned mem_flags)
10129 - int ret = -EINVAL;
10133 - if (!urb->dev || !urb->dev->bus) {
10136 - if (usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)) <= 0) {
10137 - info("Submit urb to pipe with maxpacketlen 0, pipe 0x%X\n", urb->pipe);
10138 - return -EMSGSIZE;
10141 - if (urb->timeout) {
10143 - warn("urb->timeout specified, ignoring.");
10146 - hc = (etrax_hc_t*)urb->dev->bus->hcpriv;
10148 - if (usb_pipedevice(urb->pipe) == hc->rh.devnum) {
10149 - /* This request is for the Virtual Root Hub. */
10150 - ret = etrax_rh_submit_urb(urb);
10152 - } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
10154 - ret = etrax_usb_submit_bulk_urb(urb);
10156 - } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
10158 - ret = etrax_usb_submit_ctrl_urb(urb);
10160 - } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
10163 - if (urb->bandwidth == 0) {
10164 - bustime = usb_check_bandwidth(urb->dev, urb);
10165 - if (bustime < 0) {
10168 - ret = etrax_usb_submit_intr_urb(urb);
10170 - usb_claim_bandwidth(urb->dev, urb, bustime, 0);
10173 - /* Bandwidth already set. */
10174 - ret = etrax_usb_submit_intr_urb(urb);
10177 - } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10180 - if (urb->bandwidth == 0) {
10181 - bustime = usb_check_bandwidth(urb->dev, urb);
10182 - if (bustime < 0) {
10185 - ret = etrax_usb_submit_isoc_urb(urb);
10187 - usb_claim_bandwidth(urb->dev, urb, bustime, 0);
10190 - /* Bandwidth already set. */
10191 - ret = etrax_usb_submit_isoc_urb(urb);
10198 - printk("Submit URB error %d\n", ret);
10203 -static int etrax_usb_unlink_urb(struct urb *urb, int status)
10206 - etrax_urb_priv_t *urb_priv;
10208 - unsigned int flags;
10216 - /* Disable interrupts here since a descriptor interrupt for the isoc epid
10217 - will modify the sb list. This could possibly be done more granular, but
10218 - unlink_urb should not be used frequently anyway.
10221 - save_flags(flags);
10224 - if (!urb->dev || !urb->dev->bus) {
10225 - restore_flags(flags);
10228 - if (!urb->hcpriv) {
10229 - /* This happens if a device driver calls unlink on an urb that
10230 - was never submitted (lazy driver) or if the urb was completed
10231 - while unlink was being called. */
10232 - restore_flags(flags);
10235 - if (urb->transfer_flags & URB_ASYNC_UNLINK) {
10237 - /* If URB_ASYNC_UNLINK is set:
10239 - move to a separate urb list
10240 - call complete at next sof with ECONNRESET
10245 - call complete with ENOENT
10247 - warn("URB_ASYNC_UNLINK set, ignoring.");
10250 - /* One might think that urb->status = -EINPROGRESS would be a requirement for unlinking,
10251 - but that doesn't work for interrupt and isochronous traffic since they are completed
10252 - repeatedly, and urb->status is set then. That may in itself be a bug though. */
10254 - hc = urb->dev->bus->hcpriv;
10255 - urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10256 - epid = urb_priv->epid;
10258 - /* Set the urb status (synchronous unlink). */
10259 - urb->status = -ENOENT;
10260 - urb_priv->urb_state = UNLINK;
10262 - if (usb_pipedevice(urb->pipe) == hc->rh.devnum) {
10264 - ret = etrax_rh_unlink_urb(urb);
10266 - restore_flags(flags);
10269 - } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
10271 - dbg_bulk("Unlink of bulk urb (0x%lx)", (unsigned long)urb);
10273 - if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
10274 - /* The EP was enabled, disable it and wait. */
10275 - TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
10277 - /* Ah, the luxury of busy-wait. */
10278 - while (*R_DMA_CH8_SUB0_EP == virt_to_phys(&TxBulkEPList[epid]));
10280 - /* Kicking dummy list out of the party. */
10281 - TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
10283 - } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
10285 - dbg_ctrl("Unlink of ctrl urb (0x%lx)", (unsigned long)urb);
10287 - if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
10288 - /* The EP was enabled, disable it and wait. */
10289 - TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
10291 - /* Ah, the luxury of busy-wait. */
10292 - while (*R_DMA_CH8_SUB1_EP == virt_to_phys(&TxCtrlEPList[epid]));
10295 - } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
10297 - dbg_intr("Unlink of intr urb (0x%lx)", (unsigned long)urb);
10299 - /* Separate function because it's a tad more complicated. */
10300 - etrax_usb_unlink_intr_urb(urb);
10302 - } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10304 - dbg_isoc("Unlink of isoc urb (0x%lx)", (unsigned long)urb);
10306 - if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
10307 - /* The EP was enabled, disable it and wait. */
10308 - TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
10310 - /* Ah, the luxury of busy-wait. */
10311 - while (*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid]));
10315 - /* Note that we need to remove the urb from the urb list *before* removing its SB
10316 - descriptors. (This means that the isoc eof handler might get a null urb when we
10317 - are unlinking the last urb.) */
10319 - if (usb_pipetype(urb->pipe) == PIPE_BULK) {
10321 - urb_list_del(urb, epid);
10322 - TxBulkEPList[epid].sub = 0;
10323 - etrax_remove_from_sb_list(urb);
10325 - } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
10327 - urb_list_del(urb, epid);
10328 - TxCtrlEPList[epid].sub = 0;
10329 - etrax_remove_from_sb_list(urb);
10331 - } else if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
10333 - urb_list_del(urb, epid);
10334 - /* Sanity check (should never happen). */
10335 - assert(urb_list_empty(epid));
10337 - /* Release allocated bandwidth. */
10338 - usb_release_bandwidth(urb->dev, urb, 0);
10340 - } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10342 - if (usb_pipeout(urb->pipe)) {
10344 - USB_SB_Desc_t *iter_sb, *prev_sb, *next_sb;
10346 - if (__urb_list_entry(urb, epid)) {
10348 - urb_list_del(urb, epid);
10349 - iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
10351 - while (iter_sb && (iter_sb != urb_priv->first_sb)) {
10352 - prev_sb = iter_sb;
10353 - iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
10356 - if (iter_sb == 0) {
10357 - /* Unlink of the URB currently being transmitted. */
10359 - iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
10362 - while (iter_sb && (iter_sb != urb_priv->last_sb)) {
10363 - iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
10366 - next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
10368 - /* This should only happen if the DMA has completed
10369 - processing the SB list for this EP while interrupts
10371 - dbg_isoc("Isoc urb not found, already sent?");
10375 - prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
10377 - TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
10380 - etrax_remove_from_sb_list(urb);
10381 - if (urb_list_empty(epid)) {
10382 - TxIsocEPList[epid].sub = 0;
10383 - dbg_isoc("Last isoc out urb epid %d", epid);
10384 - } else if (next_sb || prev_sb) {
10385 - dbg_isoc("Re-enable isoc out epid %d", epid);
10387 - TxIsocEPList[epid].hw_len = 0;
10388 - TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
10390 - TxIsocEPList[epid].sub = 0;
10391 - dbg_isoc("URB list non-empty and no SB list, EP disabled");
10394 - dbg_isoc("Urb 0x%p not found, completed already?", urb);
10398 - urb_list_del(urb, epid);
10400 - /* For in traffic there is only one SB descriptor for each EP even
10401 - though there may be several urbs (all urbs point at the same SB). */
10402 - if (urb_list_empty(epid)) {
10403 - /* No more urbs, remove the SB. */
10404 - TxIsocEPList[epid].sub = 0;
10405 - etrax_remove_from_sb_list(urb);
10407 - TxIsocEPList[epid].hw_len = 0;
10408 - TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
10411 - /* Release allocated bandwidth. */
10412 - usb_release_bandwidth(urb->dev, urb, 1);
10414 - /* Free the epid if urb list is empty. */
10415 - if (urb_list_empty(epid)) {
10416 - etrax_usb_free_epid(epid);
10418 - restore_flags(flags);
10420 - /* Must be done before calling completion handler. */
10424 - if (urb->complete) {
10425 - urb->complete(urb, NULL);
10432 -static int etrax_usb_get_frame_number(struct usb_device *usb_dev)
10436 - return (*R_USB_FM_NUMBER & 0x7ff);
10439 -static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc)
10443 - /* This interrupt handler could be used when unlinking EP descriptors. */
10445 - if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
10446 - USB_EP_Desc_t *ep;
10448 - //dbg_bulk("dma8_sub0_descr (BULK) intr.");
10450 - /* It should be safe clearing the interrupt here, since we don't expect to get a new
10451 - one until we restart the bulk channel. */
10452 - *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
10454 - /* Wait while the DMA is running (though we don't expect it to be). */
10455 - while (*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd));
10457 - /* Advance the DMA to the next EP descriptor. */
10458 - ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
10460 - //dbg_bulk("descr intr: DMA is at 0x%lx", (unsigned long)ep);
10462 - /* ep->next is already a physical address; no need for a virt_to_phys. */
10463 - *R_DMA_CH8_SUB0_EP = ep->next;
10465 - /* Start the DMA bulk channel again. */
10466 - *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
10468 - if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
10471 - etrax_urb_priv_t *urb_priv;
10472 - unsigned long int flags;
10474 - dbg_ctrl("dma8_sub1_descr (CTRL) intr.");
10475 - *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
10477 - /* The complete callback gets called so we cli. */
10478 - save_flags(flags);
10481 - for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
10482 - if ((TxCtrlEPList[epid].sub == 0) ||
10483 - (epid == DUMMY_EPID) ||
10484 - (epid == INVALID_EPID)) {
10485 - /* Nothing here to see. */
10489 - /* Get the first urb (if any). */
10490 - urb = urb_list_first(epid);
10494 - /* Sanity check. */
10495 - assert(usb_pipetype(urb->pipe) == PIPE_CONTROL);
10497 - urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10498 - assert(urb_priv);
10500 - if (urb_priv->urb_state == WAITING_FOR_DESCR_INTR) {
10501 - assert(!(TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
10503 - etrax_usb_complete_urb(urb, 0);
10507 - restore_flags(flags);
10509 - if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
10510 - dbg_intr("dma8_sub2_descr (INTR) intr.");
10511 - *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
10513 - if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
10517 - etrax_urb_priv_t *urb_priv;
10518 - USB_SB_Desc_t *sb_desc;
10520 - usb_isoc_complete_data_t *comp_data = NULL;
10522 - /* One or more isoc out transfers are done. */
10523 - dbg_isoc("dma8_sub3_descr (ISOC) intr.");
10525 - /* For each isoc out EP search for the first sb_desc with the intr flag
10526 - set. This descriptor must be the last packet from an URB. Then
10527 - traverse the URB list for the EP until the URB with urb_priv->last_sb
10528 - matching the intr-marked sb_desc is found. All URBs before this have
10532 - for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
10533 - /* Skip past epids with no SB lists, epids used for in traffic,
10534 - and special (dummy, invalid) epids. */
10535 - if ((TxIsocEPList[epid].sub == 0) ||
10536 - (test_bit(epid, (void *)&epid_out_traffic) == 0) ||
10537 - (epid == DUMMY_EPID) ||
10538 - (epid == INVALID_EPID)) {
10539 - /* Nothing here to see. */
10542 - sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
10544 - /* Find the last descriptor of the currently active URB for this ep.
10545 - This is the first descriptor in the sub list marked for a descriptor
10547 - while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
10548 - sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
10552 - dbg_isoc("Check epid %d, sub 0x%p, SB 0x%p",
10554 - phys_to_virt(TxIsocEPList[epid].sub),
10559 - /* Get the first urb (if any). */
10560 - urb = urb_list_first(epid);
10563 - while (urb && !epid_done) {
10565 - /* Sanity check. */
10566 - assert(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
10568 - if (!usb_pipeout(urb->pipe)) {
10569 - /* descr interrupts are generated only for out pipes. */
10574 - urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10575 - assert(urb_priv);
10577 - if (sb_desc != urb_priv->last_sb) {
10579 - /* This urb has been sent. */
10580 - dbg_isoc("out URB 0x%p sent", urb);
10582 - urb_priv->urb_state = TRANSFER_DONE;
10584 - } else if ((sb_desc == urb_priv->last_sb) &&
10585 - !(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
10587 - assert((sb_desc->command & IO_MASK(USB_SB_command, eol)) == IO_STATE(USB_SB_command, eol, yes));
10588 - assert(sb_desc->next == 0);
10590 - dbg_isoc("out URB 0x%p last in list, epid disabled", urb);
10591 - TxIsocEPList[epid].sub = 0;
10592 - TxIsocEPList[epid].hw_len = 0;
10593 - urb_priv->urb_state = TRANSFER_DONE;
10600 - if (!epid_done) {
10601 - urb = urb_list_next(urb, epid);
10607 - *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
10609 - comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC);
10610 - assert(comp_data != NULL);
10612 - INIT_WORK(&comp_data->usb_bh, etrax_usb_isoc_descr_interrupt_bottom_half, comp_data);
10613 - schedule_work(&comp_data->usb_bh);
10617 - return IRQ_HANDLED;
10620 -static void etrax_usb_isoc_descr_interrupt_bottom_half(void *data)
10622 - usb_isoc_complete_data_t *comp_data = (usb_isoc_complete_data_t*)data;
10627 - etrax_urb_priv_t *urb_priv;
10631 - dbg_isoc("dma8_sub3_descr (ISOC) bottom half.");
10633 - for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
10634 - unsigned long flags;
10636 - save_flags(flags);
10641 - /* The descriptor interrupt handler has marked all transmitted isoch. out
10642 - URBs with TRANSFER_DONE. Now we traverse all epids and for all that
10643 - have isoch. out traffic traverse its URB list and complete the
10647 - while (!epid_done) {
10649 - /* Get the first urb (if any). */
10650 - urb = urb_list_first(epid);
10656 - if (usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) {
10661 - if (!usb_pipeout(urb->pipe)) {
10662 - /* descr interrupts are generated only for out pipes. */
10667 - dbg_isoc("Check epid %d, SB 0x%p", epid, (char*)TxIsocEPList[epid].sub);
10669 - urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10670 - assert(urb_priv);
10672 - if (urb_priv->urb_state == TRANSFER_DONE) {
10674 - struct usb_iso_packet_descriptor *packet;
10676 - /* This urb has been sent. */
10677 - dbg_isoc("Completing isoc out URB 0x%p", urb);
10679 - for (i = 0; i < urb->number_of_packets; i++) {
10680 - packet = &urb->iso_frame_desc[i];
10681 - packet->status = 0;
10682 - packet->actual_length = packet->length;
10685 - etrax_usb_complete_isoc_urb(urb, 0);
10687 - if (urb_list_empty(epid)) {
10688 - etrax_usb_free_epid(epid);
10695 - restore_flags(flags);
10698 - kmem_cache_free(isoc_compl_cache, comp_data);
10705 -static irqreturn_t etrax_usb_rx_interrupt(int irq, void *vhc)
10708 - etrax_urb_priv_t *urb_priv;
10710 - unsigned long flags;
10712 - /* Isoc diagnostics. */
10713 - static int curr_fm = 0;
10714 - static int prev_fm = 0;
10718 - /* Clear this interrupt. */
10719 - *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);
10721 - /* Note that this while loop assumes that all packets span only
10722 - one rx descriptor. */
10724 - /* The reason we cli here is that we call the driver's callback functions. */
10725 - save_flags(flags);
10728 - while (myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
10730 - epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
10731 - urb = urb_list_first(epid);
10733 - //printk("eop for epid %d, first urb 0x%lx\n", epid, (unsigned long)urb);
10736 - err("No urb for epid %d in rx interrupt", epid);
10737 - __dump_ept_data(epid);
10741 - /* Note that we cannot indescriminately assert(usb_pipein(urb->pipe)) since
10742 - ctrl pipes are not. */
10744 - if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
10745 - __u32 r_usb_ept_data;
10746 - int no_error = 0;
10748 - assert(test_bit(epid, (void *)&epid_usage_bitmask));
10750 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
10752 - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10753 - r_usb_ept_data = *R_USB_EPT_DATA_ISO;
10755 - if ((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
10756 - (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
10757 - (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
10758 - /* Not an error, just a failure to receive an expected iso
10759 - in packet in this frame. This is not documented
10760 - in the designers reference.
10764 - warn("R_USB_EPT_DATA_ISO for epid %d = 0x%x", epid, r_usb_ept_data);
10767 - r_usb_ept_data = *R_USB_EPT_DATA;
10768 - warn("R_USB_EPT_DATA for epid %d = 0x%x", epid, r_usb_ept_data);
10772 - warn("error in rx desc->status, epid %d, first urb = 0x%lx",
10773 - epid, (unsigned long)urb);
10774 - __dump_in_desc(myNextRxDesc);
10776 - warn("R_USB_STATUS = 0x%x", *R_USB_STATUS);
10778 - /* Check that ept was disabled when error occurred. */
10779 - switch (usb_pipetype(urb->pipe)) {
10781 - assert(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
10783 - case PIPE_CONTROL:
10784 - assert(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
10786 - case PIPE_INTERRUPT:
10787 - assert(!(TxIntrEPList[epid].command & IO_MASK(USB_EP_command, enable)));
10789 - case PIPE_ISOCHRONOUS:
10790 - assert(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)));
10793 - warn("etrax_usb_rx_interrupt: bad pipetype %d in urb 0x%p",
10794 - usb_pipetype(urb->pipe),
10797 - etrax_usb_complete_urb(urb, -EPROTO);
10802 - urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10803 - assert(urb_priv);
10805 - if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
10806 - (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
10807 - (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
10809 - if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
10810 - /* We get nodata for empty data transactions, and the rx descriptor's
10811 - hw_len field is not valid in that case. No data to copy in other
10814 - /* Make sure the data fits in the buffer. */
10815 - assert(urb_priv->rx_offset + myNextRxDesc->hw_len
10816 - <= urb->transfer_buffer_length);
10818 - memcpy(urb->transfer_buffer + urb_priv->rx_offset,
10819 - phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
10820 - urb_priv->rx_offset += myNextRxDesc->hw_len;
10823 - if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
10824 - if ((usb_pipetype(urb->pipe) == PIPE_CONTROL) &&
10825 - ((TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)) ==
10826 - IO_STATE(USB_EP_command, enable, yes))) {
10827 - /* The EP is still enabled, so the OUT packet used to ack
10828 - the in data is probably not processed yet. If the EP
10829 - sub pointer has not moved beyond urb_priv->last_sb mark
10830 - it for a descriptor interrupt and complete the urb in
10831 - the descriptor interrupt handler.
10833 - USB_SB_Desc_t *sub = TxCtrlEPList[urb_priv->epid].sub ? phys_to_virt(TxCtrlEPList[urb_priv->epid].sub) : 0;
10835 - while ((sub != NULL) && (sub != urb_priv->last_sb)) {
10836 - sub = sub->next ? phys_to_virt(sub->next) : 0;
10838 - if (sub != NULL) {
10839 - /* The urb has not been fully processed. */
10840 - urb_priv->urb_state = WAITING_FOR_DESCR_INTR;
10842 - warn("(CTRL) epid enabled and urb (0x%p) processed, ep->sub=0x%p", urb, (char*)TxCtrlEPList[urb_priv->epid].sub);
10843 - etrax_usb_complete_urb(urb, 0);
10846 - etrax_usb_complete_urb(urb, 0);
10850 - } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
10852 - struct usb_iso_packet_descriptor *packet;
10854 - if (urb_priv->urb_state == UNLINK) {
10855 - info("Ignoring rx data for urb being unlinked.");
10857 - } else if (urb_priv->urb_state == NOT_STARTED) {
10858 - info("What? Got rx data for urb that isn't started?");
10862 - packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
10863 - packet->status = 0;
10865 - if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
10866 - /* We get nodata for empty data transactions, and the rx descriptor's
10867 - hw_len field is not valid in that case. We copy 0 bytes however to
10868 - stay in synch. */
10869 - packet->actual_length = 0;
10871 - packet->actual_length = myNextRxDesc->hw_len;
10872 - /* Make sure the data fits in the buffer. */
10873 - assert(packet->actual_length <= packet->length);
10874 - memcpy(urb->transfer_buffer + packet->offset,
10875 - phys_to_virt(myNextRxDesc->buf), packet->actual_length);
10878 - /* Increment the packet counter. */
10879 - urb_priv->isoc_packet_counter++;
10881 - /* Note that we don't care about the eot field in the rx descriptor's status.
10882 - It will always be set for isoc traffic. */
10883 - if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
10885 - /* Out-of-synch diagnostics. */
10886 - curr_fm = (*R_USB_FM_NUMBER & 0x7ff);
10887 - if (((prev_fm + urb_priv->isoc_packet_counter) % (0x7ff + 1)) != curr_fm) {
10888 - /* This test is wrong, if there is more than one isoc
10889 - in endpoint active it will always calculate wrong
10890 - since prev_fm is shared by all endpoints.
10892 - FIXME Make this check per URB using urb->start_frame.
10894 - dbg_isoc("Out of synch? Previous frame = %d, current frame = %d",
10895 - prev_fm, curr_fm);
10898 - prev_fm = curr_fm;
10900 - /* Complete the urb with status OK. */
10901 - etrax_usb_complete_isoc_urb(urb, 0);
10907 - /* DMA IN cache bug. Flush the DMA IN buffer from the cache. (struct etrax_dma_descr
10908 - has the same layout as USB_IN_Desc for the relevant fields.) */
10909 - prepare_rx_descriptor((struct etrax_dma_descr*)myNextRxDesc);
10911 - myPrevRxDesc = myNextRxDesc;
10912 - myPrevRxDesc->command |= IO_MASK(USB_IN_command, eol);
10913 - myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
10914 - myLastRxDesc = myPrevRxDesc;
10916 - myNextRxDesc->status = 0;
10917 - myNextRxDesc = phys_to_virt(myNextRxDesc->next);
10920 - restore_flags(flags);
10924 - return IRQ_HANDLED;
10928 -/* This function will unlink the SB descriptors associated with this urb. */
10929 -static int etrax_remove_from_sb_list(struct urb *urb)
10931 - USB_SB_Desc_t *next_sb, *first_sb, *last_sb;
10932 - etrax_urb_priv_t *urb_priv;
10937 - urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
10938 - assert(urb_priv);
10940 - /* Just a sanity check. Since we don't fiddle with the DMA list the EP descriptor
10941 - doesn't really need to be disabled, it's just that we expect it to be. */
10942 - if (usb_pipetype(urb->pipe) == PIPE_BULK) {
10943 - assert(!(TxBulkEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
10944 - } else if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
10945 - assert(!(TxCtrlEPList[urb_priv->epid].command & IO_MASK(USB_EP_command, enable)));
10948 - first_sb = urb_priv->first_sb;
10949 - last_sb = urb_priv->last_sb;
10951 - assert(first_sb);
10954 - while (first_sb != last_sb) {
10955 - next_sb = (USB_SB_Desc_t *)phys_to_virt(first_sb->next);
10956 - kmem_cache_free(usb_desc_cache, first_sb);
10957 - first_sb = next_sb;
10960 - kmem_cache_free(usb_desc_cache, last_sb);
10962 - dbg_sb("%d SB descriptors freed", i);
10963 - /* Compare i with urb->number_of_packets for Isoc traffic.
10964 - Should be same when calling unlink_urb */
10971 -static int etrax_usb_submit_bulk_urb(struct urb *urb)
10975 - unsigned long flags;
10976 - etrax_urb_priv_t *urb_priv;
10980 - /* Epid allocation, empty check and list add must be protected.
10981 - Read about this in etrax_usb_submit_ctrl_urb. */
10983 - spin_lock_irqsave(&urb_list_lock, flags);
10984 - epid = etrax_usb_setup_epid(urb);
10985 - if (epid == -1) {
10987 - spin_unlock_irqrestore(&urb_list_lock, flags);
10990 - empty = urb_list_empty(epid);
10991 - urb_list_add(urb, epid);
10992 - spin_unlock_irqrestore(&urb_list_lock, flags);
10994 - dbg_bulk("Adding bulk %s urb 0x%lx to %s list, epid %d",
10995 - usb_pipein(urb->pipe) ? "IN" : "OUT", (unsigned long)urb, empty ? "empty" : "", epid);
10997 - /* Mark the urb as being in progress. */
10998 - urb->status = -EINPROGRESS;
11000 - /* Setup the hcpriv data. */
11001 - urb_priv = kzalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
11002 - assert(urb_priv != NULL);
11003 - /* This sets rx_offset to 0. */
11004 - urb_priv->urb_state = NOT_STARTED;
11005 - urb->hcpriv = urb_priv;
11008 - etrax_usb_add_to_bulk_sb_list(urb, epid);
11016 -static void etrax_usb_add_to_bulk_sb_list(struct urb *urb, int epid)
11018 - USB_SB_Desc_t *sb_desc;
11019 - etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
11020 - unsigned long flags;
11025 - dbg_bulk("etrax_usb_add_to_bulk_sb_list, urb 0x%lx", (unsigned long)urb);
11027 - maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
11029 - sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11030 - assert(sb_desc != NULL);
11031 - memset(sb_desc, 0, sizeof(USB_SB_Desc_t));
11034 - if (usb_pipeout(urb->pipe)) {
11036 - dbg_bulk("Grabbing bulk OUT, urb 0x%lx, epid %d", (unsigned long)urb, epid);
11038 - /* This is probably a sanity check of the bulk transaction length
11039 - not being larger than 64 kB. */
11040 - if (urb->transfer_buffer_length > 0xffff) {
11041 - panic("urb->transfer_buffer_length > 0xffff");
11044 - sb_desc->sw_len = urb->transfer_buffer_length;
11046 - /* The rem field is don't care if it's not a full-length transfer, so setting
11047 - it shouldn't hurt. Also, rem isn't used for OUT traffic. */
11048 - sb_desc->command = (IO_FIELD(USB_SB_command, rem, 0) |
11049 - IO_STATE(USB_SB_command, tt, out) |
11050 - IO_STATE(USB_SB_command, eot, yes) |
11051 - IO_STATE(USB_SB_command, eol, yes));
11053 - /* The full field is set to yes, even if we don't actually check that this is
11054 - a full-length transfer (i.e., that transfer_buffer_length % maxlen = 0).
11055 - Setting full prevents the USB controller from sending an empty packet in
11056 - that case. However, if URB_ZERO_PACKET was set we want that. */
11057 - if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
11058 - sb_desc->command |= IO_STATE(USB_SB_command, full, yes);
11061 - sb_desc->buf = virt_to_phys(urb->transfer_buffer);
11062 - sb_desc->next = 0;
11064 - } else if (usb_pipein(urb->pipe)) {
11066 - dbg_bulk("Grabbing bulk IN, urb 0x%lx, epid %d", (unsigned long)urb, epid);
11068 - sb_desc->sw_len = urb->transfer_buffer_length ?
11069 - (urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
11071 - /* The rem field is don't care if it's not a full-length transfer, so setting
11072 - it shouldn't hurt. */
11073 - sb_desc->command =
11074 - (IO_FIELD(USB_SB_command, rem,
11075 - urb->transfer_buffer_length % maxlen) |
11076 - IO_STATE(USB_SB_command, tt, in) |
11077 - IO_STATE(USB_SB_command, eot, yes) |
11078 - IO_STATE(USB_SB_command, eol, yes));
11080 - sb_desc->buf = 0;
11081 - sb_desc->next = 0;
11084 - urb_priv->first_sb = sb_desc;
11085 - urb_priv->last_sb = sb_desc;
11086 - urb_priv->epid = epid;
11088 - urb->hcpriv = urb_priv;
11090 - /* Reset toggle bits and reset error count. */
11091 - save_flags(flags);
11094 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
11097 - /* FIXME: Is this a special case since the hold field is checked,
11098 - or should we check hold in a lot of other cases as well? */
11099 - if (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) {
11100 - panic("Hold was set in %s", __FUNCTION__);
11103 - /* Reset error counters (regardless of which direction this traffic is). */
11104 - *R_USB_EPT_DATA &=
11105 - ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
11106 - IO_MASK(R_USB_EPT_DATA, error_count_out));
11108 - /* Software must preset the toggle bits. */
11109 - if (usb_pipeout(urb->pipe)) {
11111 - usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
11112 - *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
11113 - *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
11116 - usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
11117 - *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
11118 - *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
11121 - /* Assert that the EP descriptor is disabled. */
11122 - assert(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
11124 - /* The reason we set the EP's sub pointer directly instead of
11125 - walking the SB list and linking it last in the list is that we only
11126 - have one active urb at a time (the rest are queued). */
11128 - /* Note that we cannot have interrupts running when we have set the SB descriptor
11129 - but the EP is not yet enabled. If a bulk eot happens for another EP, we will
11130 - find this EP disabled and with a SB != 0, which will make us think that it's done. */
11131 - TxBulkEPList[epid].sub = virt_to_phys(sb_desc);
11132 - TxBulkEPList[epid].hw_len = 0;
11133 - /* Note that we don't have to fill in the ep_id field since this
11134 - was done when we allocated the EP descriptors in init_tx_bulk_ep. */
11136 - /* Check if the dummy list is already with us (if several urbs were queued). */
11137 - if (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0])) {
11139 - dbg_bulk("Inviting dummy list to the party for urb 0x%lx, epid %d",
11140 - (unsigned long)urb, epid);
11142 - /* The last EP in the dummy list already has its next pointer set to
11143 - TxBulkEPList[epid].next. */
11145 - /* We don't need to check if the DMA is at this EP or not before changing the
11146 - next pointer, since we will do it in one 32-bit write (EP descriptors are
11147 - 32-bit aligned). */
11148 - TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
11150 - /* Enable the EP descr. */
11151 - dbg_bulk("Enabling bulk EP for urb 0x%lx, epid %d", (unsigned long)urb, epid);
11152 - TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
11154 - /* Everything is set up, safe to enable interrupts again. */
11155 - restore_flags(flags);
11157 - /* If the DMA bulk channel isn't running, we need to restart it if it
11158 - has stopped at the last EP descriptor (DMA stopped because there was
11159 - no more traffic) or if it has stopped at a dummy EP with the intr flag
11160 - set (DMA stopped because we were too slow in inserting new traffic). */
11161 - if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
11163 - USB_EP_Desc_t *ep;
11164 - ep = (USB_EP_Desc_t *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
11165 - dbg_bulk("DMA channel not running in add");
11166 - dbg_bulk("DMA is at 0x%lx", (unsigned long)ep);
11168 - if (*R_DMA_CH8_SUB0_EP == virt_to_phys(&TxBulkEPList[NBR_OF_EPIDS - 1]) ||
11169 - (ep->command & 0x8) >> 3) {
11170 - *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
11171 - /* Update/restart the bulk start timer since we just started the channel. */
11172 - mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
11173 - /* Update/restart the bulk eot timer since we just inserted traffic. */
11174 - mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
11181 -static void etrax_usb_complete_bulk_urb(struct urb *urb, int status)
11183 - etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
11184 - int epid = urb_priv->epid;
11185 - unsigned long flags;
11190 - warn("Completing bulk urb with status %d.", status);
11192 - dbg_bulk("Completing bulk urb 0x%lx for epid %d", (unsigned long)urb, epid);
11194 - /* Update the urb list. */
11195 - urb_list_del(urb, epid);
11197 - /* For an IN pipe, we always set the actual length, regardless of whether there was
11198 - an error or not (which means the device driver can use the data if it wants to). */
11199 - if (usb_pipein(urb->pipe)) {
11200 - urb->actual_length = urb_priv->rx_offset;
11202 - /* Set actual_length for OUT urbs also; the USB mass storage driver seems
11203 - to want that. We wouldn't know of any partial writes if there was an error. */
11204 - if (status == 0) {
11205 - urb->actual_length = urb->transfer_buffer_length;
11207 - urb->actual_length = 0;
11211 - /* FIXME: Is there something of the things below we shouldn't do if there was an error?
11212 - Like, maybe we shouldn't toggle the toggle bits, or maybe we shouldn't insert more traffic. */
11214 - save_flags(flags);
11217 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
11220 - /* We need to fiddle with the toggle bits because the hardware doesn't do it for us. */
11221 - if (usb_pipeout(urb->pipe)) {
11223 - IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
11224 - usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
11225 - usb_pipeout(urb->pipe), toggle);
11228 - IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
11229 - usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
11230 - usb_pipeout(urb->pipe), toggle);
11232 - restore_flags(flags);
11234 - /* Remember to free the SBs. */
11235 - etrax_remove_from_sb_list(urb);
11239 - /* If there are any more urb's in the list we'd better start sending */
11240 - if (!urb_list_empty(epid)) {
11242 - struct urb *new_urb;
11244 - /* Get the first urb. */
11245 - new_urb = urb_list_first(epid);
11248 - dbg_bulk("More bulk for epid %d", epid);
11250 - etrax_usb_add_to_bulk_sb_list(new_urb, epid);
11253 - urb->status = status;
11255 - /* We let any non-zero status from the layer above have precedence. */
11256 - if (status == 0) {
11257 - /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
11258 - is to be treated as an error. */
11259 - if (urb->transfer_flags & URB_SHORT_NOT_OK) {
11260 - if (usb_pipein(urb->pipe) &&
11261 - (urb->actual_length !=
11262 - usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))) {
11263 - urb->status = -EREMOTEIO;
11268 - if (urb->complete) {
11269 - urb->complete(urb, NULL);
11272 - if (urb_list_empty(epid)) {
11273 - /* This means that this EP is now free, deconfigure it. */
11274 - etrax_usb_free_epid(epid);
11276 - /* No more traffic; time to clean up.
11277 - Must set sub pointer to 0, since we look at the sub pointer when handling
11278 - the bulk eot interrupt. */
11280 - dbg_bulk("No bulk for epid %d", epid);
11282 - TxBulkEPList[epid].sub = 0;
11284 - /* Unlink the dummy list. */
11286 - dbg_bulk("Kicking dummy list out of party for urb 0x%lx, epid %d",
11287 - (unsigned long)urb, epid);
11289 - /* No need to wait for the DMA before changing the next pointer.
11290 - The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
11291 - the last one (INVALID_EPID) for actual traffic. */
11292 - TxBulkEPList[epid].next =
11293 - virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
11299 -static int etrax_usb_submit_ctrl_urb(struct urb *urb)
11303 - unsigned long flags;
11304 - etrax_urb_priv_t *urb_priv;
11308 - /* FIXME: Return -ENXIO if there is already a queued urb for this endpoint? */
11310 - /* Epid allocation, empty check and list add must be protected.
11312 - Epid allocation because if we find an existing epid for this endpoint an urb might be
11313 - completed (emptying the list) before we add the new urb to the list, causing the epid
11314 - to be de-allocated. We would then start the transfer with an invalid epid -> epid attn.
11316 - Empty check and add because otherwise we might conclude that the list is not empty,
11317 - after which it becomes empty before we add the new urb to the list, causing us not to
11318 - insert the new traffic into the SB list. */
11320 - spin_lock_irqsave(&urb_list_lock, flags);
11321 - epid = etrax_usb_setup_epid(urb);
11322 - if (epid == -1) {
11323 - spin_unlock_irqrestore(&urb_list_lock, flags);
11327 - empty = urb_list_empty(epid);
11328 - urb_list_add(urb, epid);
11329 - spin_unlock_irqrestore(&urb_list_lock, flags);
11331 - dbg_ctrl("Adding ctrl urb 0x%lx to %s list, epid %d",
11332 - (unsigned long)urb, empty ? "empty" : "", epid);
11334 - /* Mark the urb as being in progress. */
11335 - urb->status = -EINPROGRESS;
11337 - /* Setup the hcpriv data. */
11338 - urb_priv = kzalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
11339 - assert(urb_priv != NULL);
11340 - /* This sets rx_offset to 0. */
11341 - urb_priv->urb_state = NOT_STARTED;
11342 - urb->hcpriv = urb_priv;
11345 - etrax_usb_add_to_ctrl_sb_list(urb, epid);
11353 -static void etrax_usb_add_to_ctrl_sb_list(struct urb *urb, int epid)
11355 - USB_SB_Desc_t *sb_desc_setup;
11356 - USB_SB_Desc_t *sb_desc_data;
11357 - USB_SB_Desc_t *sb_desc_status;
11359 - etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
11361 - unsigned long flags;
11366 - maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
11368 - sb_desc_setup = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11369 - assert(sb_desc_setup != NULL);
11370 - sb_desc_status = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11371 - assert(sb_desc_status != NULL);
11373 - /* Initialize the mandatory setup SB descriptor (used only in control transfers) */
11374 - sb_desc_setup->sw_len = 8;
11375 - sb_desc_setup->command = (IO_FIELD(USB_SB_command, rem, 0) |
11376 - IO_STATE(USB_SB_command, tt, setup) |
11377 - IO_STATE(USB_SB_command, full, yes) |
11378 - IO_STATE(USB_SB_command, eot, yes));
11380 - sb_desc_setup->buf = virt_to_phys(urb->setup_packet);
11382 - if (usb_pipeout(urb->pipe)) {
11383 - dbg_ctrl("Transfer for epid %d is OUT", epid);
11385 - /* If this Control OUT transfer has an optional data stage we add an OUT token
11386 - before the mandatory IN (status) token, hence the reordered SB list */
11388 - sb_desc_setup->next = virt_to_phys(sb_desc_status);
11389 - if (urb->transfer_buffer) {
11391 - dbg_ctrl("This OUT transfer has an extra data stage");
11393 - sb_desc_data = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11394 - assert(sb_desc_data != NULL);
11396 - sb_desc_setup->next = virt_to_phys(sb_desc_data);
11398 - sb_desc_data->sw_len = urb->transfer_buffer_length;
11399 - sb_desc_data->command = (IO_STATE(USB_SB_command, tt, out) |
11400 - IO_STATE(USB_SB_command, full, yes) |
11401 - IO_STATE(USB_SB_command, eot, yes));
11402 - sb_desc_data->buf = virt_to_phys(urb->transfer_buffer);
11403 - sb_desc_data->next = virt_to_phys(sb_desc_status);
11406 - sb_desc_status->sw_len = 1;
11407 - sb_desc_status->command = (IO_FIELD(USB_SB_command, rem, 0) |
11408 - IO_STATE(USB_SB_command, tt, in) |
11409 - IO_STATE(USB_SB_command, eot, yes) |
11410 - IO_STATE(USB_SB_command, intr, yes) |
11411 - IO_STATE(USB_SB_command, eol, yes));
11413 - sb_desc_status->buf = 0;
11414 - sb_desc_status->next = 0;
11416 - } else if (usb_pipein(urb->pipe)) {
11418 - dbg_ctrl("Transfer for epid %d is IN", epid);
11419 - dbg_ctrl("transfer_buffer_length = %d", urb->transfer_buffer_length);
11420 - dbg_ctrl("rem is calculated to %d", urb->transfer_buffer_length % maxlen);
11422 - sb_desc_data = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11423 - assert(sb_desc_data != NULL);
11425 - sb_desc_setup->next = virt_to_phys(sb_desc_data);
11427 - sb_desc_data->sw_len = urb->transfer_buffer_length ?
11428 - (urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
11429 - dbg_ctrl("sw_len got %d", sb_desc_data->sw_len);
11431 - sb_desc_data->command =
11432 - (IO_FIELD(USB_SB_command, rem,
11433 - urb->transfer_buffer_length % maxlen) |
11434 - IO_STATE(USB_SB_command, tt, in) |
11435 - IO_STATE(USB_SB_command, eot, yes));
11437 - sb_desc_data->buf = 0;
11438 - sb_desc_data->next = virt_to_phys(sb_desc_status);
11440 - /* Read comment at zout_buffer declaration for an explanation to this. */
11441 - sb_desc_status->sw_len = 1;
11442 - sb_desc_status->command = (IO_FIELD(USB_SB_command, rem, 0) |
11443 - IO_STATE(USB_SB_command, tt, zout) |
11444 - IO_STATE(USB_SB_command, full, yes) |
11445 - IO_STATE(USB_SB_command, eot, yes) |
11446 - IO_STATE(USB_SB_command, intr, yes) |
11447 - IO_STATE(USB_SB_command, eol, yes));
11449 - sb_desc_status->buf = virt_to_phys(&zout_buffer[0]);
11450 - sb_desc_status->next = 0;
11453 - urb_priv->first_sb = sb_desc_setup;
11454 - urb_priv->last_sb = sb_desc_status;
11455 - urb_priv->epid = epid;
11457 - urb_priv->urb_state = STARTED;
11459 - /* Reset toggle bits and reset error count, remember to di and ei */
11460 - /* Warning: it is possible that this locking doesn't work with bottom-halves */
11462 - save_flags(flags);
11465 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
11467 - if (*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) {
11468 - panic("Hold was set in %s", __FUNCTION__);
11472 - /* FIXME: Compare with etrax_usb_add_to_bulk_sb_list where the toggle bits
11473 - are set to a specific value. Why the difference? Read "Transfer and Toggle Bits
11474 - in Designer's Reference, p. 8 - 11. */
11475 - *R_USB_EPT_DATA &=
11476 - ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
11477 - IO_MASK(R_USB_EPT_DATA, error_count_out) |
11478 - IO_MASK(R_USB_EPT_DATA, t_in) |
11479 - IO_MASK(R_USB_EPT_DATA, t_out));
11481 - /* Since we use the rx interrupt to complete ctrl urbs, we can enable interrupts now
11482 - (i.e. we don't check the sub pointer on an eot interrupt like we do for bulk traffic). */
11483 - restore_flags(flags);
11485 - /* Assert that the EP descriptor is disabled. */
11486 - assert(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
11488 - /* Set up and enable the EP descriptor. */
11489 - TxCtrlEPList[epid].sub = virt_to_phys(sb_desc_setup);
11490 - TxCtrlEPList[epid].hw_len = 0;
11491 - TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
11493 - /* We start the DMA sub channel without checking if it's running or not, because:
11494 - 1) If it's already running, issuing the start command is a nop.
11495 - 2) We avoid a test-and-set race condition. */
11496 - *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
11501 -static void etrax_usb_complete_ctrl_urb(struct urb *urb, int status)
11503 - etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
11504 - int epid = urb_priv->epid;
11509 - warn("Completing ctrl urb with status %d.", status);
11511 - dbg_ctrl("Completing ctrl epid %d, urb 0x%lx", epid, (unsigned long)urb);
11513 - /* Remove this urb from the list. */
11514 - urb_list_del(urb, epid);
11516 - /* For an IN pipe, we always set the actual length, regardless of whether there was
11517 - an error or not (which means the device driver can use the data if it wants to). */
11518 - if (usb_pipein(urb->pipe)) {
11519 - urb->actual_length = urb_priv->rx_offset;
11522 - /* FIXME: Is there something of the things below we shouldn't do if there was an error?
11523 - Like, maybe we shouldn't insert more traffic. */
11525 - /* Remember to free the SBs. */
11526 - etrax_remove_from_sb_list(urb);
11530 - /* If there are any more urbs in the list we'd better start sending. */
11531 - if (!urb_list_empty(epid)) {
11532 - struct urb *new_urb;
11534 - /* Get the first urb. */
11535 - new_urb = urb_list_first(epid);
11538 - dbg_ctrl("More ctrl for epid %d, first urb = 0x%lx", epid, (unsigned long)new_urb);
11540 - etrax_usb_add_to_ctrl_sb_list(new_urb, epid);
11543 - urb->status = status;
11545 - /* We let any non-zero status from the layer above have precedence. */
11546 - if (status == 0) {
11547 - /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
11548 - is to be treated as an error. */
11549 - if (urb->transfer_flags & URB_SHORT_NOT_OK) {
11550 - if (usb_pipein(urb->pipe) &&
11551 - (urb->actual_length !=
11552 - usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))) {
11553 - urb->status = -EREMOTEIO;
11558 - if (urb->complete) {
11559 - urb->complete(urb, NULL);
11562 - if (urb_list_empty(epid)) {
11563 - /* No more traffic. Time to clean up. */
11564 - etrax_usb_free_epid(epid);
11565 - /* Must set sub pointer to 0. */
11566 - dbg_ctrl("No ctrl for epid %d", epid);
11567 - TxCtrlEPList[epid].sub = 0;
11573 -static int etrax_usb_submit_intr_urb(struct urb *urb)
11580 - if (usb_pipeout(urb->pipe)) {
11581 - /* Unsupported transfer type.
11582 - We don't support interrupt out traffic. (If we do, we can't support
11583 - intervals for neither in or out traffic, but are forced to schedule all
11584 - interrupt traffic in one frame.) */
11588 - epid = etrax_usb_setup_epid(urb);
11589 - if (epid == -1) {
11594 - if (!urb_list_empty(epid)) {
11595 - /* There is already a queued urb for this endpoint. */
11596 - etrax_usb_free_epid(epid);
11600 - urb->status = -EINPROGRESS;
11602 - dbg_intr("Add intr urb 0x%lx, to list, epid %d", (unsigned long)urb, epid);
11604 - urb_list_add(urb, epid);
11605 - etrax_usb_add_to_intr_sb_list(urb, epid);
11612 -static void etrax_usb_add_to_intr_sb_list(struct urb *urb, int epid)
11615 - volatile USB_EP_Desc_t *tmp_ep;
11616 - volatile USB_EP_Desc_t *first_ep;
11622 - etrax_urb_priv_t *urb_priv;
11626 - maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
11627 - interval = urb->interval;
11629 - urb_priv = kzalloc(sizeof(etrax_urb_priv_t), KMALLOC_FLAG);
11630 - assert(urb_priv != NULL);
11631 - urb->hcpriv = urb_priv;
11633 - first_ep = &TxIntrEPList[0];
11635 - /* Round of the interval to 2^n, it is obvious that this code favours
11636 - smaller numbers, but that is actually a good thing */
11637 - /* FIXME: The "rounding error" for larger intervals will be quite
11638 - large. For in traffic this shouldn't be a problem since it will only
11639 - mean that we "poll" more often. */
11640 - for (i = 0; interval; i++) {
11641 - interval = interval >> 1;
11643 - interval = 1 << (i - 1);
11645 - dbg_intr("Interval rounded to %d", interval);
11647 - tmp_ep = first_ep;
11650 - if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
11651 - if ((i % interval) == 0) {
11652 - /* Insert the traffic ep after tmp_ep */
11653 - USB_EP_Desc_t *ep_desc;
11654 - USB_SB_Desc_t *sb_desc;
11656 - dbg_intr("Inserting EP for epid %d", epid);
11658 - ep_desc = (USB_EP_Desc_t *)
11659 - kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11660 - sb_desc = (USB_SB_Desc_t *)
11661 - kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
11662 - assert(ep_desc != NULL);
11663 - CHECK_ALIGN(ep_desc);
11664 - assert(sb_desc != NULL);
11666 - ep_desc->sub = virt_to_phys(sb_desc);
11667 - ep_desc->hw_len = 0;
11668 - ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
11669 - IO_STATE(USB_EP_command, enable, yes));
11672 - /* Round upwards the number of packets of size maxlen
11673 - that this SB descriptor should receive. */
11674 - sb_desc->sw_len = urb->transfer_buffer_length ?
11675 - (urb->transfer_buffer_length - 1) / maxlen + 1 : 0;
11676 - sb_desc->next = 0;
11677 - sb_desc->buf = 0;
11678 - sb_desc->command =
11679 - (IO_FIELD(USB_SB_command, rem, urb->transfer_buffer_length % maxlen) |
11680 - IO_STATE(USB_SB_command, tt, in) |
11681 - IO_STATE(USB_SB_command, eot, yes) |
11682 - IO_STATE(USB_SB_command, eol, yes));
11684 - ep_desc->next = tmp_ep->next;
11685 - tmp_ep->next = virt_to_phys(ep_desc);
11689 - tmp_ep = (USB_EP_Desc_t *)phys_to_virt(tmp_ep->next);
11690 - } while (tmp_ep != first_ep);
11693 - /* Note that first_sb/last_sb doesn't apply to interrupt traffic. */
11694 - urb_priv->epid = epid;
11696 - /* We start the DMA sub channel without checking if it's running or not, because:
11697 - 1) If it's already running, issuing the start command is a nop.
11698 - 2) We avoid a test-and-set race condition. */
11699 - *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
11706 -static void etrax_usb_complete_intr_urb(struct urb *urb, int status)
11708 - etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
11709 - int epid = urb_priv->epid;
11714 - warn("Completing intr urb with status %d.", status);
11716 - dbg_intr("Completing intr epid %d, urb 0x%lx", epid, (unsigned long)urb);
11718 - urb->status = status;
11719 - urb->actual_length = urb_priv->rx_offset;
11721 - dbg_intr("interrupt urb->actual_length = %d", urb->actual_length);
11723 - /* We let any non-zero status from the layer above have precedence. */
11724 - if (status == 0) {
11725 - /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's max length)
11726 - is to be treated as an error. */
11727 - if (urb->transfer_flags & URB_SHORT_NOT_OK) {
11728 - if (urb->actual_length !=
11729 - usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
11730 - urb->status = -EREMOTEIO;
11735 - /* The driver will resubmit the URB so we need to remove it first */
11736 - etrax_usb_unlink_urb(urb, 0);
11737 - if (urb->complete) {
11738 - urb->complete(urb, NULL);
11745 -static int etrax_usb_submit_isoc_urb(struct urb *urb)
11748 - unsigned long flags;
11752 - dbg_isoc("Submitting isoc urb = 0x%lx", (unsigned long)urb);
11754 - /* Epid allocation, empty check and list add must be protected.
11755 - Read about this in etrax_usb_submit_ctrl_urb. */
11757 - spin_lock_irqsave(&urb_list_lock, flags);
11758 - /* Is there an active epid for this urb ? */
11759 - epid = etrax_usb_setup_epid(urb);
11760 - if (epid == -1) {
11762 - spin_unlock_irqrestore(&urb_list_lock, flags);
11766 - /* Ok, now we got valid endpoint, lets insert some traffic */
11768 - urb->status = -EINPROGRESS;
11770 - /* Find the last urb in the URB_List and add this urb after that one.
11771 - Also add the traffic, that is do an etrax_usb_add_to_isoc_sb_list. This
11772 - is important to make this in "real time" since isochronous traffic is
11773 - time sensitive. */
11775 - dbg_isoc("Adding isoc urb to (possibly empty) list");
11776 - urb_list_add(urb, epid);
11777 - etrax_usb_add_to_isoc_sb_list(urb, epid);
11778 - spin_unlock_irqrestore(&urb_list_lock, flags);
11785 -static void etrax_usb_check_error_isoc_ep(const int epid)
11787 - unsigned long int flags;
11789 - __u32 r_usb_ept_data;
11791 - /* We can't read R_USB_EPID_ATTN here since it would clear the iso_eof,
11792 - bulk_eot and epid_attn interrupts. So we just check the status of
11793 - the epid without testing if for it in R_USB_EPID_ATTN. */
11796 - save_flags(flags);
11798 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
11800 - /* Note that although there are separate R_USB_EPT_DATA and R_USB_EPT_DATA_ISO
11801 - registers, they are located at the same address and are of the same size.
11802 - In other words, this read should be ok for isoc also. */
11803 - r_usb_ept_data = *R_USB_EPT_DATA;
11804 - restore_flags(flags);
11806 - error_code = IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data);
11808 - if (r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, hold)) {
11809 - warn("Hold was set for epid %d.", epid);
11813 - if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, no_error)) {
11815 - /* This indicates that the SB list of the ept was completed before
11816 - new data was appended to it. This is not an error, but indicates
11817 - large system or USB load and could possibly cause trouble for
11818 - very timing sensitive USB device drivers so we log it.
11820 - info("Isoc. epid %d disabled with no error", epid);
11823 - } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, stall)) {
11824 - /* Not really a protocol error, just says that the endpoint gave
11825 - a stall response. Note that error_code cannot be stall for isoc. */
11826 - panic("Isoc traffic cannot stall");
11828 - } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA_ISO, error_code, bus_error)) {
11829 - /* Two devices responded to a transaction request. Must be resolved
11830 - by software. FIXME: Reset ports? */
11831 - panic("Bus error for epid %d."
11832 - " Two devices responded to transaction request",
11835 - } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, buffer_error)) {
11836 - /* DMA overrun or underrun. */
11837 - warn("Buffer overrun/underrun for epid %d. DMA too busy?", epid);
11839 - /* It seems that error_code = buffer_error in
11840 - R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
11841 - are the same error. */
11846 -static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid)
11851 - etrax_urb_priv_t *urb_priv;
11852 - USB_SB_Desc_t *prev_sb_desc, *next_sb_desc, *temp_sb_desc;
11856 - prev_sb_desc = next_sb_desc = temp_sb_desc = NULL;
11858 - urb_priv = kzalloc(sizeof(etrax_urb_priv_t), GFP_ATOMIC);
11859 - assert(urb_priv != NULL);
11861 - urb->hcpriv = urb_priv;
11862 - urb_priv->epid = epid;
11864 - if (usb_pipeout(urb->pipe)) {
11866 - if (urb->number_of_packets == 0) panic("etrax_usb_add_to_isoc_sb_list 0 packets\n");
11868 - dbg_isoc("Transfer for epid %d is OUT", epid);
11869 - dbg_isoc("%d packets in URB", urb->number_of_packets);
11871 - /* Create one SB descriptor for each packet and link them together. */
11872 - for (i = 0; i < urb->number_of_packets; i++) {
11873 - if (!urb->iso_frame_desc[i].length)
11876 - next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
11877 - assert(next_sb_desc != NULL);
11879 - if (urb->iso_frame_desc[i].length > 0) {
11881 - next_sb_desc->command = (IO_STATE(USB_SB_command, tt, out) |
11882 - IO_STATE(USB_SB_command, eot, yes));
11884 - next_sb_desc->sw_len = urb->iso_frame_desc[i].length;
11885 - next_sb_desc->buf = virt_to_phys((char*)urb->transfer_buffer + urb->iso_frame_desc[i].offset);
11887 - /* Check if full length transfer. */
11888 - if (urb->iso_frame_desc[i].length ==
11889 - usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
11890 - next_sb_desc->command |= IO_STATE(USB_SB_command, full, yes);
11893 - dbg_isoc("zero len packet");
11894 - next_sb_desc->command = (IO_FIELD(USB_SB_command, rem, 0) |
11895 - IO_STATE(USB_SB_command, tt, zout) |
11896 - IO_STATE(USB_SB_command, eot, yes) |
11897 - IO_STATE(USB_SB_command, full, yes));
11899 - next_sb_desc->sw_len = 1;
11900 - next_sb_desc->buf = virt_to_phys(&zout_buffer[0]);
11903 - /* First SB descriptor that belongs to this urb */
11905 - urb_priv->first_sb = next_sb_desc;
11907 - prev_sb_desc->next = virt_to_phys(next_sb_desc);
11909 - prev_sb_desc = next_sb_desc;
11912 - next_sb_desc->command |= (IO_STATE(USB_SB_command, intr, yes) |
11913 - IO_STATE(USB_SB_command, eol, yes));
11914 - next_sb_desc->next = 0;
11915 - urb_priv->last_sb = next_sb_desc;
11917 - } else if (usb_pipein(urb->pipe)) {
11919 - dbg_isoc("Transfer for epid %d is IN", epid);
11920 - dbg_isoc("transfer_buffer_length = %d", urb->transfer_buffer_length);
11921 - dbg_isoc("rem is calculated to %d", urb->iso_frame_desc[urb->number_of_packets - 1].length);
11923 - /* Note that in descriptors for periodic traffic are not consumed. This means that
11924 - the USB controller never propagates in the SB list. In other words, if there already
11925 - is an SB descriptor in the list for this EP we don't have to do anything. */
11926 - if (TxIsocEPList[epid].sub == 0) {
11927 - dbg_isoc("Isoc traffic not already running, allocating SB");
11929 - next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
11930 - assert(next_sb_desc != NULL);
11932 - next_sb_desc->command = (IO_STATE(USB_SB_command, tt, in) |
11933 - IO_STATE(USB_SB_command, eot, yes) |
11934 - IO_STATE(USB_SB_command, eol, yes));
11936 - next_sb_desc->next = 0;
11937 - next_sb_desc->sw_len = 1; /* Actual number of packets is not relevant
11938 - for periodic in traffic as long as it is more
11939 - than zero. Set to 1 always. */
11940 - next_sb_desc->buf = 0;
11942 - /* The rem field is don't care for isoc traffic, so we don't set it. */
11944 - /* Only one SB descriptor that belongs to this urb. */
11945 - urb_priv->first_sb = next_sb_desc;
11946 - urb_priv->last_sb = next_sb_desc;
11950 - dbg_isoc("Isoc traffic already running, just setting first/last_sb");
11952 - /* Each EP for isoc in will have only one SB descriptor, setup when submitting the
11953 - already active urb. Note that even though we may have several first_sb/last_sb
11954 - pointing at the same SB descriptor, they are freed only once (when the list has
11955 - become empty). */
11956 - urb_priv->first_sb = phys_to_virt(TxIsocEPList[epid].sub);
11957 - urb_priv->last_sb = phys_to_virt(TxIsocEPList[epid].sub);
11963 - /* Find the spot to insert this urb and add it. */
11964 - if (TxIsocEPList[epid].sub == 0) {
11965 - /* First SB descriptor inserted in this list (in or out). */
11966 - dbg_isoc("Inserting SB desc first in list");
11967 - TxIsocEPList[epid].hw_len = 0;
11968 - TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
11971 - /* Isochronous traffic is already running, insert new traffic last (only out). */
11972 - dbg_isoc("Inserting SB desc last in list");
11973 - temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
11974 - while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
11975 - IO_STATE(USB_SB_command, eol, yes)) {
11976 - assert(temp_sb_desc->next);
11977 - temp_sb_desc = phys_to_virt(temp_sb_desc->next);
11979 - dbg_isoc("Appending list on desc 0x%p", temp_sb_desc);
11981 - /* Next pointer must be set before eol is removed. */
11982 - temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
11983 - /* Clear the previous end of list flag since there is a new in the
11984 - added SB descriptor list. */
11985 - temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
11987 - if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
11988 - /* 8.8.5 in Designer's Reference says we should check for and correct
11989 - any errors in the EP here. That should not be necessary if epid_attn
11990 - is handled correctly, so we assume all is ok. */
11991 - dbg_isoc("EP disabled");
11992 - etrax_usb_check_error_isoc_ep(epid);
11994 - /* The SB list was exhausted. */
11995 - if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
11996 - /* The new sublist did not get processed before the EP was
11997 - disabled. Setup the EP again. */
11998 - dbg_isoc("Set EP sub to new list");
11999 - TxIsocEPList[epid].hw_len = 0;
12000 - TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
12005 - if (urb->transfer_flags & URB_ISO_ASAP) {
12006 - /* The isoc transfer should be started as soon as possible. The start_frame
12007 - field is a return value if URB_ISO_ASAP was set. Comparing R_USB_FM_NUMBER
12008 - with a USB Chief trace shows that the first isoc IN token is sent 2 frames
12009 - later. I'm not sure how this affects usage of the start_frame field by the
12010 - device driver, or how it affects things when USB_ISO_ASAP is not set, so
12011 - therefore there's no compensation for the 2 frame "lag" here. */
12012 - urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
12013 - TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
12014 - urb_priv->urb_state = STARTED;
12015 - dbg_isoc("URB_ISO_ASAP set, urb->start_frame set to %d", urb->start_frame);
12017 - /* Not started yet. */
12018 - urb_priv->urb_state = NOT_STARTED;
12019 - dbg_isoc("urb_priv->urb_state set to NOT_STARTED");
12022 - /* We start the DMA sub channel without checking if it's running or not, because:
12023 - 1) If it's already running, issuing the start command is a nop.
12024 - 2) We avoid a test-and-set race condition. */
12025 - *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
12030 -static void etrax_usb_complete_isoc_urb(struct urb *urb, int status)
12032 - etrax_urb_priv_t *urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
12033 - int epid = urb_priv->epid;
12034 - int auto_resubmit = 0;
12037 - dbg_isoc("complete urb 0x%p, status %d", urb, status);
12040 - warn("Completing isoc urb with status %d.", status);
12042 - if (usb_pipein(urb->pipe)) {
12045 - /* Make that all isoc packets have status and length set before
12046 - completing the urb. */
12047 - for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++) {
12048 - urb->iso_frame_desc[i].actual_length = 0;
12049 - urb->iso_frame_desc[i].status = -EPROTO;
12052 - urb_list_del(urb, epid);
12054 - if (!list_empty(&urb_list[epid])) {
12055 - ((etrax_urb_priv_t *)(urb_list_first(epid)->hcpriv))->urb_state = STARTED;
12057 - unsigned long int flags;
12058 - if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
12059 - /* The EP was enabled, disable it and wait. */
12060 - TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
12062 - /* Ah, the luxury of busy-wait. */
12063 - while (*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid]));
12066 - etrax_remove_from_sb_list(urb);
12067 - TxIsocEPList[epid].sub = 0;
12068 - TxIsocEPList[epid].hw_len = 0;
12070 - save_flags(flags);
12072 - etrax_usb_free_epid(epid);
12073 - restore_flags(flags);
12079 - /* Release allocated bandwidth. */
12080 - usb_release_bandwidth(urb->dev, urb, 0);
12081 - } else if (usb_pipeout(urb->pipe)) {
12084 - dbg_isoc("Isoc out urb complete 0x%p", urb);
12086 - /* Update the urb list. */
12087 - urb_list_del(urb, epid);
12089 - freed_descr = etrax_remove_from_sb_list(urb);
12090 - dbg_isoc("freed %d descriptors of %d packets", freed_descr, urb->number_of_packets);
12091 - assert(freed_descr == urb->number_of_packets);
12095 - /* Release allocated bandwidth. */
12096 - usb_release_bandwidth(urb->dev, urb, 0);
12099 - urb->status = status;
12100 - if (urb->complete) {
12101 - urb->complete(urb, NULL);
12104 - if (auto_resubmit) {
12105 - /* Check that urb was not unlinked by the complete callback. */
12106 - if (__urb_list_entry(urb, epid)) {
12107 - /* Move this one down the list. */
12108 - urb_list_move_last(urb, epid);
12110 - /* Mark the now first urb as started (may already be). */
12111 - ((etrax_urb_priv_t *)(urb_list_first(epid)->hcpriv))->urb_state = STARTED;
12113 - /* Must set this to 0 since this urb is still active after
12115 - urb_priv->isoc_packet_counter = 0;
12117 - warn("(ISOC) automatic resubmit urb 0x%p removed by complete.", urb);
12124 -static void etrax_usb_complete_urb(struct urb *urb, int status)
12126 - switch (usb_pipetype(urb->pipe)) {
12128 - etrax_usb_complete_bulk_urb(urb, status);
12130 - case PIPE_CONTROL:
12131 - etrax_usb_complete_ctrl_urb(urb, status);
12133 - case PIPE_INTERRUPT:
12134 - etrax_usb_complete_intr_urb(urb, status);
12136 - case PIPE_ISOCHRONOUS:
12137 - etrax_usb_complete_isoc_urb(urb, status);
12140 - err("Unknown pipetype");
12146 -static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc)
12148 - usb_interrupt_registers_t *reg;
12149 - unsigned long flags;
12153 - __u16 port_status_1;
12154 - __u16 port_status_2;
12159 - /* Read critical registers into local variables, do kmalloc afterwards. */
12160 - save_flags(flags);
12163 - irq_mask = *R_USB_IRQ_MASK_READ;
12164 - /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that R_USB_STATUS
12165 - must be read before R_USB_EPID_ATTN since reading the latter clears the
12166 - ourun and perror fields of R_USB_STATUS. */
12167 - status = *R_USB_STATUS;
12169 - /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn interrupts. */
12170 - epid_attn = *R_USB_EPID_ATTN;
12172 - /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
12173 - port_status interrupt. */
12174 - port_status_1 = *R_USB_RH_PORT_STATUS_1;
12175 - port_status_2 = *R_USB_RH_PORT_STATUS_2;
12177 - /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
12178 - /* Note: the lower 11 bits contain the actual frame number, sent with each sof. */
12179 - fm_number = *R_USB_FM_NUMBER;
12181 - restore_flags(flags);
12183 - reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, SLAB_ATOMIC);
12185 - assert(reg != NULL);
12187 - reg->hc = (etrax_hc_t *)vhc;
12189 - /* Now put register values into kmalloc'd area. */
12190 - reg->r_usb_irq_mask_read = irq_mask;
12191 - reg->r_usb_status = status;
12192 - reg->r_usb_epid_attn = epid_attn;
12193 - reg->r_usb_rh_port_status_1 = port_status_1;
12194 - reg->r_usb_rh_port_status_2 = port_status_2;
12195 - reg->r_usb_fm_number = fm_number;
12197 - INIT_WORK(®->usb_bh, etrax_usb_hc_interrupt_bottom_half, reg);
12198 - schedule_work(®->usb_bh);
12202 - return IRQ_HANDLED;
12205 -static void etrax_usb_hc_interrupt_bottom_half(void *data)
12207 - usb_interrupt_registers_t *reg = (usb_interrupt_registers_t *)data;
12208 - __u32 irq_mask = reg->r_usb_irq_mask_read;
12212 - /* Interrupts are handled in order of priority. */
12213 - if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
12214 - etrax_usb_hc_epid_attn_interrupt(reg);
12216 - if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
12217 - etrax_usb_hc_port_status_interrupt(reg);
12219 - if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
12220 - etrax_usb_hc_ctl_status_interrupt(reg);
12222 - if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
12223 - etrax_usb_hc_isoc_eof_interrupt();
12225 - if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
12226 - /* Update/restart the bulk start timer since obviously the channel is running. */
12227 - mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
12228 - /* Update/restart the bulk eot timer since we just received an bulk eot interrupt. */
12229 - mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
12231 - etrax_usb_hc_bulk_eot_interrupt(0);
12234 - kmem_cache_free(top_half_reg_cache, reg);
12240 -void etrax_usb_hc_isoc_eof_interrupt(void)
12243 - etrax_urb_priv_t *urb_priv;
12245 - unsigned long flags;
12249 - /* Do not check the invalid epid (it has a valid sub pointer). */
12250 - for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
12252 - /* Do not check the invalid epid (it has a valid sub pointer). */
12253 - if ((epid == DUMMY_EPID) || (epid == INVALID_EPID))
12256 - /* Disable interrupts to block the isoc out descriptor interrupt handler
12257 - from being called while the isoc EPID list is being checked.
12259 - save_flags(flags);
12262 - if (TxIsocEPList[epid].sub == 0) {
12263 - /* Nothing here to see. */
12264 - restore_flags(flags);
12268 - /* Get the first urb (if any). */
12269 - urb = urb_list_first(epid);
12271 - warn("Ignoring NULL urb");
12272 - restore_flags(flags);
12275 - if (usb_pipein(urb->pipe)) {
12277 - /* Sanity check. */
12278 - assert(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
12280 - urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
12281 - assert(urb_priv);
12283 - if (urb_priv->urb_state == NOT_STARTED) {
12285 - /* If ASAP is not set and urb->start_frame is the current frame,
12286 - start the transfer. */
12287 - if (!(urb->transfer_flags & URB_ISO_ASAP) &&
12288 - (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
12290 - dbg_isoc("Enabling isoc IN EP descr for epid %d", epid);
12291 - TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
12293 - /* This urb is now active. */
12294 - urb_priv->urb_state = STARTED;
12299 - restore_flags(flags);
12306 -void etrax_usb_hc_bulk_eot_interrupt(int timer_induced)
12310 - /* The technique is to run one urb at a time, wait for the eot interrupt at which
12311 - point the EP descriptor has been disabled. */
12314 - dbg_bulk("bulk eot%s", timer_induced ? ", called by timer" : "");
12316 - for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
12318 - if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
12319 - (TxBulkEPList[epid].sub != 0)) {
12322 - etrax_urb_priv_t *urb_priv;
12323 - unsigned long flags;
12324 - __u32 r_usb_ept_data;
12326 - /* Found a disabled EP descriptor which has a non-null sub pointer.
12327 - Verify that this ctrl EP descriptor got disabled no errors.
12328 - FIXME: Necessary to check error_code? */
12329 - dbg_bulk("for epid %d?", epid);
12331 - /* Get the first urb. */
12332 - urb = urb_list_first(epid);
12334 - /* FIXME: Could this happen for valid reasons? Why did it disappear? Because of
12335 - wrong unlinking? */
12337 - warn("NULL urb for epid %d", epid);
12342 - urb_priv = (etrax_urb_priv_t *)urb->hcpriv;
12343 - assert(urb_priv);
12345 - /* Sanity checks. */
12346 - assert(usb_pipetype(urb->pipe) == PIPE_BULK);
12347 - if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
12348 - err("bulk endpoint got disabled before reaching last sb");
12351 - /* For bulk IN traffic, there seems to be a race condition between
12352 - between the bulk eot and eop interrupts, or rather an uncertainty regarding
12353 - the order in which they happen. Normally we expect the eop interrupt from
12354 - DMA channel 9 to happen before the eot interrupt.
12356 - Therefore, we complete the bulk IN urb in the rx interrupt handler instead. */
12358 - if (usb_pipein(urb->pipe)) {
12359 - dbg_bulk("in urb, continuing");
12363 - save_flags(flags);
12365 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
12367 - r_usb_ept_data = *R_USB_EPT_DATA;
12368 - restore_flags(flags);
12370 - if (IO_EXTRACT(R_USB_EPT_DATA, error_code, r_usb_ept_data) ==
12371 - IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
12372 - /* This means that the endpoint has no error, is disabled
12373 - and had inserted traffic, i.e. transfer successfully completed. */
12374 - etrax_usb_complete_bulk_urb(urb, 0);
12376 - /* Shouldn't happen. We expect errors to be caught by epid attention. */
12377 - err("Found disabled bulk EP desc, error_code != no_error");
12382 - /* Normally, we should find (at least) one disabled EP descriptor with a valid sub pointer.
12383 - However, because of the uncertainty in the deliverance of the eop/eot interrupts, we may
12384 - not. Also, we might find two disabled EPs when handling an eot interrupt, and then find
12385 - none the next time. */
12391 -void etrax_usb_hc_epid_attn_interrupt(usb_interrupt_registers_t *reg)
12393 - /* This function handles the epid attention interrupt. There are a variety of reasons
12394 - for this interrupt to happen (Designer's Reference, p. 8 - 22 for the details):
12396 - invalid ep_id - Invalid epid in an EP (EP disabled).
12397 - stall - Not strictly an error condition (EP disabled).
12398 - 3rd error - Three successive transaction errors (EP disabled).
12399 - buffer ourun - Buffer overrun or underrun (EP disabled).
12400 - past eof1 - Intr or isoc transaction proceeds past EOF1.
12401 - near eof - Intr or isoc transaction would not fit inside the frame.
12402 - zout transfer - If zout transfer for a bulk endpoint (EP disabled).
12403 - setup transfer - If setup transfer for a non-ctrl endpoint (EP disabled). */
12410 - assert(reg != NULL);
12412 - /* Note that we loop through all epids. We still want to catch errors for
12413 - the invalid one, even though we might handle them differently. */
12414 - for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
12416 - if (test_bit(epid, (void *)®->r_usb_epid_attn)) {
12419 - __u32 r_usb_ept_data;
12420 - unsigned long flags;
12423 - save_flags(flags);
12425 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
12427 - /* Note that although there are separate R_USB_EPT_DATA and R_USB_EPT_DATA_ISO
12428 - registers, they are located at the same address and are of the same size.
12429 - In other words, this read should be ok for isoc also. */
12430 - r_usb_ept_data = *R_USB_EPT_DATA;
12431 - restore_flags(flags);
12433 - /* First some sanity checks. */
12434 - if (epid == INVALID_EPID) {
12435 - /* FIXME: What if it became disabled? Could seriously hurt interrupt
12436 - traffic. (Use do_intr_recover.) */
12437 - warn("Got epid_attn for INVALID_EPID (%d).", epid);
12438 - err("R_USB_EPT_DATA = 0x%x", r_usb_ept_data);
12439 - err("R_USB_STATUS = 0x%x", reg->r_usb_status);
12441 - } else if (epid == DUMMY_EPID) {
12442 - /* We definitely don't care about these ones. Besides, they are
12443 - always disabled, so any possible disabling caused by the
12444 - epid attention interrupt is irrelevant. */
12445 - warn("Got epid_attn for DUMMY_EPID (%d).", epid);
12449 - /* Get the first urb in the urb list for this epid. We blatantly assume
12450 - that only the first urb could have caused the epid attention.
12451 - (For bulk and ctrl, only one urb is active at any one time. For intr
12452 - and isoc we remove them once they are completed.) */
12453 - urb = urb_list_first(epid);
12455 - if (urb == NULL) {
12456 - err("Got epid_attn for epid %i with no urb.", epid);
12457 - err("R_USB_EPT_DATA = 0x%x", r_usb_ept_data);
12458 - err("R_USB_STATUS = 0x%x", reg->r_usb_status);
12462 - switch (usb_pipetype(urb->pipe)) {
12464 - warn("Got epid attn for bulk endpoint, epid %d", epid);
12466 - case PIPE_CONTROL:
12467 - warn("Got epid attn for control endpoint, epid %d", epid);
12469 - case PIPE_INTERRUPT:
12470 - warn("Got epid attn for interrupt endpoint, epid %d", epid);
12472 - case PIPE_ISOCHRONOUS:
12473 - warn("Got epid attn for isochronous endpoint, epid %d", epid);
12477 - if (usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) {
12478 - if (r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, hold)) {
12479 - warn("Hold was set for epid %d.", epid);
12484 - /* Even though error_code occupies bits 22 - 23 in both R_USB_EPT_DATA and
12485 - R_USB_EPT_DATA_ISOC, we separate them here so we don't forget in other places. */
12486 - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
12487 - error_code = IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data);
12489 - error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, r_usb_ept_data);
12492 - /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
12493 - if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
12495 - /* Isoc traffic doesn't have error_count_in/error_count_out. */
12496 - if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
12497 - (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, r_usb_ept_data) == 3 ||
12498 - IO_EXTRACT(R_USB_EPT_DATA, error_count_out, r_usb_ept_data) == 3)) {
12500 - warn("3rd error for epid %i", epid);
12501 - etrax_usb_complete_urb(urb, -EPROTO);
12503 - } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
12505 - warn("Perror for epid %d", epid);
12507 - if (!(r_usb_ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
12508 - /* invalid ep_id */
12509 - panic("Perror because of invalid epid."
12510 - " Deconfigured too early?");
12512 - /* past eof1, near eof, zout transfer, setup transfer */
12514 - /* Dump the urb and the relevant EP descriptor list. */
12517 - __dump_ept_data(epid);
12518 - __dump_ep_list(usb_pipetype(urb->pipe));
12520 - panic("Something wrong with DMA descriptor contents."
12521 - " Too much traffic inserted?");
12523 - } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
12524 - /* buffer ourun */
12525 - panic("Buffer overrun/underrun for epid %d. DMA too busy?", epid);
12528 - } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, stall)) {
12529 - /* Not really a protocol error, just says that the endpoint gave
12530 - a stall response. Note that error_code cannot be stall for isoc. */
12531 - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
12532 - panic("Isoc traffic cannot stall");
12535 - warn("Stall for epid %d", epid);
12536 - etrax_usb_complete_urb(urb, -EPIPE);
12538 - } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, bus_error)) {
12539 - /* Two devices responded to a transaction request. Must be resolved
12540 - by software. FIXME: Reset ports? */
12541 - panic("Bus error for epid %d."
12542 - " Two devices responded to transaction request",
12545 - } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, buffer_error)) {
12546 - /* DMA overrun or underrun. */
12547 - warn("Buffer overrun/underrun for epid %d. DMA too busy?", epid);
12549 - /* It seems that error_code = buffer_error in
12550 - R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
12551 - are the same error. */
12552 - etrax_usb_complete_urb(urb, -EPROTO);
12561 -void etrax_usb_bulk_start_timer_func(unsigned long dummy)
12564 - /* We might enable an EP descriptor behind the current DMA position when it's about
12565 - to decide that there are no more bulk traffic and it should stop the bulk channel.
12566 - Therefore we periodically check if the bulk channel is stopped and there is an
12567 - enabled bulk EP descriptor, in which case we start the bulk channel. */
12568 - dbg_bulk("bulk_start_timer timed out.");
12570 - if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
12573 - dbg_bulk("Bulk DMA channel not running.");
12575 - for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
12576 - if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
12577 - dbg_bulk("Found enabled EP for epid %d, starting bulk channel.\n",
12579 - *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
12581 - /* Restart the bulk eot timer since we just started the bulk channel. */
12582 - mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
12584 - /* No need to search any further. */
12589 - dbg_bulk("Bulk DMA channel running.");
12593 -void etrax_usb_hc_port_status_interrupt(usb_interrupt_registers_t *reg)
12595 - etrax_hc_t *hc = reg->hc;
12596 - __u16 r_usb_rh_port_status_1 = reg->r_usb_rh_port_status_1;
12597 - __u16 r_usb_rh_port_status_2 = reg->r_usb_rh_port_status_2;
12601 - /* The Etrax RH does not include a wPortChange register, so this has to be handled in software
12602 - (by saving the old port status value for comparison when the port status interrupt happens).
12603 - See section 11.16.2.6.2 in the USB 1.1 spec for details. */
12605 - dbg_rh("hc->rh.prev_wPortStatus_1 = 0x%x", hc->rh.prev_wPortStatus_1);
12606 - dbg_rh("hc->rh.prev_wPortStatus_2 = 0x%x", hc->rh.prev_wPortStatus_2);
12607 - dbg_rh("r_usb_rh_port_status_1 = 0x%x", r_usb_rh_port_status_1);
12608 - dbg_rh("r_usb_rh_port_status_2 = 0x%x", r_usb_rh_port_status_2);
12610 - /* C_PORT_CONNECTION is set on any transition. */
12611 - hc->rh.wPortChange_1 |=
12612 - ((r_usb_rh_port_status_1 & (1 << RH_PORT_CONNECTION)) !=
12613 - (hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_CONNECTION))) ?
12614 - (1 << RH_PORT_CONNECTION) : 0;
12616 - hc->rh.wPortChange_2 |=
12617 - ((r_usb_rh_port_status_2 & (1 << RH_PORT_CONNECTION)) !=
12618 - (hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_CONNECTION))) ?
12619 - (1 << RH_PORT_CONNECTION) : 0;
12621 - /* C_PORT_ENABLE is _only_ set on a one to zero transition, i.e. when
12622 - the port is disabled, not when it's enabled. */
12623 - hc->rh.wPortChange_1 |=
12624 - ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_ENABLE))
12625 - && !(r_usb_rh_port_status_1 & (1 << RH_PORT_ENABLE))) ?
12626 - (1 << RH_PORT_ENABLE) : 0;
12628 - hc->rh.wPortChange_2 |=
12629 - ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_ENABLE))
12630 - && !(r_usb_rh_port_status_2 & (1 << RH_PORT_ENABLE))) ?
12631 - (1 << RH_PORT_ENABLE) : 0;
12633 - /* C_PORT_SUSPEND is set to one when the device has transitioned out
12634 - of the suspended state, i.e. when suspend goes from one to zero. */
12635 - hc->rh.wPortChange_1 |=
12636 - ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_SUSPEND))
12637 - && !(r_usb_rh_port_status_1 & (1 << RH_PORT_SUSPEND))) ?
12638 - (1 << RH_PORT_SUSPEND) : 0;
12640 - hc->rh.wPortChange_2 |=
12641 - ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_SUSPEND))
12642 - && !(r_usb_rh_port_status_2 & (1 << RH_PORT_SUSPEND))) ?
12643 - (1 << RH_PORT_SUSPEND) : 0;
12646 - /* C_PORT_RESET is set when reset processing on this port is complete. */
12647 - hc->rh.wPortChange_1 |=
12648 - ((hc->rh.prev_wPortStatus_1 & (1 << RH_PORT_RESET))
12649 - && !(r_usb_rh_port_status_1 & (1 << RH_PORT_RESET))) ?
12650 - (1 << RH_PORT_RESET) : 0;
12652 - hc->rh.wPortChange_2 |=
12653 - ((hc->rh.prev_wPortStatus_2 & (1 << RH_PORT_RESET))
12654 - && !(r_usb_rh_port_status_2 & (1 << RH_PORT_RESET))) ?
12655 - (1 << RH_PORT_RESET) : 0;
12657 - /* Save the new values for next port status change. */
12658 - hc->rh.prev_wPortStatus_1 = r_usb_rh_port_status_1;
12659 - hc->rh.prev_wPortStatus_2 = r_usb_rh_port_status_2;
12661 - dbg_rh("hc->rh.wPortChange_1 set to 0x%x", hc->rh.wPortChange_1);
12662 - dbg_rh("hc->rh.wPortChange_2 set to 0x%x", hc->rh.wPortChange_2);
12668 -void etrax_usb_hc_ctl_status_interrupt(usb_interrupt_registers_t *reg)
12672 - /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
12673 - list for the corresponding epid? */
12674 - if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
12675 - panic("USB controller got ourun.");
12677 - if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
12679 - /* Before, etrax_usb_do_intr_recover was called on this epid if it was
12680 - an interrupt pipe. I don't see how re-enabling all EP descriptors
12681 - will help if there was a programming error. */
12682 - panic("USB controller got perror.");
12685 - if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
12686 - /* We should never operate in device mode. */
12687 - panic("USB controller in device mode.");
12690 - /* These if-statements could probably be nested. */
12691 - if (reg->r_usb_status & IO_MASK(R_USB_STATUS, host_mode)) {
12692 - info("USB controller in host mode.");
12694 - if (reg->r_usb_status & IO_MASK(R_USB_STATUS, started)) {
12695 - info("USB controller started.");
12697 - if (reg->r_usb_status & IO_MASK(R_USB_STATUS, running)) {
12698 - info("USB controller running.");
12706 -static int etrax_rh_submit_urb(struct urb *urb)
12708 - struct usb_device *usb_dev = urb->dev;
12709 - etrax_hc_t *hc = usb_dev->bus->hcpriv;
12710 - unsigned int pipe = urb->pipe;
12711 - struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
12712 - void *data = urb->transfer_buffer;
12713 - int leni = urb->transfer_buffer_length;
12717 - __u16 bmRType_bReq;
12724 - /* FIXME: What is this interrupt urb that is sent to the root hub? */
12725 - if (usb_pipetype (pipe) == PIPE_INTERRUPT) {
12726 - dbg_rh("Root-Hub submit IRQ: every %d ms", urb->interval);
12727 - hc->rh.urb = urb;
12729 - /* FIXME: We could probably remove this line since it's done
12730 - in etrax_rh_init_int_timer. (Don't remove it from
12731 - etrax_rh_init_int_timer though.) */
12732 - hc->rh.interval = urb->interval;
12733 - etrax_rh_init_int_timer(urb);
12739 - bmRType_bReq = cmd->bRequestType | (cmd->bRequest << 8);
12740 - wValue = le16_to_cpu(cmd->wValue);
12741 - wIndex = le16_to_cpu(cmd->wIndex);
12742 - wLength = le16_to_cpu(cmd->wLength);
12744 - dbg_rh("bmRType_bReq : 0x%04x (%d)", bmRType_bReq, bmRType_bReq);
12745 - dbg_rh("wValue : 0x%04x (%d)", wValue, wValue);
12746 - dbg_rh("wIndex : 0x%04x (%d)", wIndex, wIndex);
12747 - dbg_rh("wLength : 0x%04x (%d)", wLength, wLength);
12749 - switch (bmRType_bReq) {
12751 - /* Request Destination:
12752 - without flags: Device,
12753 - RH_INTERFACE: interface,
12754 - RH_ENDPOINT: endpoint,
12755 - RH_CLASS means HUB here,
12756 - RH_OTHER | RH_CLASS almost ever means HUB_PORT here
12759 - case RH_GET_STATUS:
12760 - *(__u16 *) data = cpu_to_le16 (1);
12763 - case RH_GET_STATUS | RH_INTERFACE:
12764 - *(__u16 *) data = cpu_to_le16 (0);
12767 - case RH_GET_STATUS | RH_ENDPOINT:
12768 - *(__u16 *) data = cpu_to_le16 (0);
12771 - case RH_GET_STATUS | RH_CLASS:
12772 - *(__u32 *) data = cpu_to_le32 (0);
12773 - OK (4); /* hub power ** */
12775 - case RH_GET_STATUS | RH_OTHER | RH_CLASS:
12776 - if (wIndex == 1) {
12777 - *((__u16*)data) = cpu_to_le16(hc->rh.prev_wPortStatus_1);
12778 - *((__u16*)data + 1) = cpu_to_le16(hc->rh.wPortChange_1);
12779 - } else if (wIndex == 2) {
12780 - *((__u16*)data) = cpu_to_le16(hc->rh.prev_wPortStatus_2);
12781 - *((__u16*)data + 1) = cpu_to_le16(hc->rh.wPortChange_2);
12783 - dbg_rh("RH_GET_STATUS whith invalid wIndex!");
12789 - case RH_CLEAR_FEATURE | RH_ENDPOINT:
12790 - switch (wValue) {
12791 - case (RH_ENDPOINT_STALL):
12796 - case RH_CLEAR_FEATURE | RH_CLASS:
12797 - switch (wValue) {
12798 - case (RH_C_HUB_OVER_CURRENT):
12799 - OK (0); /* hub power over current ** */
12803 - case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
12804 - switch (wValue) {
12805 - case (RH_PORT_ENABLE):
12806 - if (wIndex == 1) {
12808 - dbg_rh("trying to do disable port 1");
12810 - *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
12812 - while (hc->rh.prev_wPortStatus_1 &
12813 - IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes));
12814 - *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
12815 - dbg_rh("Port 1 is disabled");
12817 - } else if (wIndex == 2) {
12819 - dbg_rh("trying to do disable port 2");
12821 - *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
12823 - while (hc->rh.prev_wPortStatus_2 &
12824 - IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes));
12825 - *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
12826 - dbg_rh("Port 2 is disabled");
12829 - dbg_rh("RH_CLEAR_FEATURE->RH_PORT_ENABLE "
12830 - "with invalid wIndex == %d!", wIndex);
12834 - case (RH_PORT_SUSPEND):
12835 - /* Opposite to suspend should be resume, so we'll do a resume. */
12836 - /* FIXME: USB 1.1, 11.16.2.2 says:
12837 - "Clearing the PORT_SUSPEND feature causes a host-initiated resume
12838 - on the specified port. If the port is not in the Suspended state,
12839 - the hub should treat this request as a functional no-operation."
12840 - Shouldn't we check if the port is in a suspended state before
12843 - /* Make sure the controller isn't busy. */
12844 - while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
12846 - if (wIndex == 1) {
12848 - IO_STATE(R_USB_COMMAND, port_sel, port1) |
12849 - IO_STATE(R_USB_COMMAND, port_cmd, resume) |
12850 - IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12851 - } else if (wIndex == 2) {
12853 - IO_STATE(R_USB_COMMAND, port_sel, port2) |
12854 - IO_STATE(R_USB_COMMAND, port_cmd, resume) |
12855 - IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12857 - dbg_rh("RH_CLEAR_FEATURE->RH_PORT_SUSPEND "
12858 - "with invalid wIndex == %d!", wIndex);
12862 - case (RH_PORT_POWER):
12863 - OK (0); /* port power ** */
12864 - case (RH_C_PORT_CONNECTION):
12865 - if (wIndex == 1) {
12866 - hc->rh.wPortChange_1 &= ~(1 << RH_PORT_CONNECTION);
12867 - } else if (wIndex == 2) {
12868 - hc->rh.wPortChange_2 &= ~(1 << RH_PORT_CONNECTION);
12870 - dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_CONNECTION "
12871 - "with invalid wIndex == %d!", wIndex);
12875 - case (RH_C_PORT_ENABLE):
12876 - if (wIndex == 1) {
12877 - hc->rh.wPortChange_1 &= ~(1 << RH_PORT_ENABLE);
12878 - } else if (wIndex == 2) {
12879 - hc->rh.wPortChange_2 &= ~(1 << RH_PORT_ENABLE);
12881 - dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_ENABLE "
12882 - "with invalid wIndex == %d!", wIndex);
12885 - case (RH_C_PORT_SUSPEND):
12886 -/*** WR_RH_PORTSTAT(RH_PS_PSSC); */
12888 - case (RH_C_PORT_OVER_CURRENT):
12889 - OK (0); /* port power over current ** */
12890 - case (RH_C_PORT_RESET):
12891 - if (wIndex == 1) {
12892 - hc->rh.wPortChange_1 &= ~(1 << RH_PORT_RESET);
12893 - } else if (wIndex == 2) {
12894 - hc->rh.wPortChange_2 &= ~(1 << RH_PORT_RESET);
12896 - dbg_rh("RH_CLEAR_FEATURE->RH_C_PORT_RESET "
12897 - "with invalid index == %d!", wIndex);
12905 - case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
12906 - switch (wValue) {
12907 - case (RH_PORT_SUSPEND):
12909 - /* Make sure the controller isn't busy. */
12910 - while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
12912 - if (wIndex == 1) {
12914 - IO_STATE(R_USB_COMMAND, port_sel, port1) |
12915 - IO_STATE(R_USB_COMMAND, port_cmd, suspend) |
12916 - IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12917 - } else if (wIndex == 2) {
12919 - IO_STATE(R_USB_COMMAND, port_sel, port2) |
12920 - IO_STATE(R_USB_COMMAND, port_cmd, suspend) |
12921 - IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12923 - dbg_rh("RH_SET_FEATURE->RH_PORT_SUSPEND "
12924 - "with invalid wIndex == %d!", wIndex);
12928 - case (RH_PORT_RESET):
12929 - if (wIndex == 1) {
12932 - dbg_rh("Doing reset of port 1");
12934 - /* Make sure the controller isn't busy. */
12935 - while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
12938 - IO_STATE(R_USB_COMMAND, port_sel, port1) |
12939 - IO_STATE(R_USB_COMMAND, port_cmd, reset) |
12940 - IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12942 - /* We must wait at least 10 ms for the device to recover.
12943 - 15 ms should be enough. */
12946 - /* Wait for reset bit to go low (should be done by now). */
12947 - while (hc->rh.prev_wPortStatus_1 &
12948 - IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes));
12950 - /* If the port status is
12951 - 1) connected and enabled then there is a device and everything is fine
12952 - 2) neither connected nor enabled then there is no device, also fine
12953 - 3) connected and not enabled then we try again
12954 - (Yes, there are other port status combinations besides these.) */
12956 - if ((hc->rh.prev_wPortStatus_1 &
12957 - IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) &&
12958 - (hc->rh.prev_wPortStatus_1 &
12959 - IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no))) {
12960 - dbg_rh("Connected device on port 1, but port not enabled?"
12961 - " Trying reset again.");
12962 - goto port_2_reset;
12965 - /* Diagnostic printouts. */
12966 - if ((hc->rh.prev_wPortStatus_1 &
12967 - IO_STATE(R_USB_RH_PORT_STATUS_1, connected, no)) &&
12968 - (hc->rh.prev_wPortStatus_1 &
12969 - IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no))) {
12970 - dbg_rh("No connected device on port 1");
12971 - } else if ((hc->rh.prev_wPortStatus_1 &
12972 - IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) &&
12973 - (hc->rh.prev_wPortStatus_1 &
12974 - IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes))) {
12975 - dbg_rh("Connected device on port 1, port 1 enabled");
12978 - } else if (wIndex == 2) {
12981 - dbg_rh("Doing reset of port 2");
12983 - /* Make sure the controller isn't busy. */
12984 - while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
12986 - /* Issue the reset command. */
12988 - IO_STATE(R_USB_COMMAND, port_sel, port2) |
12989 - IO_STATE(R_USB_COMMAND, port_cmd, reset) |
12990 - IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
12992 - /* We must wait at least 10 ms for the device to recover.
12993 - 15 ms should be enough. */
12996 - /* Wait for reset bit to go low (should be done by now). */
12997 - while (hc->rh.prev_wPortStatus_2 &
12998 - IO_STATE(R_USB_RH_PORT_STATUS_2, reset, yes));
13000 - /* If the port status is
13001 - 1) connected and enabled then there is a device and everything is fine
13002 - 2) neither connected nor enabled then there is no device, also fine
13003 - 3) connected and not enabled then we try again
13004 - (Yes, there are other port status combinations besides these.) */
13006 - if ((hc->rh.prev_wPortStatus_2 &
13007 - IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes)) &&
13008 - (hc->rh.prev_wPortStatus_2 &
13009 - IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no))) {
13010 - dbg_rh("Connected device on port 2, but port not enabled?"
13011 - " Trying reset again.");
13012 - goto port_2_reset;
13015 - /* Diagnostic printouts. */
13016 - if ((hc->rh.prev_wPortStatus_2 &
13017 - IO_STATE(R_USB_RH_PORT_STATUS_2, connected, no)) &&
13018 - (hc->rh.prev_wPortStatus_2 &
13019 - IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no))) {
13020 - dbg_rh("No connected device on port 2");
13021 - } else if ((hc->rh.prev_wPortStatus_2 &
13022 - IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes)) &&
13023 - (hc->rh.prev_wPortStatus_2 &
13024 - IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes))) {
13025 - dbg_rh("Connected device on port 2, port 2 enabled");
13029 - dbg_rh("RH_SET_FEATURE->RH_PORT_RESET with invalid wIndex = %d", wIndex);
13032 - /* Make sure the controller isn't busy. */
13033 - while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
13035 - /* If all enabled ports were disabled the host controller goes down into
13036 - started mode, so we need to bring it back into the running state.
13037 - (This is safe even if it's already in the running state.) */
13039 - IO_STATE(R_USB_COMMAND, port_sel, nop) |
13040 - IO_STATE(R_USB_COMMAND, port_cmd, reset) |
13041 - IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
13043 - dbg_rh("...Done");
13046 - case (RH_PORT_POWER):
13047 - OK (0); /* port power ** */
13048 - case (RH_PORT_ENABLE):
13049 - /* There is no port enable command in the host controller, so if the
13050 - port is already enabled, we do nothing. If not, we reset the port
13051 - (with an ugly goto). */
13053 - if (wIndex == 1) {
13054 - if (hc->rh.prev_wPortStatus_1 &
13055 - IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, no)) {
13056 - goto port_1_reset;
13058 - } else if (wIndex == 2) {
13059 - if (hc->rh.prev_wPortStatus_2 &
13060 - IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, no)) {
13061 - goto port_2_reset;
13064 - dbg_rh("RH_SET_FEATURE->RH_GET_STATUS with invalid wIndex = %d", wIndex);
13070 - case RH_SET_ADDRESS:
13071 - hc->rh.devnum = wValue;
13072 - dbg_rh("RH address set to: %d", hc->rh.devnum);
13075 - case RH_GET_DESCRIPTOR:
13076 - switch ((wValue & 0xff00) >> 8) {
13077 - case (0x01): /* device descriptor */
13078 - len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_dev_des), wLength));
13079 - memcpy (data, root_hub_dev_des, len);
13081 - case (0x02): /* configuration descriptor */
13082 - len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_config_des), wLength));
13083 - memcpy (data, root_hub_config_des, len);
13085 - case (0x03): /* string descriptors */
13086 - len = usb_root_hub_string (wValue & 0xff,
13087 - 0xff, "ETRAX 100LX",
13090 - OK(min(leni, len));
13098 - case RH_GET_DESCRIPTOR | RH_CLASS:
13099 - root_hub_hub_des[2] = hc->rh.numports;
13100 - len = min_t(unsigned int, leni, min_t(unsigned int, sizeof (root_hub_hub_des), wLength));
13101 - memcpy (data, root_hub_hub_des, len);
13104 - case RH_GET_CONFIGURATION:
13105 - *(__u8 *) data = 0x01;
13108 - case RH_SET_CONFIGURATION:
13115 - urb->actual_length = len;
13116 - urb->status = stat;
13118 - if (urb->complete) {
13119 - urb->complete(urb, NULL);
13127 -etrax_usb_bulk_eot_timer_func(unsigned long dummy)
13129 - /* Because of a race condition in the top half, we might miss a bulk eot.
13130 - This timer "simulates" a bulk eot if we don't get one for a while, hopefully
13131 - correcting the situation. */
13132 - dbg_bulk("bulk_eot_timer timed out.");
13133 - etrax_usb_hc_bulk_eot_interrupt(1);
13137 -etrax_usb_buffer_alloc(struct usb_bus* bus, size_t size,
13138 - unsigned mem_flags, dma_addr_t *dma)
13140 - return kmalloc(size, mem_flags);
13144 -etrax_usb_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma)
13150 -static struct device fake_device;
13152 -static int __init etrax_usb_hc_init(void)
13154 - static etrax_hc_t *hc;
13155 - struct usb_bus *bus;
13156 - struct usb_device *usb_rh;
13161 - info("ETRAX 100LX USB-HCD %s (c) 2001-2003 Axis Communications AB\n", usb_hcd_version);
13163 - hc = kmalloc(sizeof(etrax_hc_t), GFP_KERNEL);
13164 - assert(hc != NULL);
13166 - /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
13167 - /* Note that we specify sizeof(USB_EP_Desc_t) as the size, but also allocate
13168 - SB descriptors from this cache. This is ok since sizeof(USB_EP_Desc_t) ==
13169 - sizeof(USB_SB_Desc_t). */
13171 - usb_desc_cache = kmem_cache_create("usb_desc_cache", sizeof(USB_EP_Desc_t), 0,
13172 - SLAB_HWCACHE_ALIGN, 0, 0);
13173 - assert(usb_desc_cache != NULL);
13175 - top_half_reg_cache = kmem_cache_create("top_half_reg_cache",
13176 - sizeof(usb_interrupt_registers_t),
13177 - 0, SLAB_HWCACHE_ALIGN, 0, 0);
13178 - assert(top_half_reg_cache != NULL);
13180 - isoc_compl_cache = kmem_cache_create("isoc_compl_cache",
13181 - sizeof(usb_isoc_complete_data_t),
13182 - 0, SLAB_HWCACHE_ALIGN, 0, 0);
13183 - assert(isoc_compl_cache != NULL);
13185 - etrax_usb_bus = bus = usb_alloc_bus(&etrax_usb_device_operations);
13187 - bus->bus_name="ETRAX 100LX";
13188 - bus->hcpriv = hc;
13190 - /* Initialize RH to the default address.
13191 - And make sure that we have no status change indication */
13192 - hc->rh.numports = 2; /* The RH has two ports */
13193 - hc->rh.devnum = 1;
13194 - hc->rh.wPortChange_1 = 0;
13195 - hc->rh.wPortChange_2 = 0;
13197 - /* Also initate the previous values to zero */
13198 - hc->rh.prev_wPortStatus_1 = 0;
13199 - hc->rh.prev_wPortStatus_2 = 0;
13201 - /* Initialize the intr-traffic flags */
13202 - /* FIXME: This isn't used. (Besides, the error field isn't initialized.) */
13203 - hc->intr.sleeping = 0;
13204 - hc->intr.wq = NULL;
13206 - epid_usage_bitmask = 0;
13207 - epid_out_traffic = 0;
13209 - /* Mark the invalid epid as being used. */
13210 - set_bit(INVALID_EPID, (void *)&epid_usage_bitmask);
13211 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, INVALID_EPID);
13213 - /* The valid bit should still be set ('invalid' is in our world; not the hardware's). */
13214 - *R_USB_EPT_DATA = (IO_STATE(R_USB_EPT_DATA, valid, yes) |
13215 - IO_FIELD(R_USB_EPT_DATA, max_len, 1));
13217 - /* Mark the dummy epid as being used. */
13218 - set_bit(DUMMY_EPID, (void *)&epid_usage_bitmask);
13219 - *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, DUMMY_EPID);
13221 - *R_USB_EPT_DATA = (IO_STATE(R_USB_EPT_DATA, valid, no) |
13222 - IO_FIELD(R_USB_EPT_DATA, max_len, 1));
13224 - /* Initialize the urb list by initiating a head for each list. */
13225 - for (i = 0; i < NBR_OF_EPIDS; i++) {
13226 - INIT_LIST_HEAD(&urb_list[i]);
13228 - spin_lock_init(&urb_list_lock);
13230 - INIT_LIST_HEAD(&urb_unlink_list);
13233 - /* Initiate the bulk start timer. */
13234 - init_timer(&bulk_start_timer);
13235 - bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
13236 - bulk_start_timer.function = etrax_usb_bulk_start_timer_func;
13237 - add_timer(&bulk_start_timer);
13240 - /* Initiate the bulk eot timer. */
13241 - init_timer(&bulk_eot_timer);
13242 - bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
13243 - bulk_eot_timer.function = etrax_usb_bulk_eot_timer_func;
13244 - add_timer(&bulk_eot_timer);
13246 - /* Set up the data structures for USB traffic. Note that this must be done before
13247 - any interrupt that relies on sane DMA list occurrs. */
13248 - init_rx_buffers();
13249 - init_tx_bulk_ep();
13250 - init_tx_ctrl_ep();
13251 - init_tx_intr_ep();
13252 - init_tx_isoc_ep();
13254 - device_initialize(&fake_device);
13255 - kobject_set_name(&fake_device.kobj, "etrax_usb");
13256 - kobject_add(&fake_device.kobj);
13257 - kobject_uevent(&fake_device.kobj, KOBJ_ADD);
13258 - hc->bus->controller = &fake_device;
13259 - usb_register_bus(hc->bus);
13261 - *R_IRQ_MASK2_SET =
13262 - /* Note that these interrupts are not used. */
13263 - IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
13264 - /* Sub channel 1 (ctrl) descr. interrupts are used. */
13265 - IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
13266 - IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
13267 - /* Sub channel 3 (isoc) descr. interrupts are used. */
13268 - IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
13270 - /* Note that the dma9_descr interrupt is not used. */
13271 - *R_IRQ_MASK2_SET =
13272 - IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
13273 - IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
13275 - /* FIXME: Enable iso_eof only when isoc traffic is running. */
13276 - *R_USB_IRQ_MASK_SET =
13277 - IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) |
13278 - IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
13279 - IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
13280 - IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
13281 - IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
13284 - if (request_irq(ETRAX_USB_HC_IRQ, etrax_usb_hc_interrupt_top_half, 0,
13285 - "ETRAX 100LX built-in USB (HC)", hc)) {
13286 - err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
13287 - etrax_usb_hc_cleanup();
13292 - if (request_irq(ETRAX_USB_RX_IRQ, etrax_usb_rx_interrupt, 0,
13293 - "ETRAX 100LX built-in USB (Rx)", hc)) {
13294 - err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
13295 - etrax_usb_hc_cleanup();
13300 - if (request_irq(ETRAX_USB_TX_IRQ, etrax_usb_tx_interrupt, 0,
13301 - "ETRAX 100LX built-in USB (Tx)", hc)) {
13302 - err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
13303 - etrax_usb_hc_cleanup();
13308 - /* R_USB_COMMAND:
13309 - USB commands in host mode. The fields in this register should all be
13310 - written to in one write. Do not read-modify-write one field at a time. A
13311 - write to this register will trigger events in the USB controller and an
13312 - incomplete command may lead to unpredictable results, and in worst case
13313 - even to a deadlock in the controller.
13314 - (Note however that the busy field is read-only, so no need to write to it.) */
13316 - /* Check the busy bit before writing to R_USB_COMMAND. */
13318 - while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
13320 - /* Reset the USB interface. */
13322 - IO_STATE(R_USB_COMMAND, port_sel, nop) |
13323 - IO_STATE(R_USB_COMMAND, port_cmd, reset) |
13324 - IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
13326 - /* Designer's Reference, p. 8 - 10 says we should Initate R_USB_FM_PSTART to 0x2A30 (10800),
13327 - to guarantee that control traffic gets 10% of the bandwidth, and periodic transfer may
13328 - allocate the rest (90%). This doesn't work though. Read on for a lenghty explanation.
13330 - While there is a difference between rev. 2 and rev. 3 of the ETRAX 100LX regarding the NAK
13331 - behaviour, it doesn't solve this problem. What happens is that a control transfer will not
13332 - be interrupted in its data stage when PSTART happens (the point at which periodic traffic
13333 - is started). Thus, if PSTART is set to 10800 and its IN or OUT token is NAKed until just before
13334 - PSTART happens, it will continue the IN/OUT transfer as long as it's ACKed. After it's done,
13335 - there may be too little time left for an isochronous transfer, causing an epid attention
13336 - interrupt due to perror. The work-around for this is to let the control transfers run at the
13337 - end of the frame instead of at the beginning, and will be interrupted just fine if it doesn't
13338 - fit into the frame. However, since there will *always* be a control transfer at the beginning
13339 - of the frame, regardless of what we set PSTART to, that transfer might be a 64-byte transfer
13340 - which consumes up to 15% of the frame, leaving only 85% for periodic traffic. The solution to
13341 - this would be to 'dummy allocate' 5% of the frame with the usb_claim_bandwidth function to make
13342 - sure that the periodic transfers that are inserted will always fit in the frame.
13344 - The idea was suggested that a control transfer could be split up into several 8 byte transfers,
13345 - so that it would be interrupted by PSTART, but since this can't be done for an IN transfer this
13346 - hasn't been implemented.
13348 - The value 11960 is chosen to be just after the SOF token, with a couple of bit times extra
13349 - for possible bit stuffing. */
13351 - *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
13353 -#ifdef CONFIG_ETRAX_USB_HOST_PORT1
13354 - *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
13357 -#ifdef CONFIG_ETRAX_USB_HOST_PORT2
13358 - *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
13361 - while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
13363 - /* Configure the USB interface as a host controller. */
13365 - IO_STATE(R_USB_COMMAND, port_sel, nop) |
13366 - IO_STATE(R_USB_COMMAND, port_cmd, reset) |
13367 - IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
13369 - /* Note: Do not reset any ports here. Await the port status interrupts, to have a controlled
13370 - sequence of resetting the ports. If we reset both ports now, and there are devices
13371 - on both ports, we will get a bus error because both devices will answer the set address
13374 - while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
13376 - /* Start processing of USB traffic. */
13378 - IO_STATE(R_USB_COMMAND, port_sel, nop) |
13379 - IO_STATE(R_USB_COMMAND, port_cmd, reset) |
13380 - IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
13382 - while (*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy));
13384 - usb_rh = usb_alloc_dev(NULL, hc->bus, 0);
13385 - hc->bus->root_hub = usb_rh;
13386 - usb_rh->state = USB_STATE_ADDRESS;
13387 - usb_rh->speed = USB_SPEED_FULL;
13388 - usb_rh->devnum = 1;
13389 - hc->bus->devnum_next = 2;
13390 - usb_rh->ep0.desc.wMaxPacketSize = __const_cpu_to_le16(64);
13391 - usb_get_device_descriptor(usb_rh, USB_DT_DEVICE_SIZE);
13392 - usb_new_device(usb_rh);
13399 -static void etrax_usb_hc_cleanup(void)
13403 - free_irq(ETRAX_USB_HC_IRQ, NULL);
13404 - free_irq(ETRAX_USB_RX_IRQ, NULL);
13405 - free_irq(ETRAX_USB_TX_IRQ, NULL);
13407 - usb_deregister_bus(etrax_usb_bus);
13409 - /* FIXME: call kmem_cache_destroy here? */
13414 -module_init(etrax_usb_hc_init);
13415 -module_exit(etrax_usb_hc_cleanup);
13416 +/* Module hooks */
13417 +module_init(module_hcd_init);
13418 +module_exit(module_hcd_exit);
13419 --- linux-2.6.19.2.orig/drivers/usb/host/hc-crisv10.c 1970-01-01 01:00:00.000000000 +0100
13420 +++ linux-2.6.19.2.dev/drivers/usb/host/hc-crisv10.c 2007-02-26 20:58:29.000000000 +0100
13424 + * ETRAX 100LX USB Host Controller Driver
13426 + * Copyright (C) 2005, 2006 Axis Communications AB
13428 + * Author: Konrad Eriksson <konrad.eriksson@axis.se>
13432 +#include <linux/module.h>
13433 +#include <linux/kernel.h>
13434 +#include <linux/init.h>
13435 +#include <linux/moduleparam.h>
13436 +#include <linux/spinlock.h>
13437 +#include <linux/usb.h>
13438 +#include <linux/platform_device.h>
13440 +#include <asm/io.h>
13441 +#include <asm/irq.h>
13442 +#include <asm/arch/dma.h>
13443 +#include <asm/arch/io_interface_mux.h>
13445 +#include "../core/hcd.h"
13446 +#include "../core/hub.h"
13447 +#include "hc-crisv10.h"
13448 +#include "hc-cris-dbg.h"
13451 +/***************************************************************************/
13452 +/***************************************************************************/
13453 +/* Host Controller settings */
13454 +/***************************************************************************/
13455 +/***************************************************************************/
13457 +#define VERSION "1.00"
13458 +#define COPYRIGHT "(c) 2005, 2006 Axis Communications AB"
13459 +#define DESCRIPTION "ETRAX 100LX USB Host Controller"
13461 +#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
13462 +#define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
13463 +#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
13465 +/* Number of physical ports in Etrax 100LX */
13466 +#define USB_ROOT_HUB_PORTS 2
13468 +const char hc_name[] = "hc-crisv10";
13469 +const char product_desc[] = DESCRIPTION;
13471 +/* The number of epids is, among other things, used for pre-allocating
13472 + ctrl, bulk and isoc EP descriptors (one for each epid).
13473 + Assumed to be > 1 when initiating the DMA lists. */
13474 +#define NBR_OF_EPIDS 32
13476 +/* Support interrupt traffic intervals up to 128 ms. */
13477 +#define MAX_INTR_INTERVAL 128
13479 +/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
13480 + table must be "invalid". By this we mean that we shouldn't care about epid
13481 + attentions for this epid, or at least handle them differently from epid
13482 + attentions for "valid" epids. This define determines which one to use
13483 + (don't change it). */
13484 +#define INVALID_EPID 31
13485 +/* A special epid for the bulk dummys. */
13486 +#define DUMMY_EPID 30
13488 +/* Module settings */
13490 +MODULE_DESCRIPTION(DESCRIPTION);
13491 +MODULE_LICENSE("GPL");
13492 +MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
13495 +/* Module parameters */
13497 +/* 0 = No ports enabled
13498 + 1 = Only port 1 enabled (on board ethernet on devboard)
13499 + 2 = Only port 2 enabled (external connector on devboard)
13500 + 3 = Both ports enabled
13502 +static unsigned int ports = 3;
13503 +module_param(ports, uint, S_IRUGO);
13504 +MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use");
13507 +/***************************************************************************/
13508 +/***************************************************************************/
13509 +/* Shared global variables for this module */
13510 +/***************************************************************************/
13511 +/***************************************************************************/
13513 +/* EP descriptor lists for non period transfers. Must be 32-bit aligned. */
13514 +static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
13516 +static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
13518 +/* EP descriptor lists for period transfers. Must be 32-bit aligned. */
13519 +static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
13520 +static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));
13522 +static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
13523 +static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));
13525 +static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
13527 +/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
13528 + causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
13529 + gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
13530 + EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
13531 + in each frame. */
13532 +static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
13534 +/* List of URB pointers, where each points to the active URB for a epid.
13535 + For Bulk, Ctrl and Intr this means which URB that currently is added to
13536 + DMA lists (Isoc URBs are all directly added to DMA lists). As soon as
13537 + URB has completed is the queue examined and the first URB in queue is
13538 + removed and moved to the activeUrbList while its state change to STARTED and
13539 + its transfer(s) gets added to DMA list (exception Isoc where URBs enter
13540 + state STARTED directly and added transfers added to DMA lists). */
13541 +static struct urb *activeUrbList[NBR_OF_EPIDS];
13543 +/* Additional software state info for each epid */
13544 +static struct etrax_epid epid_state[NBR_OF_EPIDS];
13546 +/* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
13547 + even if there is new data waiting to be processed */
13548 +static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
13549 +static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);
13551 +/* We want the start timer to expire before the eot timer, because the former
13552 + might start traffic, thus making it unnecessary for the latter to time
13554 +#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
13555 +#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */
13558 +/* Delay before a URB completion happens when it's scheduled to be delayed */
13558 +#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
13560 +/* Simplifying macros for checking software state info of an epid */
13561 +/* ----------------------------------------------------------------------- */
13562 +#define epid_inuse(epid) epid_state[epid].inuse
13563 +#define epid_out_traffic(epid) epid_state[epid].out_traffic
13564 +#define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
13565 +#define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
13568 +/***************************************************************************/
13569 +/***************************************************************************/
13570 +/* DEBUG FUNCTIONS */
13571 +/***************************************************************************/
13572 +/***************************************************************************/
13573 +/* Note that these functions are always available in their "__" variants,
13574 + for use in error situations. The variants without "__" are controlled by
13575 + the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
13576 +static void __dump_urb(struct urb* purb)
13578 + struct crisv10_urb_priv *urb_priv = purb->hcpriv;
13579 + int urb_num = -1;
13581 + urb_num = urb_priv->urb_num;
13583 + printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
13584 + printk("dev :0x%08lx\n", (unsigned long)purb->dev);
13585 + printk("pipe :0x%08x\n", purb->pipe);
13586 + printk("status :%d\n", purb->status);
13587 + printk("transfer_flags :0x%08x\n", purb->transfer_flags);
13588 + printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
13589 + printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
13590 + printk("actual_length :%d\n", purb->actual_length);
13591 + printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
13592 + printk("start_frame :%d\n", purb->start_frame);
13593 + printk("number_of_packets :%d\n", purb->number_of_packets);
13594 + printk("interval :%d\n", purb->interval);
13595 + printk("error_count :%d\n", purb->error_count);
13596 + printk("context :0x%08lx\n", (unsigned long)purb->context);
13597 + printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
13600 +static void __dump_in_desc(volatile struct USB_IN_Desc *in)
13602 + printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
13603 + printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
13604 + printk(" command : 0x%04x\n", in->command);
13605 + printk(" next : 0x%08lx\n", in->next);
13606 + printk(" buf : 0x%08lx\n", in->buf);
13607 + printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
13608 + printk(" status : 0x%04x\n\n", in->status);
13611 +static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
13613 + char tt = (sb->command & 0x30) >> 4;
13618 + tt_string = "zout";
13621 + tt_string = "in";
13624 + tt_string = "out";
13627 + tt_string = "setup";
13630 + tt_string = "unknown (weird)";
13633 + printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
13634 + printk(" command:0x%04x (", sb->command);
13635 + printk("rem:%d ", (sb->command & 0x3f00) >> 8);
13636 + printk("full:%d ", (sb->command & 0x40) >> 6);
13637 + printk("tt:%d(%s) ", tt, tt_string);
13638 + printk("intr:%d ", (sb->command & 0x8) >> 3);
13639 + printk("eot:%d ", (sb->command & 0x2) >> 1);
13640 + printk("eol:%d)", sb->command & 0x1);
13641 + printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
13642 + printk(" next:0x%08lx", sb->next);
13643 + printk(" buf:0x%08lx\n", sb->buf);
13647 +static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
13649 + printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
13650 + printk(" command:0x%04x (", ep->command);
13651 + printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
13652 + printk("enable:%d ", (ep->command & 0x10) >> 4);
13653 + printk("intr:%d ", (ep->command & 0x8) >> 3);
13654 + printk("eof:%d ", (ep->command & 0x2) >> 1);
13655 + printk("eol:%d)", ep->command & 0x1);
13656 + printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
13657 + printk(" next:0x%08lx", ep->next);
13658 + printk(" sub:0x%08lx\n", ep->sub);
13661 +static inline void __dump_ep_list(int pipe_type)
13663 + volatile struct USB_EP_Desc *ep;
13664 + volatile struct USB_EP_Desc *first_ep;
13665 + volatile struct USB_SB_Desc *sb;
13667 + switch (pipe_type)
13670 + first_ep = &TxBulkEPList[0];
13672 + case PIPE_CONTROL:
13673 + first_ep = &TxCtrlEPList[0];
13675 + case PIPE_INTERRUPT:
13676 + first_ep = &TxIntrEPList[0];
13678 + case PIPE_ISOCHRONOUS:
13679 + first_ep = &TxIsocEPList[0];
13682 + warn("Cannot dump unknown traffic type");
13687 + printk("\n\nDumping EP list...\n\n");
13690 + __dump_ep_desc(ep);
13691 + /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
13692 + sb = ep->sub ? phys_to_virt(ep->sub) : 0;
13694 + __dump_sb_desc(sb);
13695 + sb = sb->next ? phys_to_virt(sb->next) : 0;
13697 + ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));
13699 + } while (ep != first_ep);
13702 +static inline void __dump_ept_data(int epid)
13704 + unsigned long flags;
13705 + __u32 r_usb_ept_data;
13707 + if (epid < 0 || epid > 31) {
13708 + printk("Cannot dump ept data for invalid epid %d\n", epid);
13712 + local_irq_save(flags);
13713 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
13715 + r_usb_ept_data = *R_USB_EPT_DATA;
13716 + local_irq_restore(flags);
13718 + printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
13719 + if (r_usb_ept_data == 0) {
13720 + /* No need for more detailed printing. */
13723 + printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
13724 + printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
13725 + printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
13726 + printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
13727 + printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
13728 + printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
13729 + printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
13730 + printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
13731 + printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
13732 + printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
13733 + printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
13734 + printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
13737 +static inline void __dump_ept_data_iso(int epid)
13739 + unsigned long flags;
13742 + if (epid < 0 || epid > 31) {
13743 + printk("Cannot dump ept data for invalid epid %d\n", epid);
13747 + local_irq_save(flags);
13748 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
13750 + ept_data = *R_USB_EPT_DATA_ISO;
13751 + local_irq_restore(flags);
13753 + printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid);
13754 + if (ept_data == 0) {
13755 + /* No need for more detailed printing. */
13758 + printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
13760 + printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
13762 + printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
13764 + printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
13766 + printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
13768 + printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
13772 +static inline void __dump_ept_data_list(void)
13776 + printk("Dumping the whole R_USB_EPT_DATA list\n");
13778 + for (i = 0; i < 32; i++) {
13779 + __dump_ept_data(i);
13783 +static void debug_epid(int epid) {
13786 + if(epid_isoc(epid)) {
13787 + __dump_ept_data_iso(epid);
13789 + __dump_ept_data(epid);
13792 + printk("Bulk:\n");
13793 + for(i = 0; i < 32; i++) {
13794 + if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
13796 + printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
13800 + printk("Ctrl:\n");
13801 + for(i = 0; i < 32; i++) {
13802 + if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
13804 + printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
13808 + printk("Intr:\n");
13809 + for(i = 0; i < MAX_INTR_INTERVAL; i++) {
13810 + if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
13812 + printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
13816 + printk("Isoc:\n");
13817 + for(i = 0; i < 32; i++) {
13818 + if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
13820 + printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
13824 + __dump_ept_data_list();
13825 + __dump_ep_list(PIPE_INTERRUPT);
13831 +char* hcd_status_to_str(__u8 bUsbStatus) {
13832 + static char hcd_status_str[128];
13833 + hcd_status_str[0] = '\0';
13834 + if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
13835 + strcat(hcd_status_str, "ourun ");
13837 + if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
13838 + strcat(hcd_status_str, "perror ");
13840 + if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
13841 + strcat(hcd_status_str, "device_mode ");
13843 + if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
13844 + strcat(hcd_status_str, "host_mode ");
13846 + if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
13847 + strcat(hcd_status_str, "started ");
13849 + if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
13850 + strcat(hcd_status_str, "running ");
13852 + return hcd_status_str;
13856 +char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
13857 + static char sblist_to_str_buff[128];
13858 + char tmp[32], tmp2[32];
13859 + sblist_to_str_buff[0] = '\0';
13860 + while(sb_desc != NULL) {
13861 + switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
13862 + case 0: sprintf(tmp, "zout"); break;
13863 + case 1: sprintf(tmp, "in"); break;
13864 + case 2: sprintf(tmp, "out"); break;
13865 + case 3: sprintf(tmp, "setup"); break;
13867 + sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
13868 + strcat(sblist_to_str_buff, tmp2);
13869 + if(sb_desc->next != 0) {
13870 + sb_desc = phys_to_virt(sb_desc->next);
13875 + return sblist_to_str_buff;
13878 +char* port_status_to_str(__u16 wPortStatus) {
13879 + static char port_status_str[128];
13880 + port_status_str[0] = '\0';
13881 + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
13882 + strcat(port_status_str, "connected ");
13884 + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
13885 + strcat(port_status_str, "enabled ");
13887 + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
13888 + strcat(port_status_str, "suspended ");
13890 + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
13891 + strcat(port_status_str, "reset ");
13893 + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
13894 + strcat(port_status_str, "full-speed ");
13896 + strcat(port_status_str, "low-speed ");
13898 + return port_status_str;
13902 +char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
13903 + static char endpoint_to_str_buff[128];
13905 + int epnum = ed->bEndpointAddress & 0x0F;
13906 + int dir = ed->bEndpointAddress & 0x80;
13907 + int type = ed->bmAttributes & 0x03;
13908 + endpoint_to_str_buff[0] = '\0';
13909 + sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
13912 + sprintf(tmp, " ctrl");
13915 + sprintf(tmp, " isoc");
13918 + sprintf(tmp, " bulk");
13921 + sprintf(tmp, " intr");
13924 + strcat(endpoint_to_str_buff, tmp);
13926 + sprintf(tmp, " in");
13928 + sprintf(tmp, " out");
13930 + strcat(endpoint_to_str_buff, tmp);
13932 + return endpoint_to_str_buff;
13935 +/* Debug helper functions for Transfer Controller */
13936 +char* pipe_to_str(unsigned int pipe) {
13937 + static char pipe_to_str_buff[128];
13939 + sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe));
13940 + sprintf(tmp, " type:%s", str_type(pipe));
13941 + strcat(pipe_to_str_buff, tmp);
13943 + sprintf(tmp, " dev:%d", usb_pipedevice(pipe));
13944 + strcat(pipe_to_str_buff, tmp);
13945 + sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe));
13946 + strcat(pipe_to_str_buff, tmp);
13947 + return pipe_to_str_buff;
13951 +#define USB_DEBUG_DESC 1
13953 +#ifdef USB_DEBUG_DESC
13954 +#define dump_in_desc(x) __dump_in_desc(x)
13955 +#define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__) /* "..." in the expansion is invalid C; forward with __VA_ARGS__ (C99) */
13956 +#define dump_ep_desc(x) __dump_ep_desc(x)
13957 +#define dump_ept_data(x) __dump_ept_data(x)
13959 +#define dump_in_desc(...) do {} while (0)
13960 +#define dump_sb_desc(...) do {} while (0)
13961 +#define dump_ep_desc(...) do {} while (0)
13965 +/* Uncomment this to enable massive function call trace
13966 + #define USB_DEBUG_TRACE */
13968 +#ifdef USB_DEBUG_TRACE
13969 +#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
13970 +#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
13972 +#define DBFENTER do {} while (0)
13973 +#define DBFEXIT do {} while (0)
13976 +#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
13977 +{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
13979 +/* Most helpful debugging aid */
13980 +#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
13983 +/***************************************************************************/
13984 +/***************************************************************************/
13985 +/* Forward declarations */
13986 +/***************************************************************************/
13987 +/***************************************************************************/
13988 +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
13989 +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
13990 +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
13991 +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);
13993 +void rh_port_status_change(__u16[]);
13994 +int rh_clear_port_feature(__u8, __u16);
13995 +int rh_set_port_feature(__u8, __u16);
13996 +static void rh_disable_port(unsigned int port);
13998 +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
14001 +static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
14003 +static void tc_free_epid(struct usb_host_endpoint *ep);
14004 +static int tc_allocate_epid(void);
14005 +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
14006 +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
14009 +static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
14011 +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);
14013 +static inline struct urb *urb_list_first(int epid);
14014 +static inline void urb_list_add(struct urb *urb, int epid,
14016 +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
14017 +static inline void urb_list_del(struct urb *urb, int epid);
14018 +static inline void urb_list_move_last(struct urb *urb, int epid);
14019 +static inline struct urb *urb_list_next(struct urb *urb, int epid);
14021 +int create_sb_for_urb(struct urb *urb, int mem_flags);
14022 +int init_intr_urb(struct urb *urb, int mem_flags);
14024 +static inline void etrax_epid_set(__u8 index, __u32 data);
14025 +static inline void etrax_epid_clear_error(__u8 index);
14026 +static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
14028 +static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout);
14029 +static inline __u32 etrax_epid_get(__u8 index);
14031 +/* We're accessing the same register position in Etrax so
14032 + when we do full access the internal difference doesn't matter */
14033 +#define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
14034 +#define etrax_epid_iso_get(index) etrax_epid_get(index)
14037 +static void tc_dma_process_isoc_urb(struct urb *urb);
14038 +static void tc_dma_process_queue(int epid);
14039 +static void tc_dma_unlink_intr_urb(struct urb *urb);
14040 +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
14041 +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);
14043 +static void tc_bulk_start_timer_func(unsigned long dummy);
14044 +static void tc_bulk_eot_timer_func(unsigned long dummy);
14047 +/*************************************************************/
14048 +/*************************************************************/
14049 +/* Host Controller Driver block */
14050 +/*************************************************************/
14051 +/*************************************************************/
14053 +/* HCD operations */
14054 +static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
14055 +static int crisv10_hcd_reset(struct usb_hcd *);
14056 +static int crisv10_hcd_start(struct usb_hcd *);
14057 +static void crisv10_hcd_stop(struct usb_hcd *);
14059 +static int crisv10_hcd_suspend(struct device *, u32, u32);
14060 +static int crisv10_hcd_resume(struct device *, u32);
14061 +#endif /* CONFIG_PM */
14062 +static int crisv10_hcd_get_frame(struct usb_hcd *);
14064 +static int tc_urb_enqueue(struct usb_hcd *, struct usb_host_endpoint *ep, struct urb *, gfp_t mem_flags);
14065 +static int tc_urb_dequeue(struct usb_hcd *, struct urb *);
14066 +static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);
14068 +static int rh_status_data_request(struct usb_hcd *, char *);
14069 +static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);
14072 +static int crisv10_hcd_hub_suspend(struct usb_hcd *);
14073 +static int crisv10_hcd_hub_resume(struct usb_hcd *);
14074 +#endif /* CONFIG_PM */
14075 +#ifdef CONFIG_USB_OTG
14076 +static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
14077 +#endif /* CONFIG_USB_OTG */
14079 +/* host controller driver interface */
14080 +static const struct hc_driver crisv10_hc_driver =
14082 + .description = hc_name,
14083 + .product_desc = product_desc,
14084 + .hcd_priv_size = sizeof(struct crisv10_hcd),
14086 + /* Attaching IRQ handler manually in probe() */
14087 + /* .irq = crisv10_hcd_irq, */
14089 + .flags = HCD_USB11,
14091 + /* called to init HCD and root hub */
14092 + .reset = crisv10_hcd_reset,
14093 + .start = crisv10_hcd_start,
14095 + /* cleanly make HCD stop writing memory and doing I/O */
14096 + .stop = crisv10_hcd_stop,
14098 + /* return current frame number */
14099 + .get_frame_number = crisv10_hcd_get_frame,
14102 + /* Manage i/o requests via the Transfer Controller */
14103 + .urb_enqueue = tc_urb_enqueue,
14104 + .urb_dequeue = tc_urb_dequeue,
14106 + /* hw synch, freeing endpoint resources that urb_dequeue can't */
14107 + .endpoint_disable = tc_endpoint_disable,
14110 + /* Root Hub support */
14111 + .hub_status_data = rh_status_data_request,
14112 + .hub_control = rh_control_request,
14114 + .hub_suspend = rh_suspend_request,
14115 + .hub_resume = rh_resume_request,
14116 +#endif /* CONFIG_PM */
14117 +#ifdef CONFIG_USB_OTG
14118 + .start_port_reset = crisv10_hcd_start_port_reset,
14119 +#endif /* CONFIG_USB_OTG */
14124 + * conversion between pointers to a hcd and the corresponding
14128 +static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
14130 + return (struct crisv10_hcd *) hcd->hcd_priv;
14133 +static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
14135 + return container_of((void *) hcd, struct usb_hcd, hcd_priv);
14138 +/* check if specified port is in use */
14139 +static inline int port_in_use(unsigned int port)
14141 + return ports & (1 << port);
14144 +/* number of ports in use */
14145 +static inline unsigned int num_ports(void)
14147 + unsigned int i, num = 0;
14148 + for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
14149 + if (port_in_use(i))
14154 +/* map hub port number to the port number used internally by the HC */
14155 +static inline unsigned int map_port(unsigned int port)
14157 + unsigned int i, num = 0;
14158 + for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
14159 + if (port_in_use(i))
14160 + if (++num == port)
14165 +/* size of descriptors in slab cache */
14167 +#define MAX(x, y) ((x) > (y) ? (x) : (y))
14171 +/******************************************************************/
14172 +/* Hardware Interrupt functions */
14173 +/******************************************************************/
14175 +/* Fast interrupt handler for HC */
14176 +static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
14178 + struct usb_hcd *hcd = vcd;
14179 + struct crisv10_irq_reg reg;
14181 + unsigned long flags;
14185 + ASSERT(hcd != NULL);
14188 + /* Turn off other interrupts while handling these sensitive cases */
14189 + local_irq_save(flags);
14191 + /* Read out which interrupts are flagged */
14192 + irq_mask = *R_USB_IRQ_MASK_READ;
14193 + reg.r_usb_irq_mask_read = irq_mask;
14195 + /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
14196 + R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
14197 + clears the ourun and perror fields of R_USB_STATUS. */
14198 + reg.r_usb_status = *R_USB_STATUS;
14200 + /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
14202 + reg.r_usb_epid_attn = *R_USB_EPID_ATTN;
14204 + /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
14205 + port_status interrupt. */
14206 + reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
14207 + reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;
14209 + /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
14210 + /* Note: the lower 11 bits contain the actual frame number, sent with each
14212 + reg.r_usb_fm_number = *R_USB_FM_NUMBER;
14214 + /* Interrupts are handled in order of priority. */
14215 + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
14216 + crisv10_hcd_port_status_irq(®);
14218 + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
14219 + crisv10_hcd_epid_attn_irq(®);
14221 + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
14222 + crisv10_hcd_ctl_status_irq(®);
14224 + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
14225 + crisv10_hcd_isoc_eof_irq(®);
14227 + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
14228 + /* Update/restart the bulk start timer since obviously the channel is
14230 + mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
14231 + /* Update/restart the bulk eot timer since we just received a bulk eot
14233 + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
14235 + /* Check for finished bulk transfers on epids */
14236 + check_finished_bulk_tx_epids(hcd, 0);
14238 + local_irq_restore(flags);
14241 + return IRQ_HANDLED;
14245 +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
14246 + struct usb_hcd *hcd = reg->hcd;
14247 + struct crisv10_urb_priv *urb_priv;
14251 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
14252 + if (test_bit(epid, (void *)®->r_usb_epid_attn)) {
14257 + if (epid == DUMMY_EPID || epid == INVALID_EPID) {
14258 + /* We definitely don't care about these ones. Besides, they are
14259 + always disabled, so any possible disabling caused by the
14260 + epid attention interrupt is irrelevant. */
14261 + warn("Got epid_attn for INVALID_EPID or DUMMY_EPID (%d).", epid);
14265 + if(!epid_inuse(epid)) {
14266 + irq_err("Epid attention on epid:%d that isn't in use\n", epid);
14267 + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
14268 + debug_epid(epid);
14272 + /* Note that although there are separate R_USB_EPT_DATA and
14273 + R_USB_EPT_DATA_ISO registers, they are located at the same address and
14274 + are of the same size. In other words, this read should be ok for isoc
14276 + ept_data = etrax_epid_get(epid);
14277 + error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);
14279 + /* Get the active URB for this epid. We blatantly assume
14280 + that only this URB could have caused the epid attention. */
14281 + urb = activeUrbList[epid];
14282 + if (urb == NULL) {
14283 + irq_err("Attention on epid:%d error:%d with no active URB.\n",
14284 + epid, error_code);
14285 + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
14286 + debug_epid(epid);
14290 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
14291 + ASSERT(urb_priv);
14293 + /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
14294 + if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
14296 + /* Isoc traffic doesn't have error_count_in/error_count_out. */
14297 + if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
14298 + (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
14299 + IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
14300 + /* Check if URB already is marked for late-finish; we can get
14301 + several 3rd errors for Intr traffic when a device is unplugged */
14302 + if(urb_priv->later_data == NULL) {
14304 + irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
14305 + str_dir(urb->pipe), str_type(urb->pipe),
14306 + (unsigned int)urb, urb_priv->urb_num);
14308 + tc_finish_urb_later(hcd, urb, -EPROTO);
14311 + } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
14312 + irq_warn("Perror for epid:%d\n", epid);
14313 + printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
14314 + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
14316 + debug_epid(epid);
14318 + if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
14319 + /* invalid ep_id */
14320 + panic("Perror because of invalid epid."
14321 + " Deconfigured too early?");
14323 + /* past eof1, near eof, zout transfer, setup transfer */
14324 + /* Dump the urb and the relevant EP descriptor. */
14325 + panic("Something wrong with DMA descriptor contents."
14326 + " Too much traffic inserted?");
14328 + } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
14329 + /* buffer ourun */
14330 + printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
14331 + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
14333 + debug_epid(epid);
14335 + panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
14337 + irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
14338 + str_dir(urb->pipe), str_type(urb->pipe));
14339 + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
14341 + debug_epid(epid);
14344 + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
14346 + /* Not really a protocol error, just says that the endpoint gave
14347 + a stall response. Note that error_code cannot be stall for isoc. */
14348 + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
14349 + panic("Isoc traffic cannot stall");
14352 + tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
14353 + str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
14354 + tc_finish_urb(hcd, urb, -EPIPE);
14356 + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
14358 + /* Two devices responded to a transaction request. Must be resolved
14359 + by software. FIXME: Reset ports? */
14360 + panic("Bus error for epid %d."
14361 + " Two devices responded to transaction request\n",
14364 + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
14366 + /* DMA overrun or underrun. */
14367 + irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
14368 + str_dir(urb->pipe), str_type(urb->pipe));
14370 + /* It seems that error_code = buffer_error in
14371 + R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
14372 + are the same error. */
14373 + tc_finish_urb(hcd, urb, -EPROTO);
14375 + irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
14376 + str_dir(urb->pipe), str_type(urb->pipe));
14377 + dump_ept_data(epid);
14384 +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
14386 + __u16 port_reg[USB_ROOT_HUB_PORTS];
14388 + port_reg[0] = reg->r_usb_rh_port_status_1;
14389 + port_reg[1] = reg->r_usb_rh_port_status_2;
14390 + rh_port_status_change(port_reg);
14394 +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
14398 + struct crisv10_urb_priv *urb_priv;
14402 + for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
14404 + /* Only check epids that are in use, is valid and has SB list */
14405 + if (!epid_inuse(epid) || epid == INVALID_EPID ||
14406 + TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
14407 + /* Nothing here to see. */
14410 + ASSERT(epid_isoc(epid));
14412 + /* Get the active URB for this epid (if any). */
14413 + urb = activeUrbList[epid];
14415 + isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
14418 + if(!epid_out_traffic(epid)) {
14419 + /* Sanity check. */
14420 + ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
14422 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
14423 + ASSERT(urb_priv);
14425 + if (urb_priv->urb_state == NOT_STARTED) {
14426 + /* If ASAP is not set and urb->start_frame is the current frame,
14427 + start the transfer. */
14428 + if (!(urb->transfer_flags & URB_ISO_ASAP) &&
14429 + (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
14430 + /* EP should not be enabled if we're waiting for start_frame */
14431 + ASSERT((TxIsocEPList[epid].command &
14432 + IO_STATE(USB_EP_command, enable, yes)) == 0);
14434 + isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
14435 + TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
14437 + /* This urb is now active. */
14438 + urb_priv->urb_state = STARTED;
14448 +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
14450 + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);
14453 + ASSERT(crisv10_hcd);
14455 + irq_dbg("ctr_status_irq, controller status: %s\n",
14456 + hcd_status_to_str(reg->r_usb_status));
14458 + /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
14459 + list for the corresponding epid? */
14460 + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
14461 + panic("USB controller got ourun.");
14463 + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
14465 + /* Before, etrax_usb_do_intr_recover was called on this epid if it was
14466 + an interrupt pipe. I don't see how re-enabling all EP descriptors
14467 + will help if there was a programming error. */
14468 + panic("USB controller got perror.");
14471 + /* Keep track of USB Controller, if it's running or not */
14472 + if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
14473 + crisv10_hcd->running = 1;
14475 + crisv10_hcd->running = 0;
14478 + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
14479 + /* We should never operate in device mode. */
14480 + panic("USB controller in device mode.");
14483 + /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
14484 + using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
14485 + set_bit(HCD_FLAG_SAW_IRQ, ®->hcd->flags);
14491 +/******************************************************************/
14492 +/* Host Controller interface functions */
14493 +/******************************************************************/
14495 +static inline void crisv10_ready_wait(void) {
14496 + volatile int timeout = 10000;
14497 + /* Check the busy bit of USB controller in Etrax */
14498 + while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
14499 + (timeout-- > 0));
14500 + if(timeout == 0) {
14501 + warn("Timeout while waiting for USB controller to be idle\n");
14505 +/* reset host controller */
14506 +static int crisv10_hcd_reset(struct usb_hcd *hcd)
14509 + hcd_dbg(hcd, "reset\n");
14512 + /* Reset the USB interface. */
14515 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
14516 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
14517 + IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
14524 +/* start host controller */
14525 +static int crisv10_hcd_start(struct usb_hcd *hcd)
14528 + hcd_dbg(hcd, "start\n");
14530 + crisv10_ready_wait();
14532 + /* Start processing of USB traffic. */
14534 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
14535 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
14536 + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
14540 + hcd->state = HC_STATE_RUNNING;
14546 +/* stop host controller */
14547 +static void crisv10_hcd_stop(struct usb_hcd *hcd)
14550 + hcd_dbg(hcd, "stop\n");
14551 + crisv10_hcd_reset(hcd);
14555 +/* return the current frame number */
14556 +static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
14560 + return (*R_USB_FM_NUMBER & 0x7ff);
14563 +#ifdef CONFIG_USB_OTG
14565 +static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
14567 + return 0; /* no-op for now */
14570 +#endif /* CONFIG_USB_OTG */
14573 +/******************************************************************/
14574 +/* Root Hub functions */
14575 +/******************************************************************/
14577 +/* root hub status */
14578 +static const struct usb_hub_status rh_hub_status =
14584 +/* root hub descriptor */
14585 +static const u8 rh_hub_descr[] =
14587 + 0x09, /* bDescLength */
14588 + 0x29, /* bDescriptorType */
14589 + USB_ROOT_HUB_PORTS, /* bNbrPorts */
14590 + 0x00, /* wHubCharacteristics */
14592 + 0x01, /* bPwrOn2pwrGood */
14593 + 0x00, /* bHubContrCurrent */
14594 + 0x00, /* DeviceRemovable */
14595 + 0xff /* PortPwrCtrlMask */
14598 +/* Actual holder of root hub status*/
14599 +struct crisv10_rh rh;
14601 +/* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
14602 +int rh_init(void) {
14604 + /* Reset port status flags */
14605 + for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
14606 + rh.wPortChange[i] = 0;
14607 + rh.wPortStatusPrev[i] = 0;
14612 +#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
14613 + (1<<USB_PORT_FEAT_ENABLE)|\
14614 + (1<<USB_PORT_FEAT_SUSPEND)|\
14615 + (1<<USB_PORT_FEAT_RESET))
14617 +/* Handle port status change interrupt (called from bottom part interrupt) */
14618 +void rh_port_status_change(__u16 port_reg[]) {
14622 + for(i = 0; i < USB_ROOT_HUB_PORTS; i++) {
14623 + /* Xor out changes since last read, masked for important flags */
14624 + wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
14625 + /* Or changes together with (if any) saved changes */
14626 + rh.wPortChange[i] |= wChange;
14627 + /* Save new status */
14628 + rh.wPortStatusPrev[i] = port_reg[i];
14631 + rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i+1,
14632 + port_status_to_str(wChange),
14633 + port_status_to_str(port_reg[i]));
14638 +/* Construct port status change bitmap for the root hub */
14639 +static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
14641 + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
14646 + * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
14647 + * return bitmap indicating ports with status change
14650 + spin_lock(&crisv10_hcd->lock);
14651 + for (i = 1; i <= crisv10_hcd->num_ports; i++) {
14652 + if (rh.wPortChange[map_port(i)]) {
14653 + *buf |= (1 << i);
14654 + rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i,
14655 + port_status_to_str(rh.wPortChange[map_port(i)]),
14656 + port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
14659 + spin_unlock(&crisv10_hcd->lock);
14661 + return *buf == 0 ? 0 : 1;
14664 +/* Handle a control request for the root hub (called from hcd_driver) */
14665 +static int rh_control_request(struct usb_hcd *hcd,
14672 + struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
14677 + switch (typeReq) {
14678 + case GetHubDescriptor:
14679 + rh_dbg("GetHubDescriptor\n");
14680 + len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
14681 + memcpy(buf, rh_hub_descr, len);
14682 + buf[2] = crisv10_hcd->num_ports;
14684 + case GetHubStatus:
14685 + rh_dbg("GetHubStatus\n");
14686 + len = min_t(unsigned int, sizeof rh_hub_status, wLength);
14687 + memcpy(buf, &rh_hub_status, len);
14689 + case GetPortStatus:
14690 + if (!wIndex || wIndex > crisv10_hcd->num_ports)
14692 + rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex,
14693 + port_status_to_str(rh.wPortChange[map_port(wIndex)]),
14694 + port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
14695 + *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
14696 + *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
14698 + case SetHubFeature:
14699 + rh_dbg("SetHubFeature\n");
14700 + case ClearHubFeature:
14701 + rh_dbg("ClearHubFeature\n");
14702 + switch (wValue) {
14703 + case C_HUB_OVER_CURRENT:
14704 + case C_HUB_LOCAL_POWER:
14705 + rh_warn("Not implemented hub request:%d \n", typeReq);
14706 + /* not implemented */
14712 + case SetPortFeature:
14713 + if (!wIndex || wIndex > crisv10_hcd->num_ports)
14715 + if(rh_set_port_feature(map_port(wIndex), wValue))
14718 + case ClearPortFeature:
14719 + if (!wIndex || wIndex > crisv10_hcd->num_ports)
14721 + if(rh_clear_port_feature(map_port(wIndex), wValue))
14725 + rh_warn("Unknown hub request: %d\n", typeReq);
14733 +int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
14734 + __u8 bUsbCommand = 0;
14735 + switch(wFeature) {
14736 + case USB_PORT_FEAT_RESET:
14737 + rh_dbg("SetPortFeature: reset\n");
14738 + bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
14741 + case USB_PORT_FEAT_SUSPEND:
14742 + rh_dbg("SetPortFeature: suspend\n");
14743 + bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
14746 + case USB_PORT_FEAT_POWER:
14747 + rh_dbg("SetPortFeature: power\n");
14749 + case USB_PORT_FEAT_C_CONNECTION:
14750 + rh_dbg("SetPortFeature: c_connection\n");
14752 + case USB_PORT_FEAT_C_RESET:
14753 + rh_dbg("SetPortFeature: c_reset\n");
14755 + case USB_PORT_FEAT_C_OVER_CURRENT:
14756 + rh_dbg("SetPortFeature: c_over_current\n");
14760 + /* Select which port via the port_sel field */
14761 + bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);
14763 + /* Make sure the controller isn't busy. */
14764 + crisv10_ready_wait();
14765 + /* Send out the actual command to the USB controller */
14766 + *R_USB_COMMAND = bUsbCommand;
14768 + /* If port reset then also bring USB controller into running state */
14769 + if(wFeature == USB_PORT_FEAT_RESET) {
14770 + /* Wait a while for controller to first become started after port reset */
14771 + udelay(12000); /* 12ms blocking wait */
14773 + /* Make sure the controller isn't busy. */
14774 + crisv10_ready_wait();
14776 + /* If all enabled ports were disabled the host controller goes down into
14777 + started mode, so we need to bring it back into the running state.
14778 + (This is safe even if it's already in the running state.) */
14780 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
14781 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
14782 + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
14787 + rh_dbg("SetPortFeature: unknown feature\n");
14793 +int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
14794 + switch(wFeature) {
14795 + case USB_PORT_FEAT_ENABLE:
14796 + rh_dbg("ClearPortFeature: enable\n");
14797 + rh_disable_port(bPort);
14799 + case USB_PORT_FEAT_SUSPEND:
14800 + rh_dbg("ClearPortFeature: suspend\n");
14802 + case USB_PORT_FEAT_POWER:
14803 + rh_dbg("ClearPortFeature: power\n");
14806 + case USB_PORT_FEAT_C_ENABLE:
14807 + rh_dbg("ClearPortFeature: c_enable\n");
14809 + case USB_PORT_FEAT_C_SUSPEND:
14810 + rh_dbg("ClearPortFeature: c_suspend\n");
14812 + case USB_PORT_FEAT_C_CONNECTION:
14813 + rh_dbg("ClearPortFeature: c_connection\n");
14815 + case USB_PORT_FEAT_C_OVER_CURRENT:
14816 + rh_dbg("ClearPortFeature: c_over_current\n");
14818 + case USB_PORT_FEAT_C_RESET:
14819 + rh_dbg("ClearPortFeature: c_reset\n");
14822 + rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
14825 + rh_dbg("ClearPortFeature: unknown feature\n");
14833 +/* Handle a suspend request for the root hub (called from hcd_driver) */
14834 +static int rh_suspend_request(struct usb_hcd *hcd)
14836 + return 0; /* no-op for now */
14839 +/* Handle a resume request for the root hub (called from hcd_driver) */
14840 +static int rh_resume_request(struct usb_hcd *hcd)
14842 + return 0; /* no-op for now */
14844 +#endif /* CONFIG_PM */
14848 +/* Wrapper function for workaround port disable registers in USB controller */
14849 +static void rh_disable_port(unsigned int port) {
14850 + volatile int timeout = 10000;
14851 + volatile char* usb_portx_disable;
14854 + usb_portx_disable = R_USB_PORT1_DISABLE;
14857 + usb_portx_disable = R_USB_PORT2_DISABLE;
14860 + /* Invalid port index */
14863 + /* Set disable flag in special register */
14864 + *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
14865 + /* Wait until not enabled anymore */
14866 + while((rh.wPortStatusPrev[port] &
14867 + IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
14868 + (timeout-- > 0));
14869 + if(timeout == 0) {
14870 + warn("Timeout while waiting for port %d to become disabled\n", port);
14872 + /* clear disable flag in special register */
14873 + *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
14874 + rh_info("Physical port %d disabled\n", port+1);
14878 +/******************************************************************/
14879 +/* Transfer Controller (TC) functions */
14880 +/******************************************************************/
14882 +/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
14884 + To adjust it dynamically we would have to get an interrupt when we reach
14885 + the end of the rx descriptor list, or when we get close to the end, and
14886 + then allocate more descriptors. */
14887 +#define NBR_OF_RX_DESC 512
14888 +#define RX_DESC_BUF_SIZE 1024
14889 +#define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
14892 +/* Local variables for Transfer Controller */
14893 +/* --------------------------------------- */
14895 +/* This is a circular (double-linked) list of the active urbs for each epid.
14896 + The head is never removed, and new urbs are linked onto the list as
14897 + urb_entry_t elements. Don't reference urb_list directly; use the wrapper
14898 + functions instead (which includes spin_locks) */
14899 +static struct list_head urb_list[NBR_OF_EPIDS];
14901 +/* Read about the need and usage of this lock in submit_ctrl_urb. */
14902 +/* Lock for URB lists for each EPID */
14903 +static spinlock_t urb_list_lock;
14905 +/* Lock for EPID array register (R_USB_EPT_x) in Etrax */
14906 +static spinlock_t etrax_epid_lock;
14908 +/* Lock for dma8 sub0 handling */
14909 +static spinlock_t etrax_dma8_sub0_lock;
14911 +/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
14912 + Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be
14913 + cache aligned. */
14914 +static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
14915 +static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
14917 +/* Pointers into RxDescList. */
14918 +static volatile struct USB_IN_Desc *myNextRxDesc;
14919 +static volatile struct USB_IN_Desc *myLastRxDesc;
14921 +/* A zout transfer makes a memory access at the address of its buf pointer,
14922 + which means that setting this buf pointer to 0 will cause an access to the
14923 + flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
14924 + (depending on DMA burst size) transfer.
14925 + Instead, we set it to 1, and point it to this buffer. */
14926 +static int zout_buffer[4] __attribute__ ((aligned (4)));
14928 +/* Cache for allocating new EP and SB descriptors. */
14929 +static kmem_cache_t *usb_desc_cache;
14931 +/* Cache for the data allocated in the isoc descr top half. */
14932 +static kmem_cache_t *isoc_compl_cache;
14934 +/* Cache for the data allocated when delayed finishing of URBs */
14935 +static kmem_cache_t *later_data_cache;
14938 +/* Counter to keep track of how many Isoc EP we have sat up. Used to enable
14939 + and disable iso_eof interrupt. We only need these interrupts when we have
14940 + Isoc data endpoints (consumes CPU cycles).
14941 + FIXME: This could be more fine granular, so this interrupt is only enabled
14942 + when we have a In Isoc URB not URB_ISO_ASAP flaged queued. */
14943 +static int isoc_epid_counter;
14945 +/* Protecting wrapper functions for R_USB_EPT_x */
14946 +/* -------------------------------------------- */
14947 +static inline void etrax_epid_set(__u8 index, __u32 data) {
14948 + unsigned long flags;
14949 + spin_lock_irqsave(&etrax_epid_lock, flags);
14950 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
14952 + *R_USB_EPT_DATA = data;
14953 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
14956 +static inline void etrax_epid_clear_error(__u8 index) {
14957 + unsigned long flags;
14958 + spin_lock_irqsave(&etrax_epid_lock, flags);
14959 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
14961 + *R_USB_EPT_DATA &=
14962 + ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
14963 + IO_MASK(R_USB_EPT_DATA, error_count_out) |
14964 + IO_MASK(R_USB_EPT_DATA, error_code));
14965 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
14968 +static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
14970 + unsigned long flags;
14971 + spin_lock_irqsave(&etrax_epid_lock, flags);
14972 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
14975 + *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
14976 + *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
14978 + *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
14979 + *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
14981 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
14984 +static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
14985 + unsigned long flags;
14987 + spin_lock_irqsave(&etrax_epid_lock, flags);
14988 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
14991 + toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
14993 + toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
14995 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
15000 +static inline __u32 etrax_epid_get(__u8 index) {
15001 + unsigned long flags;
15003 + spin_lock_irqsave(&etrax_epid_lock, flags);
15004 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
15006 + data = *R_USB_EPT_DATA;
15007 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
15014 +/* Main functions for Transfer Controller */
15015 +/* -------------------------------------- */
15017 +/* Init structs, memories and lists used by Transfer Controller */
15018 +int tc_init(struct usb_hcd *hcd) {
15020 + /* Clear software state info for all epids */
15021 + memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
15023 + /* Set Invalid and Dummy as being in use and disabled */
15024 + epid_state[INVALID_EPID].inuse = 1;
15025 + epid_state[DUMMY_EPID].inuse = 1;
15026 + epid_state[INVALID_EPID].disabled = 1;
15027 + epid_state[DUMMY_EPID].disabled = 1;
15029 + /* Clear counter for how many Isoc epids we have sat up */
15030 + isoc_epid_counter = 0;
15032 + /* Initialize the urb list by initiating a head for each list.
15033 + Also reset list hodling active URB for each epid */
15034 + for (i = 0; i < NBR_OF_EPIDS; i++) {
15035 + INIT_LIST_HEAD(&urb_list[i]);
15036 + activeUrbList[i] = NULL;
15039 + /* Init lock for URB lists */
15040 + spin_lock_init(&urb_list_lock);
15041 + /* Init lock for Etrax R_USB_EPT register */
15042 + spin_lock_init(&etrax_epid_lock);
15043 + /* Init lock for Etrax dma8 sub0 handling */
15044 + spin_lock_init(&etrax_dma8_sub0_lock);
15046 + /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
15048 + /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
15049 + allocate SB descriptors from this cache. This is ok since
15050 + sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
15051 + usb_desc_cache = kmem_cache_create("usb_desc_cache",
15052 + sizeof(struct USB_EP_Desc), 0,
15053 + SLAB_HWCACHE_ALIGN, 0, 0);
15054 + if(usb_desc_cache == NULL) {
15058 + /* Create slab cache for speedy allocation of memory for isoc bottom-half
15059 + interrupt handling */
15060 + isoc_compl_cache =
15061 + kmem_cache_create("isoc_compl_cache",
15062 + sizeof(struct crisv10_isoc_complete_data),
15063 + 0, SLAB_HWCACHE_ALIGN, 0, 0);
15064 + if(isoc_compl_cache == NULL) {
15068 + /* Create slab cache for speedy allocation of memory for later URB finish
15070 + later_data_cache =
15071 + kmem_cache_create("later_data_cache",
15072 + sizeof(struct urb_later_data),
15073 + 0, SLAB_HWCACHE_ALIGN, 0, 0);
15074 + if(later_data_cache == NULL) {
15079 + /* Initiate the bulk start timer. */
15080 + init_timer(&bulk_start_timer);
15081 + bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
15082 + bulk_start_timer.function = tc_bulk_start_timer_func;
15083 + add_timer(&bulk_start_timer);
15086 + /* Initiate the bulk eot timer. */
15087 + init_timer(&bulk_eot_timer);
15088 + bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
15089 + bulk_eot_timer.function = tc_bulk_eot_timer_func;
15090 + bulk_eot_timer.data = (unsigned long)hcd;
15091 + add_timer(&bulk_eot_timer);
15096 +/* Uninitialize all resources used by Transfer Controller */
15097 +void tc_destroy(void) {
15099 + /* Destroy all slab cache */
15100 + kmem_cache_destroy(usb_desc_cache);
15101 + kmem_cache_destroy(isoc_compl_cache);
15102 + kmem_cache_destroy(later_data_cache);
15104 + /* Remove timers */
15105 + del_timer(&bulk_start_timer);
15106 + del_timer(&bulk_eot_timer);
15109 +static void restart_dma8_sub0(void) {
15110 + unsigned long flags;
15111 + spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
15112 + /* Verify that the dma is not running */
15113 + if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
15114 + struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
15115 + while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
15116 + ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
15118 + /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID.
15119 + * ep->next is already a physical address; no need for a virt_to_phys. */
15120 + *R_DMA_CH8_SUB0_EP = ep->next;
15121 + /* Restart the DMA */
15122 + *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
15124 + spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
15127 +/* queue an URB with the transfer controller (called from hcd_driver) */
15128 +static int tc_urb_enqueue(struct usb_hcd *hcd,
15129 + struct usb_host_endpoint *ep,
15131 + gfp_t mem_flags) {
15136 + unsigned long flags;
15137 + struct crisv10_urb_priv *urb_priv;
15138 + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
15141 + if(!(crisv10_hcd->running)) {
15142 + /* The USB Controller is not running, probably because no device is
15143 + attached. No idea to enqueue URBs then */
15144 + tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
15145 + (unsigned int)urb);
15149 + maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
15150 + /* Special case check for In Isoc transfers. Specification states that each
15151 + In Isoc transfer consists of one packet and therefore it should fit into
15152 + the transfer-buffer of an URB.
15153 + We do the check here to be sure (an invalid scenario can be produced with
15154 + parameters to the usbtest suite) */
15155 + if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
15156 + (urb->transfer_buffer_length < maxpacket)) {
15157 + tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
15158 + return -EMSGSIZE;
15161 + /* Check if there is enough bandwidth for periodic transfer */
15162 + if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
15163 + /* only check (and later claim) if not already claimed */
15164 + if (urb->bandwidth == 0) {
15165 + bustime = usb_check_bandwidth(urb->dev, urb);
15166 + if (bustime < 0) {
15167 + tc_err("Not enough periodic bandwidth\n");
15173 + /* Check if there is a epid for URBs destination, if not this function
15175 + epid = tc_setup_epid(ep, urb, mem_flags);
15177 + tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
15182 + if(urb == activeUrbList[epid]) {
15183 + tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb);
15187 + if(urb_list_entry(urb, epid)) {
15188 + tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb);
15192 + /* If we actively have flaged endpoint as disabled then refuse submition */
15193 + if(epid_state[epid].disabled) {
15197 + /* Allocate and init HC-private data for URB */
15198 + if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
15202 + urb_priv = urb->hcpriv;
15204 + tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
15205 + (unsigned int)urb, urb_priv->urb_num, epid,
15206 + pipe_to_str(urb->pipe), urb->transfer_buffer_length);
15208 + /* Create and link SBs required for this URB */
15209 + retval = create_sb_for_urb(urb, mem_flags);
15210 + if(retval != 0) {
15211 + tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
15212 + urb_priv->urb_num);
15213 + urb_priv_free(hcd, urb);
15218 + /* Init intr EP pool if this URB is a INTR transfer. This pool is later
15219 + used when inserting EPs in the TxIntrEPList. We do the alloc here
15220 + so we can't run out of memory later */
15221 + if(usb_pipeint(urb->pipe)) {
15222 + retval = init_intr_urb(urb, mem_flags);
15223 + if(retval != 0) {
15224 + tc_warn("Failed to init Intr URB\n");
15225 + urb_priv_free(hcd, urb);
15231 + /* Disable other access when inserting USB */
15232 + local_irq_save(flags);
15234 + /* Claim bandwidth, if needed */
15236 + usb_claim_bandwidth(urb->dev, urb, bustime, 0);
15239 + /* Add URB to EP queue */
15240 + urb_list_add(urb, epid, mem_flags);
15242 + if(usb_pipeisoc(urb->pipe)) {
15243 + /* Special processing of Isoc URBs. */
15244 + tc_dma_process_isoc_urb(urb);
15246 + /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
15247 + tc_dma_process_queue(epid);
15250 + local_irq_restore(flags);
15256 +/* remove an URB from the transfer controller queues (called from hcd_driver)*/
15257 +static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) {
15258 + struct crisv10_urb_priv *urb_priv;
15259 + unsigned long flags;
15263 + /* Disable interrupts here since a descriptor interrupt for the isoc epid
15264 + will modify the sb list. This could possibly be done more granular, but
15265 + urb_dequeue should not be used frequently anyway.
15267 + local_irq_save(flags);
15269 + urb_priv = urb->hcpriv;
15272 + /* This happens if a device driver calls unlink on an urb that
15273 + was never submitted (lazy driver) or if the urb was completed
15274 + while dequeue was being called. */
15275 + tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb);
15276 + local_irq_restore(flags);
15279 + epid = urb_priv->epid;
15281 + tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
15282 + (urb == activeUrbList[epid]) ? "active" : "queued",
15283 + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
15284 + str_type(urb->pipe), epid, urb->status,
15285 + (urb_priv->later_data) ? "later-sched" : "");
15287 + /* For Bulk, Ctrl and Intr are only one URB active at a time. So any URB
15288 + that isn't active can be dequeued by just removing it from the queue */
15289 + if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
15290 + usb_pipeint(urb->pipe)) {
15292 + /* Check if URB haven't gone further than the queue */
15293 + if(urb != activeUrbList[epid]) {
15294 + ASSERT(urb_priv->later_data == NULL);
15295 + tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
15296 + " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
15297 + str_dir(urb->pipe), str_type(urb->pipe), epid);
15299 + /* Finish the URB with error status from USB core */
15300 + tc_finish_urb(hcd, urb, urb->status);
15301 + local_irq_restore(flags);
15306 + /* Set URB status to Unlink for handling when interrupt comes. */
15307 + urb_priv->urb_state = UNLINK;
15309 + /* Differentiate dequeing of Bulk and Ctrl from Isoc and Intr */
15310 + switch(usb_pipetype(urb->pipe)) {
15312 + /* Check if EP still is enabled */
15313 + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15314 + /* The EP was enabled, disable it. */
15315 + TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15317 + /* Kicking dummy list out of the party. */
15318 + TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
15320 + case PIPE_CONTROL:
15321 + /* Check if EP still is enabled */
15322 + if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15323 + /* The EP was enabled, disable it. */
15324 + TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15327 + case PIPE_ISOCHRONOUS:
15328 + /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
15329 + finish_isoc_urb(). Because there might the case when URB is dequeued
15330 + but there are other valid URBs waiting */
15332 + /* Check if In Isoc EP still is enabled */
15333 + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15334 + /* The EP was enabled, disable it. */
15335 + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15338 + case PIPE_INTERRUPT:
15339 + /* Special care is taken for interrupt URBs. EPs are unlinked in
15346 + /* Asynchronous unlink, finish the URB later from scheduled or other
15347 + event (data finished, error) */
15348 + tc_finish_urb_later(hcd, urb, urb->status);
15350 + local_irq_restore(flags);
15356 +static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
15357 + volatile int timeout = 10000;
15359 + struct crisv10_urb_priv* urb_priv;
15360 + unsigned long flags;
15362 + volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
15363 + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
15364 + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
15366 + int type = epid_state[epid].type;
15368 + /* Setting this flag will cause enqueue() to return -ENOENT for new
15369 + submitions on this endpoint and finish_urb() wont process queue further */
15370 + epid_state[epid].disabled = 1;
15374 + /* Check if EP still is enabled */
15375 + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15376 + /* The EP was enabled, disable it. */
15377 + TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15378 + tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
15380 + /* Do busy-wait until DMA not using this EP descriptor anymore */
15381 + while((*R_DMA_CH8_SUB0_EP ==
15382 + virt_to_phys(&TxBulkEPList[epid])) &&
15383 + (timeout-- > 0));
15384 + if(timeout == 0) {
15385 + warn("Timeout while waiting for DMA-TX-Bulk to leave EP for"
15386 + " epid:%d\n", epid);
15391 + case PIPE_CONTROL:
15392 + /* Check if EP still is enabled */
15393 + if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15394 + /* The EP was enabled, disable it. */
15395 + TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15396 + tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
15398 + /* Do busy-wait until DMA not using this EP descriptor anymore */
15399 + while((*R_DMA_CH8_SUB1_EP ==
15400 + virt_to_phys(&TxCtrlEPList[epid])) &&
15401 + (timeout-- > 0));
15402 + if(timeout == 0) {
15403 + warn("Timeout while waiting for DMA-TX-Ctrl to leave EP for"
15404 + " epid:%d\n", epid);
15409 + case PIPE_INTERRUPT:
15410 + local_irq_save(flags);
15411 + /* Disable all Intr EPs belonging to epid */
15412 + first_ep = &TxIntrEPList[0];
15413 + curr_ep = first_ep;
15415 + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
15416 + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
15418 + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
15420 + curr_ep = phys_to_virt(curr_ep->next);
15421 + } while (curr_ep != first_ep);
15423 + local_irq_restore(flags);
15426 + case PIPE_ISOCHRONOUS:
15427 + /* Check if EP still is enabled */
15428 + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15429 + tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
15430 + /* The EP was enabled, disable it. */
15431 + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15433 + while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
15434 + (timeout-- > 0));
15435 + if(timeout == 0) {
15436 + warn("Timeout while waiting for DMA-TX-Isoc to leave EP for"
15437 + " epid:%d\n", epid);
15443 + local_irq_save(flags);
15445 + /* Finish if there is active URB for this endpoint */
15446 + if(activeUrbList[epid] != NULL) {
15447 + urb = activeUrbList[epid];
15448 + urb_priv = urb->hcpriv;
15449 + ASSERT(urb_priv);
15450 + tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
15451 + (urb == activeUrbList[epid]) ? "active" : "queued",
15452 + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
15453 + str_type(urb->pipe), epid, urb->status,
15454 + (urb_priv->later_data) ? "later-sched" : "");
15456 + tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
15457 + ASSERT(activeUrbList[epid] == NULL);
15460 + /* Finish any queued URBs for this endpoint. There won't be any resubmitions
15461 + because epid_disabled causes enqueue() to fail for this endpoint */
15462 + while((urb = urb_list_first(epid)) != NULL) {
15463 + urb_priv = urb->hcpriv;
15464 + ASSERT(urb_priv);
15466 + tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
15467 + (urb == activeUrbList[epid]) ? "active" : "queued",
15468 + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
15469 + str_type(urb->pipe), epid, urb->status,
15470 + (urb_priv->later_data) ? "later-sched" : "");
15472 + tc_finish_urb(hcd, urb, -ENOENT);
15474 + epid_state[epid].disabled = 0;
15475 + local_irq_restore(flags);
15478 +/* free resources associated with an endpoint (called from hcd_driver) */
15479 +static void tc_endpoint_disable(struct usb_hcd *hcd,
15480 + struct usb_host_endpoint *ep) {
15482 + /* Only free epid if it has been allocated. We get two endpoint_disable
15483 + requests for ctrl endpoints so ignore the second one */
15484 + if(ep->hcpriv != NULL) {
15485 + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
15486 + int epid = ep_priv->epid;
15487 + tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
15488 + (unsigned int)ep, (unsigned int)ep->hcpriv,
15489 + endpoint_to_str(&(ep->desc)), epid);
15491 + tc_sync_finish_epid(hcd, epid);
15493 + ASSERT(activeUrbList[epid] == NULL);
15494 + ASSERT(list_empty(&urb_list[epid]));
15496 + tc_free_epid(ep);
15498 + tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
15499 + (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
15504 +static void tc_finish_urb_later_proc(void *data) {
15505 + unsigned long flags;
15506 + struct urb_later_data* uld = (struct urb_later_data*)data;
15507 + local_irq_save(flags);
15508 + if(uld->urb == NULL) {
15509 + late_dbg("Later finish of URB = NULL (allready finished)\n");
15511 + struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
15512 + ASSERT(urb_priv);
15513 + if(urb_priv->urb_num == uld->urb_num) {
15514 + late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
15515 + urb_priv->urb_num);
15516 + if(uld->status != uld->urb->status) {
15517 + errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
15518 + uld->urb->status, uld->status);
15520 + if(uld != urb_priv->later_data) {
15521 + panic("Scheduled uld not same as URBs uld\n");
15523 + tc_finish_urb(uld->hcd, uld->urb, uld->status);
15525 + late_warn("Ignoring later finish of URB:0x%x[%d]"
15526 + ", urb_num doesn't match current URB:0x%x[%d]",
15527 + (unsigned int)(uld->urb), uld->urb_num,
15528 + (unsigned int)(uld->urb), urb_priv->urb_num);
15531 + local_irq_restore(flags);
15532 + kmem_cache_free(later_data_cache, uld);
15535 +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
15537 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
15538 + struct urb_later_data* uld;
15540 + ASSERT(urb_priv);
15542 + if(urb_priv->later_data != NULL) {
15543 + /* Later-finish already scheduled for this URB, just update status to
15544 + return when finishing later */
15545 + errno_dbg("Later-finish schedule change URB status:%d with new"
15546 + " status:%d\n", urb_priv->later_data->status, status);
15548 + urb_priv->later_data->status = status;
15552 + uld = kmem_cache_alloc(later_data_cache, SLAB_ATOMIC);
15557 + uld->urb_num = urb_priv->urb_num;
15558 + uld->status = status;
15560 + INIT_WORK(&uld->ws, tc_finish_urb_later_proc, uld);
15561 + urb_priv->later_data = uld;
15563 + /* Schedule the finishing of the URB to happen later */
15564 + schedule_delayed_work(&uld->ws, LATER_TIMER_DELAY);
15567 +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
15570 +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
15571 + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
15572 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
15578 + ASSERT(urb_priv != NULL);
15579 + epid = urb_priv->epid;
15580 + urb_num = urb_priv->urb_num;
15582 + if(urb != activeUrbList[epid]) {
15583 + if(urb_list_entry(urb, epid)) {
15584 + /* Remove this URB from the list. Only happens when URB are finished
15585 + before having been processed (dequeing) */
15586 + urb_list_del(urb, epid);
15588 + tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
15589 + " epid:%d\n", (unsigned int)urb, urb_num, epid);
15593 + /* Cancel any pending later-finish of this URB */
15594 + if(urb_priv->later_data) {
15595 + urb_priv->later_data->urb = NULL;
15598 + /* For an IN pipe, we always set the actual length, regardless of whether
15599 + there was an error or not (which means the device driver can use the data
15600 + if it wants to). */
15601 + if(usb_pipein(urb->pipe)) {
15602 + urb->actual_length = urb_priv->rx_offset;
15604 + /* Set actual_length for OUT urbs also; the USB mass storage driver seems
15606 + if (status == 0 && urb->status == -EINPROGRESS) {
15607 + urb->actual_length = urb->transfer_buffer_length;
15609 + /* We wouldn't know of any partial writes if there was an error. */
15610 + urb->actual_length = 0;
15615 + /* URB status mangling */
15616 + if(urb->status == -EINPROGRESS) {
15617 + /* The USB core hasn't changed the status, let's set our finish status */
15618 + urb->status = status;
15620 + if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
15621 + usb_pipein(urb->pipe) &&
15622 + (urb->actual_length != urb->transfer_buffer_length)) {
15623 + /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
15624 + max length) is to be treated as an error. */
15625 + errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
15626 + " data:%d\n", (unsigned int)urb, urb_num,
15627 + urb->actual_length);
15628 + urb->status = -EREMOTEIO;
15631 + if(urb_priv->urb_state == UNLINK) {
15632 + /* URB has been requested to be unlinked asynchronously */
15633 + urb->status = -ECONNRESET;
15634 + errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
15635 + (unsigned int)urb, urb_num, urb->status);
15638 + /* The USB Core wants to signal some error via the URB, pass it through */
15641 + /* use completely different finish function for Isoc URBs */
15642 + if(usb_pipeisoc(urb->pipe)) {
15643 + tc_finish_isoc_urb(hcd, urb, status);
15647 + /* Do special unlinking of EPs for Intr traffic */
15648 + if(usb_pipeint(urb->pipe)) {
15649 + tc_dma_unlink_intr_urb(urb);
15652 + /* Release allocated bandwidth for periodic transfers */
15653 + if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
15654 + usb_release_bandwidth(urb->dev, urb, 0);
15656 + /* This URB is active on EP */
15657 + if(urb == activeUrbList[epid]) {
15658 + /* We need to fiddle with the toggle bits because the hardware doesn't do
15660 + toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
15661 + usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
15662 + usb_pipeout(urb->pipe), toggle);
15664 + /* Checks for Ctrl and Bulk EPs */
15665 + switch(usb_pipetype(urb->pipe)) {
15667 + /* Check so Bulk EP really is disabled before finishing active URB */
15668 + ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
15669 + IO_STATE(USB_EP_command, enable, no));
15670 + /* Disable sub-pointer for EP to avoid next tx_interrupt() to
15671 + process Bulk EP. */
15672 + TxBulkEPList[epid].sub = 0;
15673 + /* No need to wait for the DMA before changing the next pointer.
15674 + The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
15675 + the last one (INVALID_EPID) for actual traffic. */
15676 + TxBulkEPList[epid].next =
15677 + virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
15679 + case PIPE_CONTROL:
15680 + /* Check so Ctrl EP really is disabled before finishing active URB */
15681 + ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
15682 + IO_STATE(USB_EP_command, enable, no));
15683 + /* Disable sub-pointer for EP to avoid next tx_interrupt() to
15684 + process Ctrl EP. */
15685 + TxCtrlEPList[epid].sub = 0;
15690 + /* Free HC-private URB data*/
15691 + urb_priv_free(hcd, urb);
15693 + if(urb->status) {
15694 + errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
15695 + (unsigned int)urb, urb_num, str_dir(urb->pipe),
15696 + str_type(urb->pipe), urb->actual_length, urb->status);
15698 + tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
15699 + (unsigned int)urb, urb_num, str_dir(urb->pipe),
15700 + str_type(urb->pipe), urb->actual_length, urb->status);
15703 + /* If we just finished an active URB, clear active pointer. */
15704 + if (urb == activeUrbList[epid]) {
15705 + /* Make URB not active on EP anymore */
15706 + activeUrbList[epid] = NULL;
15708 + if(urb->status == 0) {
15709 + /* URB finished successfully, process queue to see if there are any more
15710 + URBs waiting before we call completion function.*/
15711 + if(crisv10_hcd->running) {
15712 + /* Only process queue if USB controller is running */
15713 + tc_dma_process_queue(epid);
15715 + tc_warn("No processing of queue for epid:%d, USB Controller not"
15716 + " running\n", epid);
15721 + /* Hand the URB from HCD to its USB device driver, using its completion
15723 + usb_hcd_giveback_urb (hcd, urb);
15725 + /* Check the queue once more if the URB returned with error, because we
15726 + didn't do it before the completion function because the specification
15727 + states that the queue should not restart until all its unlinked
15728 + URBs have been fully retired, with the completion functions run */
15729 + if(crisv10_hcd->running) {
15730 + /* Only process queue if USB controller is running */
15731 + tc_dma_process_queue(epid);
15733 + tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
15740 +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
15742 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
15744 + volatile int timeout = 10000;
15746 + ASSERT(urb_priv);
15747 + epid = urb_priv->epid;
15749 + ASSERT(usb_pipeisoc(urb->pipe));
15751 + /* Set that all isoc packets have status and length set before
15752 + completing the urb. */
15753 + for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
15754 + urb->iso_frame_desc[i].actual_length = 0;
15755 + urb->iso_frame_desc[i].status = -EPROTO;
15758 + /* Check if the URB is currently active (done or error) */
15759 + if(urb == activeUrbList[epid]) {
15760 + /* Check if there are another In Isoc URB queued for this epid */
15761 + if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
15762 + /* Move it from queue to active and mark it started so Isoc transfers
15763 + won't be interrupted.
15764 + All Isoc URBs data transfers are already added to DMA lists so we
15765 + don't have to insert anything in DMA lists here. */
15766 + activeUrbList[epid] = urb_list_first(epid);
15767 + ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
15769 + urb_list_del(activeUrbList[epid], epid);
15771 + if(urb->status) {
15772 + errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
15773 + " status:%d, new waiting URB:0x%x[%d]\n",
15774 + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
15775 + str_type(urb->pipe), urb_priv->isoc_packet_counter,
15776 + urb->number_of_packets, urb->status,
15777 + (unsigned int)activeUrbList[epid],
15778 + ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
15781 + } else { /* No other URB queued for this epid */
15782 + if(urb->status) {
15783 + errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
15784 + " status:%d, no new URB waiting\n",
15785 + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
15786 + str_type(urb->pipe), urb_priv->isoc_packet_counter,
15787 + urb->number_of_packets, urb->status);
15790 + /* Check if EP is still enabled, then shut it down. */
15791 + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
15792 + isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);
15794 + /* Should only occur for In Isoc EPs where SB isn't consumed. */
15795 + ASSERT(usb_pipein(urb->pipe));
15797 + /* Disable it and wait for it to stop */
15798 + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
15800 + /* Ah, the luxury of busy-wait. */
15801 + while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
15802 + (timeout-- > 0));
15803 + if(timeout == 0) {
15804 + warn("Timeout while waiting for DMA-TX-Isoc to leave EP for epid:%d\n", epid);
15808 + /* Unlink SB to say that epid is finished. */
15809 + TxIsocEPList[epid].sub = 0;
15810 + TxIsocEPList[epid].hw_len = 0;
15812 + /* No URB active for EP anymore */
15813 + activeUrbList[epid] = NULL;
15815 + } else { /* Finishing of not active URB (queued up with SBs though) */
15816 + isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
15817 + " SB queued but not active\n",
15818 + (unsigned int)urb, str_dir(urb->pipe),
15819 + urb_priv->isoc_packet_counter, urb->number_of_packets,
15821 + if(usb_pipeout(urb->pipe)) {
15822 + /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
15823 + struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;
15825 + iter_sb = TxIsocEPList[epid].sub ?
15826 + phys_to_virt(TxIsocEPList[epid].sub) : 0;
15829 + /* SB that is linked before this URBs first SB */
15830 + while (iter_sb && (iter_sb != urb_priv->first_sb)) {
15831 + prev_sb = iter_sb;
15832 + iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
15835 + if (iter_sb == 0) {
15836 + /* Unlink of the URB currently being transmitted. */
15838 + iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
15841 + while (iter_sb && (iter_sb != urb_priv->last_sb)) {
15842 + iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
15846 + next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
15848 + /* This should only happen if the DMA has completed
15849 + processing the SB list for this EP while interrupts
15851 + isoc_dbg("Isoc urb not found, already sent?\n");
15855 + prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
15857 + TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
15862 + /* Free HC-private URB data*/
15863 + urb_priv_free(hcd, urb);
15865 + usb_release_bandwidth(urb->dev, urb, 0);
15867 + /* Hand the URB from HCD to its USB device driver, using its completion
15869 + usb_hcd_giveback_urb (hcd, urb);
15872 +static __u32 urb_num = 0;
15874 +/* allocate and initialize URB private data */
15875 +static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
15877 + struct crisv10_urb_priv *urb_priv;
15879 + urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
15882 + memset(urb_priv, 0, sizeof *urb_priv);
15884 + urb_priv->epid = epid;
15885 + urb_priv->urb_state = NOT_STARTED;
15887 + urb->hcpriv = urb_priv;
15888 + /* Assign URB a sequence number, and increment counter */
15889 + urb_priv->urb_num = urb_num;
15894 +/* free URB private data */
15895 +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
15897 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
15898 + ASSERT(urb_priv != 0);
15900 + /* Check it has any SBs linked that needs to be freed*/
15901 + if(urb_priv->first_sb != NULL) {
15902 + struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
15904 + first_sb = urb_priv->first_sb;
15905 + last_sb = urb_priv->last_sb;
15907 + while(first_sb != last_sb) {
15908 + next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
15909 + kmem_cache_free(usb_desc_cache, first_sb);
15910 + first_sb = next_sb;
15913 + kmem_cache_free(usb_desc_cache, last_sb);
15917 + /* Check if it has any EPs in its Intr pool that also needs to be freed */
15918 + if(urb_priv->intr_ep_pool_length > 0) {
15919 + for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
15920 + kfree(urb_priv->intr_ep_pool[i]);
15923 + tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
15924 + urb_priv->intr_ep_pool_length, (unsigned int)urb);
15929 + urb->hcpriv = NULL;
15932 +static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
15933 + struct crisv10_ep_priv *ep_priv;
15935 + ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
15938 + memset(ep_priv, 0, sizeof *ep_priv);
15940 + ep->hcpriv = ep_priv;
15944 +static void ep_priv_free(struct usb_host_endpoint *ep) {
15945 + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
15948 + ep->hcpriv = NULL;
15951 +/* EPID handling functions, managing EP-list in Etrax through wrappers */
15952 +/* ------------------------------------------------------------------- */
15954 +/* Sets up a new EPID for an endpoint or returns existing if found */
15955 +static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
15958 + char devnum, endpoint, out_traffic, slow;
15961 + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
15965 + /* Check if a valid epid already is setup for this endpoint */
15966 + if(ep_priv != NULL) {
15967 + return ep_priv->epid;
15970 + /* We must find and initiate a new epid for this urb. */
15971 + epid = tc_allocate_epid();
15973 + if (epid == -1) {
15974 + /* Failed to allocate a new epid. */
15979 + /* We now have a new epid to use. Claim it. */
15980 + epid_state[epid].inuse = 1;
15982 + /* Init private data for new endpoint */
15983 + if(ep_priv_create(ep, mem_flags) != 0) {
15986 + ep_priv = ep->hcpriv;
15987 + ep_priv->epid = epid;
15989 + devnum = usb_pipedevice(urb->pipe);
15990 + endpoint = usb_pipeendpoint(urb->pipe);
15991 + slow = (urb->dev->speed == USB_SPEED_LOW);
15992 + maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
15994 + if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
15995 + /* We want both IN and OUT control traffic to be put on the same
15999 + out_traffic = usb_pipeout(urb->pipe);
16002 + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
16003 + epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
16004 + /* FIXME: Change any to the actual port? */
16005 + IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
16006 + IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
16007 + IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
16008 + IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
16009 + etrax_epid_iso_set(epid, epid_data);
16011 + epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
16012 + IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
16013 + /* FIXME: Change any to the actual port? */
16014 + IO_STATE(R_USB_EPT_DATA, port, any) |
16015 + IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
16016 + IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
16017 + IO_FIELD(R_USB_EPT_DATA, dev, devnum);
16018 + etrax_epid_set(epid, epid_data);
16021 + epid_state[epid].out_traffic = out_traffic;
16022 + epid_state[epid].type = usb_pipetype(urb->pipe);
16024 + tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
16025 + (unsigned int)ep, epid, devnum, endpoint, maxlen,
16026 + str_type(urb->pipe), out_traffic ? "out" : "in",
16027 + slow ? "low" : "full");
16029 + /* Enable Isoc eof interrupt if we set up the first Isoc epid */
16030 + if(usb_pipeisoc(urb->pipe)) {
16031 + isoc_epid_counter++;
16032 + if(isoc_epid_counter == 1) {
16033 + isoc_warn("Enabled Isoc eof interrupt\n");
16034 + *R_USB_IRQ_MASK_SET |= IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
16042 +static void tc_free_epid(struct usb_host_endpoint *ep) {
16043 + unsigned long flags;
16044 + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
16046 + volatile int timeout = 10000;
16050 + if (ep_priv == NULL) {
16051 + tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
16056 + epid = ep_priv->epid;
16058 + /* Disable Isoc eof interrupt if we free the last Isoc epid */
16059 + if(epid_isoc(epid)) {
16060 + ASSERT(isoc_epid_counter > 0);
16061 + isoc_epid_counter--;
16062 + if(isoc_epid_counter == 0) {
16063 + *R_USB_IRQ_MASK_SET &= ~IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
16064 + isoc_warn("Disabled Isoc eof interrupt\n");
16068 + /* Take lock manually instead of in epid_x_x wrappers,
16069 + because we need to be polling here */
16070 + spin_lock_irqsave(&etrax_epid_lock, flags);
16072 + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
16074 + while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
16075 + (timeout-- > 0));
16076 + if(timeout == 0) {
16077 + warn("Timeout while waiting for epid:%d to drop hold\n", epid);
16079 + /* This will, among other things, set the valid field to 0. */
16080 + *R_USB_EPT_DATA = 0;
16081 + spin_unlock_irqrestore(&etrax_epid_lock, flags);
16083 + /* Free resource in software state info list */
16084 + epid_state[epid].inuse = 0;
16086 + /* Free private endpoint data */
16087 + ep_priv_free(ep);
16092 +static int tc_allocate_epid(void) {
16095 + for (i = 0; i < NBR_OF_EPIDS; i++) {
16096 + if (!epid_inuse(i)) {
16102 + tc_warn("Found no free epids\n");
16108 +/* Wrappers around the list functions (include/linux/list.h). */
16109 +/* ---------------------------------------------------------- */
16110 +static inline int __urb_list_empty(int epid) {
16112 + retval = list_empty(&urb_list[epid]);
16116 +/* Returns first urb for this epid, or NULL if list is empty. */
16117 +static inline struct urb *urb_list_first(int epid) {
16118 + unsigned long flags;
16119 + struct urb *first_urb = 0;
16120 + spin_lock_irqsave(&urb_list_lock, flags);
16121 + if (!__urb_list_empty(epid)) {
16122 + /* Get the first urb (i.e. head->next). */
16123 + urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
16124 + first_urb = urb_entry->urb;
16126 + spin_unlock_irqrestore(&urb_list_lock, flags);
16127 + return first_urb;
16130 +/* Adds an urb_entry last in the list for this epid. */
16131 +static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
16132 + unsigned long flags;
16133 + urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
16134 + ASSERT(urb_entry);
16136 + urb_entry->urb = urb;
16137 + spin_lock_irqsave(&urb_list_lock, flags);
16138 + list_add_tail(&urb_entry->list, &urb_list[epid]);
16139 + spin_unlock_irqrestore(&urb_list_lock, flags);
16142 +/* Search through the list for an element that contains this urb. (The list
16143 + is expected to be short and the one we are about to delete will often be
16144 + the first in the list.)
16145 + Should be protected by spin_locks in calling function */
16146 +static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
16147 + struct list_head *entry;
16148 + struct list_head *tmp;
16149 + urb_entry_t *urb_entry;
16151 + list_for_each_safe(entry, tmp, &urb_list[epid]) {
16152 + urb_entry = list_entry(entry, urb_entry_t, list);
16153 + ASSERT(urb_entry);
16154 + ASSERT(urb_entry->urb);
16156 + if (urb_entry->urb == urb) {
16157 + return urb_entry;
16163 +/* Same function as above but for global use. Protects list by spinlock */
16164 +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
16165 + unsigned long flags;
16166 + urb_entry_t *urb_entry;
16167 + spin_lock_irqsave(&urb_list_lock, flags);
16168 + urb_entry = __urb_list_entry(urb, epid);
16169 + spin_unlock_irqrestore(&urb_list_lock, flags);
16170 + return (urb_entry);
16173 +/* Delete an urb from the list. */
16174 +static inline void urb_list_del(struct urb *urb, int epid) {
16175 + unsigned long flags;
16176 + urb_entry_t *urb_entry;
16178 + /* Delete entry and free. */
16179 + spin_lock_irqsave(&urb_list_lock, flags);
16180 + urb_entry = __urb_list_entry(urb, epid);
16181 + ASSERT(urb_entry);
16183 + list_del(&urb_entry->list);
16184 + spin_unlock_irqrestore(&urb_list_lock, flags);
16185 + kfree(urb_entry);
16188 +/* Move an urb to the end of the list. */
16189 +static inline void urb_list_move_last(struct urb *urb, int epid) {
16190 + unsigned long flags;
16191 + urb_entry_t *urb_entry;
16193 + spin_lock_irqsave(&urb_list_lock, flags);
16194 + urb_entry = __urb_list_entry(urb, epid);
16195 + ASSERT(urb_entry);
16197 + list_del(&urb_entry->list);
16198 + list_add_tail(&urb_entry->list, &urb_list[epid]);
16199 + spin_unlock_irqrestore(&urb_list_lock, flags);
16202 +/* Get the next urb in the list. */
16203 +static inline struct urb *urb_list_next(struct urb *urb, int epid) {
16204 + unsigned long flags;
16205 + urb_entry_t *urb_entry;
16207 + spin_lock_irqsave(&urb_list_lock, flags);
16208 + urb_entry = __urb_list_entry(urb, epid);
16209 + ASSERT(urb_entry);
16211 + if (urb_entry->list.next != &urb_list[epid]) {
16212 + struct list_head *elem = urb_entry->list.next;
16213 + urb_entry = list_entry(elem, urb_entry_t, list);
16214 + spin_unlock_irqrestore(&urb_list_lock, flags);
16215 + return urb_entry->urb;
16217 + spin_unlock_irqrestore(&urb_list_lock, flags);
16222 +struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
16224 + struct USB_EP_Desc *ep_desc;
16225 + ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
16226 + if(ep_desc == NULL)
16228 + memset(ep_desc, 0, sizeof(struct USB_EP_Desc));
16230 + ep_desc->hw_len = 0;
16231 + ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
16232 + IO_STATE(USB_EP_command, enable, yes));
16233 + if(sb_desc == NULL) {
16234 + ep_desc->sub = 0;
16236 + ep_desc->sub = virt_to_phys(sb_desc);
16244 +#define TT_SETUP 3
16246 +#define CMD_EOL IO_STATE(USB_SB_command, eol, yes)
16247 +#define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
16248 +#define CMD_FULL IO_STATE(USB_SB_command, full, yes)
16250 +/* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
16251 + SBs. Also used by create_sb_in() to avoid same allocation procedure at two
16253 +struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data,
16254 + int datalen, int mem_flags) {
16255 + struct USB_SB_Desc *sb_desc;
16256 + sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
16257 + if(sb_desc == NULL)
16259 + memset(sb_desc, 0, sizeof(struct USB_SB_Desc));
16261 + sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) |
16262 + IO_STATE(USB_SB_command, eot, yes);
16264 + sb_desc->sw_len = datalen;
16265 + if(data != NULL) {
16266 + sb_desc->buf = virt_to_phys(data);
16268 + sb_desc->buf = 0;
16270 + if(sb_prev != NULL) {
16271 + sb_prev->next = virt_to_phys(sb_desc);
16276 +/* Creates a copy of an existing SB by allocation space for it and copy
16278 +struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) {
16279 + struct USB_SB_Desc *sb_desc;
16280 + sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
16281 + if(sb_desc == NULL)
16284 + memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc));
16288 +/* A specific create_sb function for creation of in SBs. This is due to
16289 + that datalen in In SBs shows how many packets we are expecting. It also
16290 + sets up the rem field to show how many bytes we expect in the last packet
16291 + if it's not a full one */
16292 +struct USB_SB_Desc* create_sb_in(struct USB_SB_Desc* sb_prev, int datalen,
16293 + int maxlen, int mem_flags) {
16294 + struct USB_SB_Desc *sb_desc;
16295 + sb_desc = create_sb(sb_prev, TT_IN, NULL,
16296 + datalen ? (datalen - 1) / maxlen + 1 : 0, mem_flags);
16297 + if(sb_desc == NULL)
16299 + sb_desc->command |= IO_FIELD(USB_SB_command, rem, datalen % maxlen);
16303 +void set_sb_cmds(struct USB_SB_Desc *sb_desc, __u16 flags) {
16304 + sb_desc->command |= flags;
16307 +int create_sb_for_urb(struct urb *urb, int mem_flags) {
16308 + int is_out = !usb_pipein(urb->pipe);
16309 + int type = usb_pipetype(urb->pipe);
16310 + int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out);
16311 + int buf_len = urb->transfer_buffer_length;
16312 + void *buf = buf_len > 0 ? urb->transfer_buffer : NULL;
16313 + struct USB_SB_Desc *sb_desc = NULL;
16315 + struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
16316 + ASSERT(urb_priv != NULL);
16319 + case PIPE_CONTROL:
16320 + /* Setup stage */
16321 + sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags);
16322 + if(sb_desc == NULL)
16324 + set_sb_cmds(sb_desc, CMD_FULL);
16326 + /* Attach first SB to URB */
16327 + urb_priv->first_sb = sb_desc;
16329 + if (is_out) { /* Out Control URB */
16330 + /* If this Control OUT transfer has an optional data stage we add
16331 + an OUT token before the mandatory IN (status) token */
16332 + if ((buf_len > 0) && buf) {
16333 + sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags);
16334 + if(sb_desc == NULL)
16336 + set_sb_cmds(sb_desc, CMD_FULL);
16339 + /* Status stage */
16340 + /* The data length has to be exactly 1. This is due to a requirement
16341 + of the USB specification that a host must be prepared to receive
16342 + data in the status phase */
16343 + sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
16344 + if(sb_desc == NULL)
16346 + } else { /* In control URB */
16348 + sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags);
16349 + if(sb_desc == NULL)
16352 + /* Status stage */
16353 + /* Read comment at zout_buffer declaration for an explanation to this. */
16354 + sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
16355 + if(sb_desc == NULL)
16357 + /* Set descriptor interrupt flag for in URBs so we can finish URB after
16358 + zout-packet has been sent */
16359 + set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL);
16361 + /* Set end-of-list flag in last SB */
16362 + set_sb_cmds(sb_desc, CMD_EOL);
16363 + /* Attach last SB to URB */
16364 + urb_priv->last_sb = sb_desc;
16368 + if (is_out) { /* Out Bulk URB */
16369 + sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
16370 + if(sb_desc == NULL)
16372 + /* The full field is set to yes, even if we don't actually check that
16373 + this is a full-length transfer (i.e., that transfer_buffer_length %
16375 + Setting full prevents the USB controller from sending an empty packet
16376 + in that case. However, if URB_ZERO_PACKET was set we want that. */
16377 + if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
16378 + set_sb_cmds(sb_desc, CMD_FULL);
16380 + } else { /* In Bulk URB */
16381 + sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
16382 + if(sb_desc == NULL)
16385 + /* Set end-of-list flag for last SB */
16386 + set_sb_cmds(sb_desc, CMD_EOL);
16388 + /* Attach SB to URB */
16389 + urb_priv->first_sb = sb_desc;
16390 + urb_priv->last_sb = sb_desc;
16393 + case PIPE_INTERRUPT:
16394 + if(is_out) { /* Out Intr URB */
16395 + sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
16396 + if(sb_desc == NULL)
16399 + /* The full field is set to yes, even if we don't actually check that
16400 + this is a full-length transfer (i.e., that transfer_buffer_length %
16402 + Setting full prevents the USB controller from sending an empty packet
16403 + in that case. However, if URB_ZERO_PACKET was set we want that. */
16404 + if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
16405 + set_sb_cmds(sb_desc, CMD_FULL);
16407 + /* Only generate TX interrupt if it's a Out URB*/
16408 + set_sb_cmds(sb_desc, CMD_INTR);
16410 + } else { /* In Intr URB */
16411 + sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
16412 + if(sb_desc == NULL)
16415 + /* Set end-of-list flag for last SB */
16416 + set_sb_cmds(sb_desc, CMD_EOL);
16418 + /* Attach SB to URB */
16419 + urb_priv->first_sb = sb_desc;
16420 + urb_priv->last_sb = sb_desc;
16423 + case PIPE_ISOCHRONOUS:
16424 + if(is_out) { /* Out Isoc URB */
16426 + if(urb->number_of_packets == 0) {
16427 + tc_err("Can't create SBs for Isoc URB with zero packets\n");
16430 + /* Create one SB descriptor for each packet and link them together. */
16431 + for(i = 0; i < urb->number_of_packets; i++) {
16432 + if (urb->iso_frame_desc[i].length > 0) {
16434 + sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer +
16435 + urb->iso_frame_desc[i].offset,
16436 + urb->iso_frame_desc[i].length, mem_flags);
16437 + if(sb_desc == NULL)
16440 + /* Check if it's a full length packet */
16441 + if (urb->iso_frame_desc[i].length ==
16442 + usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
16443 + set_sb_cmds(sb_desc, CMD_FULL);
16446 + } else { /* zero length packet */
16447 + sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
16448 + if(sb_desc == NULL)
16450 + set_sb_cmds(sb_desc, CMD_FULL);
16452 + /* Attach first SB descriptor to URB */
16454 + urb_priv->first_sb = sb_desc;
16457 + /* Set interrupt and end-of-list flags in last SB */
16458 + set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL);
16459 + /* Attach last SB descriptor to URB */
16460 + urb_priv->last_sb = sb_desc;
16461 + tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
16462 + urb->number_of_packets, (unsigned int)urb);
16463 + } else { /* In Isoc URB */
16464 + /* Actual number of packets is not relevant for periodic in traffic as
16465 + long as it is more than zero. Set to 1 always. */
16466 + sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
16467 + if(sb_desc == NULL)
16469 + /* Set end-of-list flags for SB */
16470 + set_sb_cmds(sb_desc, CMD_EOL);
16472 + /* Attach SB to URB */
16473 + urb_priv->first_sb = sb_desc;
16474 + urb_priv->last_sb = sb_desc;
16478 + tc_err("Unknown pipe-type\n");
16485 +int init_intr_urb(struct urb *urb, int mem_flags) {
16486 + struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
16487 + struct USB_EP_Desc* ep_desc;
16492 + ASSERT(urb_priv != NULL);
16493 + ASSERT(usb_pipeint(urb->pipe));
16494 + /* We can't support interval longer than amount of eof descriptors in
16496 + if(urb->interval > MAX_INTR_INTERVAL) {
16497 + tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval,
16498 + MAX_INTR_INTERVAL);
16502 + /* We assume that the SB descriptors already have been setup */
16503 + ASSERT(urb_priv->first_sb != NULL);
16505 + /* Round off the interval to 2^n, it is obvious that this code favours
16506 + smaller numbers, but that is actually a good thing */
16507 + /* FIXME: The "rounding error" for larger intervals will be quite
16508 + large. For in traffic this shouldn't be a problem since it will only
16509 + mean that we "poll" more often. */
16510 + interval = urb->interval;
16511 + for (i = 0; interval; i++) {
16512 + interval = interval >> 1;
16514 + urb_priv->interval = 1 << (i - 1);
16516 + /* We can only have max interval for Out Interrupt due to that we can only
16517 + handle one linked in EP for a certain epid in the Intr descr array at the
16518 + time. The USB Controller in the Etrax 100LX continues to process Intr EPs
16519 + so we have no way of knowing which one that caused the actual transfer if
16520 + we have several linked in. */
16521 + if(usb_pipeout(urb->pipe)) {
16522 + urb_priv->interval = MAX_INTR_INTERVAL;
16525 + /* Calculate amount of EPs needed */
16526 + ep_count = MAX_INTR_INTERVAL / urb_priv->interval;
16528 + for(i = 0; i < ep_count; i++) {
16529 + ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags);
16530 + if(ep_desc == NULL) {
16531 + /* Free any descriptors that we may have allocated before failure */
16534 + kfree(urb_priv->intr_ep_pool[i]);
16538 + urb_priv->intr_ep_pool[i] = ep_desc;
16540 + urb_priv->intr_ep_pool_length = ep_count;
16544 +/* DMA RX/TX functions */
16545 +/* ----------------------- */
16547 +static void tc_dma_init_rx_list(void) {
16550 + /* Setup descriptor list except last one */
16551 + for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
16552 + RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
16553 + RxDescList[i].command = 0;
16554 + RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
16555 + RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
16556 + RxDescList[i].hw_len = 0;
16557 + RxDescList[i].status = 0;
16559 + /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
16560 + USB_IN_Desc for the relevant fields.) */
16561 + prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
16564 + /* Special handling of last descriptor */
16565 + RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
16566 + RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
16567 + RxDescList[i].next = virt_to_phys(&RxDescList[0]);
16568 + RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
16569 + RxDescList[i].hw_len = 0;
16570 + RxDescList[i].status = 0;
16572 + /* Setup list pointers that show progress in list */
16573 + myNextRxDesc = &RxDescList[0];
16574 + myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
16576 + flush_etrax_cache();
16577 + /* Point DMA to first descriptor in list and start it */
16578 + *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
16579 + *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
16583 +static void tc_dma_init_tx_bulk_list(void) {
16585 + volatile struct USB_EP_Desc *epDescr;
16587 + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
16588 + epDescr = &(TxBulkEPList[i]);
16589 + CHECK_ALIGN(epDescr);
16590 + epDescr->hw_len = 0;
16591 + epDescr->command = IO_FIELD(USB_EP_command, epid, i);
16592 + epDescr->sub = 0;
16593 + epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]);
16595 + /* Initiate two EPs, disabled and with the eol flag set. No need for any
16596 + preserved epid. */
16598 + /* The first one has the intr flag set so we get an interrupt when the DMA
16599 + channel is about to become disabled. */
16600 + CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
16601 + TxBulkDummyEPList[i][0].hw_len = 0;
16602 + TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
16603 + IO_STATE(USB_EP_command, eol, yes) |
16604 + IO_STATE(USB_EP_command, intr, yes));
16605 + TxBulkDummyEPList[i][0].sub = 0;
16606 + TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
16608 + /* The second one. */
16609 + CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
16610 + TxBulkDummyEPList[i][1].hw_len = 0;
16611 + TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
16612 + IO_STATE(USB_EP_command, eol, yes));
16613 + TxBulkDummyEPList[i][1].sub = 0;
16614 + /* The last dummy's next pointer is the same as the current EP's next pointer. */
16615 + TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
16618 + /* Special handling of last descr in list, make list circular */
16619 + epDescr = &TxBulkEPList[i];
16620 + CHECK_ALIGN(epDescr);
16621 + epDescr->hw_len = 0;
16622 + epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
16623 + IO_FIELD(USB_EP_command, epid, i);
16624 + epDescr->sub = 0;
16625 + epDescr->next = virt_to_phys(&TxBulkEPList[0]);
16627 + /* Init DMA sub-channel pointers to last item in each list */
16628 + *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
16629 + /* No point in starting the bulk channel yet.
16630 + *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
16633 +static void tc_dma_init_tx_ctrl_list(void) {
16635 + volatile struct USB_EP_Desc *epDescr;
16637 + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
16638 + epDescr = &(TxCtrlEPList[i]);
16639 + CHECK_ALIGN(epDescr);
16640 + epDescr->hw_len = 0;
16641 + epDescr->command = IO_FIELD(USB_EP_command, epid, i);
16642 + epDescr->sub = 0;
16643 + epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]);
16645 + /* Special handling of last descr in list, make list circular */
16646 + epDescr = &TxCtrlEPList[i];
16647 + CHECK_ALIGN(epDescr);
16648 + epDescr->hw_len = 0;
16649 + epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
16650 + IO_FIELD(USB_EP_command, epid, i);
16651 + epDescr->sub = 0;
16652 + epDescr->next = virt_to_phys(&TxCtrlEPList[0]);
16654 + /* Init DMA sub-channel pointers to last item in each list */
16655 + *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]);
16656 + /* No point in starting the ctrl channel yet.
16657 + *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
16661 +static void tc_dma_init_tx_intr_list(void) {
16664 + TxIntrSB_zout.sw_len = 1;
16665 + TxIntrSB_zout.next = 0;
16666 + TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
16667 + TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
16668 + IO_STATE(USB_SB_command, tt, zout) |
16669 + IO_STATE(USB_SB_command, full, yes) |
16670 + IO_STATE(USB_SB_command, eot, yes) |
16671 + IO_STATE(USB_SB_command, eol, yes));
16673 + for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
16674 + CHECK_ALIGN(&TxIntrEPList[i]);
16675 + TxIntrEPList[i].hw_len = 0;
16676 + TxIntrEPList[i].command =
16677 + (IO_STATE(USB_EP_command, eof, yes) |
16678 + IO_STATE(USB_EP_command, enable, yes) |
16679 + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
16680 + TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
16681 + TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
16684 + /* Special handling of last descr in list, make list circular */
16685 + CHECK_ALIGN(&TxIntrEPList[i]);
16686 + TxIntrEPList[i].hw_len = 0;
16687 + TxIntrEPList[i].command =
16688 + (IO_STATE(USB_EP_command, eof, yes) |
16689 + IO_STATE(USB_EP_command, eol, yes) |
16690 + IO_STATE(USB_EP_command, enable, yes) |
16691 + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
16692 + TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
16693 + TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
16695 + intr_dbg("Initiated Intr EP descriptor list\n");
16698 + /* Connect DMA 8 sub-channel 2 to first in list */
16699 + *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
16702 +static void tc_dma_init_tx_isoc_list(void) {
16707 + /* Read comment at zout_buffer declaration for an explanation to this. */
16708 + TxIsocSB_zout.sw_len = 1;
16709 + TxIsocSB_zout.next = 0;
16710 + TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
16711 + TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
16712 + IO_STATE(USB_SB_command, tt, zout) |
16713 + IO_STATE(USB_SB_command, full, yes) |
16714 + IO_STATE(USB_SB_command, eot, yes) |
16715 + IO_STATE(USB_SB_command, eol, yes));
16717 + /* The last isochronous EP descriptor is a dummy. */
16718 + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
16719 + CHECK_ALIGN(&TxIsocEPList[i]);
16720 + TxIsocEPList[i].hw_len = 0;
16721 + TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
16722 + TxIsocEPList[i].sub = 0;
16723 + TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
16726 + CHECK_ALIGN(&TxIsocEPList[i]);
16727 + TxIsocEPList[i].hw_len = 0;
16729 + /* Must enable the last EP descr to get eof interrupt. */
16730 + TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
16731 + IO_STATE(USB_EP_command, eof, yes) |
16732 + IO_STATE(USB_EP_command, eol, yes) |
16733 + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
16734 + TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
16735 + TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
16737 + *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
16738 + *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
16741 +static int tc_dma_init(struct usb_hcd *hcd) {
16742 + tc_dma_init_rx_list();
16743 + tc_dma_init_tx_bulk_list();
16744 + tc_dma_init_tx_ctrl_list();
16745 + tc_dma_init_tx_intr_list();
16746 + tc_dma_init_tx_isoc_list();
16748 + if (cris_request_dma(USB_TX_DMA_NBR,
16749 + "ETRAX 100LX built-in USB (Tx)",
16750 + DMA_VERBOSE_ON_ERROR,
16752 + err("Could not allocate DMA ch 8 for USB");
16756 + if (cris_request_dma(USB_RX_DMA_NBR,
16757 + "ETRAX 100LX built-in USB (Rx)",
16758 + DMA_VERBOSE_ON_ERROR,
16760 + err("Could not allocate DMA ch 9 for USB");
16764 + *R_IRQ_MASK2_SET =
16765 + /* Note that these interrupts are not used. */
16766 + IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
16767 + /* Sub channel 1 (ctrl) descr. interrupts are used. */
16768 + IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
16769 + IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
16770 + /* Sub channel 3 (isoc) descr. interrupts are used. */
16771 + IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
16773 + /* Note that the dma9_descr interrupt is not used. */
16774 + *R_IRQ_MASK2_SET =
16775 + IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
16776 + IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
16778 + if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0,
16779 + "ETRAX 100LX built-in USB (Rx)", hcd)) {
16780 + err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
16784 + if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0,
16785 + "ETRAX 100LX built-in USB (Tx)", hcd)) {
16786 + err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
16793 +static void tc_dma_destroy(void) {
16794 + free_irq(ETRAX_USB_RX_IRQ, NULL);
16795 + free_irq(ETRAX_USB_TX_IRQ, NULL);
16797 + cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)");
16798 + cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)");
16802 +static void tc_dma_link_intr_urb(struct urb *urb);
16804 +/* Handle processing of Bulk, Ctrl and Intr queues */
16805 +static void tc_dma_process_queue(int epid) {
16807 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
16808 + unsigned long flags;
16811 + if(epid_state[epid].disabled) {
16812 + /* Don't process any URBs on a disabled endpoint */
16816 + /* Do not disturb us while fiddling with EPs and epids */
16817 + local_irq_save(flags);
16819 + /* For bulk, Ctrl and Intr can we only have one URB active at a time for
16820 + a specific EP. */
16821 + if(activeUrbList[epid] != NULL) {
16822 + /* An URB is already active on EP, skip checking queue */
16823 + local_irq_restore(flags);
16827 + urb = urb_list_first(epid);
16828 + if(urb == NULL) {
16829 + /* No URB waiting in EP queue. Nothing to do */
16830 + local_irq_restore(flags);
16834 + urb_priv = urb->hcpriv;
16835 + ASSERT(urb_priv != NULL);
16836 + ASSERT(urb_priv->urb_state == NOT_STARTED);
16837 + ASSERT(!usb_pipeisoc(urb->pipe));
16839 + /* Remove this URB from the queue and move it to active */
16840 + activeUrbList[epid] = urb;
16841 + urb_list_del(urb, epid);
16843 + urb_priv->urb_state = STARTED;
16845 + /* Reset error counters (regardless of which direction this traffic is). */
16846 + etrax_epid_clear_error(epid);
16848 + /* Special handling of Intr EP lists */
16849 + if(usb_pipeint(urb->pipe)) {
16850 + tc_dma_link_intr_urb(urb);
16851 + local_irq_restore(flags);
16855 + /* Software must preset the toggle bits for Bulk and Ctrl */
16856 + if(usb_pipecontrol(urb->pipe)) {
16857 + /* Toggle bits are initialized only during setup transaction in a
16859 + etrax_epid_set_toggle(epid, 0, 0);
16860 + etrax_epid_set_toggle(epid, 1, 0);
16862 + toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
16863 + usb_pipeout(urb->pipe));
16864 + etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle);
16867 + tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
16868 + (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid,
16869 + sblist_to_str(urb_priv->first_sb));
16871 + /* We start the DMA sub channel without checking if it's running or not,
16873 + 1) If it's already running, issuing the start command is a nop.
16874 + 2) We avoid a test-and-set race condition. */
16875 + switch(usb_pipetype(urb->pipe)) {
16877 + /* Assert that the EP descriptor is disabled. */
16878 + ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
16880 + /* Set up and enable the EP descriptor. */
16881 + TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
16882 + TxBulkEPList[epid].hw_len = 0;
16883 + TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
16885 + /* Check if the dummy list is already with us (if several urbs were queued). */
16886 + if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) {
16887 + tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d",
16888 + (unsigned long)urb, epid);
16890 + /* We don't need to check if the DMA is at this EP or not before changing the
16891 + next pointer, since we will do it in one 32-bit write (EP descriptors are
16892 + 32-bit aligned). */
16893 + TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
16896 + restart_dma8_sub0();
16898 + /* Update/restart the bulk start timer since we just started the channel.*/
16899 + mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
16900 + /* Update/restart the bulk eot timer since we just inserted traffic. */
16901 + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
16903 + case PIPE_CONTROL:
16904 + /* Assert that the EP descriptor is disabled. */
16905 + ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
16907 + /* Set up and enable the EP descriptor. */
16908 + TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
16909 + TxCtrlEPList[epid].hw_len = 0;
16910 + TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
16912 + *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
16915 + local_irq_restore(flags);
16918 +static void tc_dma_link_intr_urb(struct urb *urb) {
16919 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
16920 + volatile struct USB_EP_Desc *tmp_ep;
16921 + struct USB_EP_Desc *ep_desc;
16923 + int pool_idx = 0;
16925 + ASSERT(urb_priv != NULL);
16926 + epid = urb_priv->epid;
16927 + ASSERT(urb_priv->interval > 0);
16928 + ASSERT(urb_priv->intr_ep_pool_length > 0);
16930 + tmp_ep = &TxIntrEPList[0];
16932 + /* Only insert one EP descriptor in list for Out Intr URBs.
16933 + We can only handle Out Intr with interval of 128ms because
16934 + it's not possible to insert several Out Intr EPs because they
16935 + are not consumed by the DMA. */
16936 + if(usb_pipeout(urb->pipe)) {
16937 + ep_desc = urb_priv->intr_ep_pool[0];
16939 + ep_desc->next = tmp_ep->next;
16940 + tmp_ep->next = virt_to_phys(ep_desc);
16943 + /* Loop through Intr EP descriptor list and insert EP for URB at
16944 + specified interval */
16946 + /* Each EP descriptor with eof flag set signals a new frame */
16947 + if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
16948 + /* Insert a EP from URBs EP pool at correct interval */
16949 + if ((i % urb_priv->interval) == 0) {
16950 + ep_desc = urb_priv->intr_ep_pool[pool_idx];
16952 + ep_desc->next = tmp_ep->next;
16953 + tmp_ep->next = virt_to_phys(ep_desc);
16955 + ASSERT(pool_idx <= urb_priv->intr_ep_pool_length);
16959 + tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next);
16960 + } while(tmp_ep != &TxIntrEPList[0]);
16963 + intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid,
16964 + sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx);
16966 + /* We start the DMA sub channel without checking if it's running or not,
16968 + 1) If it's already running, issuing the start command is a nop.
16969 + 2) We avoid a test-and-set race condition. */
16970 + *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
16973 +static void tc_dma_process_isoc_urb(struct urb *urb) {
16974 + unsigned long flags;
16975 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
16978 + /* Do not disturb us while fiddling with EPs and epids */
16979 + local_irq_save(flags);
16981 + ASSERT(urb_priv);
16982 + ASSERT(urb_priv->first_sb);
16983 + epid = urb_priv->epid;
16985 + if(activeUrbList[epid] == NULL) {
16986 + /* EP is idle, so make this URB active */
16987 + activeUrbList[epid] = urb;
16988 + urb_list_del(urb, epid);
16989 + ASSERT(TxIsocEPList[epid].sub == 0);
16990 + ASSERT(!(TxIsocEPList[epid].command &
16991 + IO_STATE(USB_EP_command, enable, yes)));
16993 + /* Differentiate between In and Out Isoc. Because In SBs are not consumed*/
16994 + if(usb_pipein(urb->pipe)) {
16995 + /* Each EP for In Isoc will have only one SB descriptor, setup when
16996 + submitting the first active urb. We do it here by copying from URBs
16997 + pre-allocated SB. */
16998 + memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb,
16999 + sizeof(TxIsocSBList[epid]));
17000 + TxIsocEPList[epid].hw_len = 0;
17001 + TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid]));
17003 + /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
17004 + TxIsocEPList[epid].hw_len = 0;
17005 + TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
17007 + isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
17008 + " last_sb::0x%x\n",
17009 + (unsigned int)urb, urb_priv->urb_num, epid,
17010 + (unsigned int)(urb_priv->first_sb),
17011 + (unsigned int)(urb_priv->last_sb));
17014 + if (urb->transfer_flags & URB_ISO_ASAP) {
17015 + /* The isoc transfer should be started as soon as possible. The
17016 + start_frame field is a return value if URB_ISO_ASAP was set. Comparing
17017 + R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
17018 + token is sent 2 frames later. I'm not sure how this affects usage of
17019 + the start_frame field by the device driver, or how it affects things
17020 + when USB_ISO_ASAP is not set, so therefore there's no compensation for
17021 + the 2 frame "lag" here. */
17022 + urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
17023 + TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
17024 + urb_priv->urb_state = STARTED;
17025 + isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
17026 + urb->start_frame);
17028 + /* Not started yet. */
17029 + urb_priv->urb_state = NOT_STARTED;
17030 + isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
17031 + (unsigned int)urb);
17035 + /* An URB is already active on the EP. Leave URB in queue and let
17036 + finish_isoc_urb process it after current active URB */
17037 + ASSERT(TxIsocEPList[epid].sub != 0);
17039 + if(usb_pipein(urb->pipe)) {
17040 + /* Because there already is an active In URB on this epid we do nothing
17041 + and the finish_isoc_urb() function will handle switching to next URB*/
17043 + } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */
17044 + struct USB_SB_Desc *temp_sb_desc;
17046 + /* Set state STARTED to all Out Isoc URBs added to SB list because we
17047 + don't know how many of them that are finished before descr interrupt*/
17048 + urb_priv->urb_state = STARTED;
17050 + /* Find end of current SB list by looking for SB with eol flag set */
17051 + temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
17052 + while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
17053 + IO_STATE(USB_SB_command, eol, yes)) {
17054 + ASSERT(temp_sb_desc->next);
17055 + temp_sb_desc = phys_to_virt(temp_sb_desc->next);
17058 + isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
17059 + " sub:0x%x eol:0x%x\n",
17060 + (unsigned int)urb, urb_priv->urb_num,
17061 + (unsigned int)(urb_priv->first_sb),
17062 + (unsigned int)(urb_priv->last_sb), epid,
17063 + (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
17064 + (unsigned int)temp_sb_desc);
17066 + /* Next pointer must be set before eol is removed. */
17067 + temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
17068 + /* Clear the previous end of list flag since there is a new in the
17069 + added SB descriptor list. */
17070 + temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
17072 + if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
17074 + /* 8.8.5 in Designer's Reference says we should check for and correct
17075 + any errors in the EP here. That should not be necessary if
17076 + epid_attn is handled correctly, so we assume all is ok. */
17077 + epid_data = etrax_epid_iso_get(epid);
17078 + if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) !=
17079 + IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
17080 + isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
17081 + " URB:0x%x[%d]\n",
17082 + IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid,
17083 + (unsigned int)urb, urb_priv->urb_num);
17086 + /* The SB list was exhausted. */
17087 + if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
17088 + /* The new sublist did not get processed before the EP was
17089 + disabled. Setup the EP again. */
17091 + if(virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) {
17092 + isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted"
17093 + ", restarting from this URBs SB:0x%x\n",
17094 + epid, (unsigned int)temp_sb_desc,
17095 + (unsigned int)(urb_priv->first_sb));
17096 + TxIsocEPList[epid].hw_len = 0;
17097 + TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
17098 + urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
17099 + /* Enable the EP again so data gets processed this time */
17100 + TxIsocEPList[epid].command |=
17101 + IO_STATE(USB_EP_command, enable, yes);
17104 + /* The EP has been disabled but not at end this URB (god knows
17105 + where). This should generate an epid_attn so we should not be
17107 + isoc_warn("EP was disabled on sb:0x%x before SB list for"
17108 + " URB:0x%x[%d] got processed\n",
17109 + (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
17110 + (unsigned int)urb, urb_priv->urb_num);
17113 + /* This might happen if we are slow on this function and isn't
17115 + isoc_dbg("EP was disabled and finished with SBs from appended"
17116 + " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num);
17122 + /* Start the DMA sub channel */
17123 + *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
17125 + local_irq_restore(flags);
17128 +static void tc_dma_unlink_intr_urb(struct urb *urb) {
17129 + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
17130 + volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
17131 + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
17132 + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
17133 + volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from
17136 + volatile int timeout = 10000;
17139 + /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
17141 + ASSERT(urb_priv);
17142 + ASSERT(urb_priv->intr_ep_pool_length > 0);
17143 + epid = urb_priv->epid;
17145 + /* First disable all Intr EPs belonging to epid for this URB */
17146 + first_ep = &TxIntrEPList[0];
17147 + curr_ep = first_ep;
17149 + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
17150 + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
17152 + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
17154 + curr_ep = phys_to_virt(curr_ep->next);
17155 + } while (curr_ep != first_ep);
17158 + /* Now unlink all EPs belonging to this epid from Descr list */
17159 + first_ep = &TxIntrEPList[0];
17160 + curr_ep = first_ep;
17162 + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
17163 + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
17164 + /* This is the one we should unlink. */
17165 + unlink_ep = next_ep;
17167 + /* Actually unlink the EP from the DMA list. */
17168 + curr_ep->next = unlink_ep->next;
17170 + /* Wait until the DMA is no longer at this descriptor. */
17171 + while((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) &&
17172 + (timeout-- > 0));
17173 + if(timeout == 0) {
17174 + warn("Timeout while waiting for DMA-TX-Intr to leave unlink EP\n");
17179 + curr_ep = phys_to_virt(curr_ep->next);
17180 + } while (curr_ep != first_ep);
17182 + if(count != urb_priv->intr_ep_pool_length) {
17183 + intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count,
17184 + urb_priv->intr_ep_pool_length, (unsigned int)urb,
17185 + urb_priv->urb_num);
17187 + intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count,
17188 + urb_priv->intr_ep_pool_length, (unsigned int)urb);
17192 +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
17194 + unsigned long flags;
17197 + struct crisv10_urb_priv * urb_priv;
17200 + /* Protect TxEPList */
17201 + local_irq_save(flags);
17203 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
17204 + /* A finished EP descriptor is disabled and has a valid sub pointer */
17205 + if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
17206 + (TxBulkEPList[epid].sub != 0)) {
17208 + /* Get the active URB for this epid */
17209 + urb = activeUrbList[epid];
17210 + /* Sanity checks */
17212 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17213 + ASSERT(urb_priv);
17215 + /* Only handle finished out Bulk EPs here,
17216 + and let RX interrupt take care of the rest */
17217 + if(!epid_out_traffic(epid)) {
17222 + tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
17223 + epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
17224 + urb_priv->urb_num);
17226 + tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
17227 + epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
17228 + urb_priv->urb_num);
17231 + if(urb_priv->urb_state == UNLINK) {
17232 + /* This Bulk URB is requested to be unlinked, that means that the EP
17233 + has been disabled and we might not have sent all data */
17234 + tc_finish_urb(hcd, urb, urb->status);
17238 + ASSERT(urb_priv->urb_state == STARTED);
17239 + if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
17240 + tc_err("Endpoint got disabled before reaching last sb\n");
17243 + epid_data = etrax_epid_get(epid);
17244 + if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
17245 + IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
17246 + /* This means that the endpoint has no error, is disabled
17247 + and had inserted traffic, i.e. transfer successfully completed. */
17248 + tc_finish_urb(hcd, urb, 0);
17250 + /* Shouldn't happen. We expect errors to be caught by epid
17252 + tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
17253 + epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
17256 + tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid);
17260 + local_irq_restore(flags);
17263 +static void check_finished_ctrl_tx_epids(struct usb_hcd *hcd) {
17264 + unsigned long flags;
17267 + struct crisv10_urb_priv * urb_priv;
17270 + /* Protect TxEPList */
17271 + local_irq_save(flags);
17273 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
17274 + if(epid == DUMMY_EPID)
17277 + /* A finished EP descriptor is disabled and has a valid sub pointer */
17278 + if (!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
17279 + (TxCtrlEPList[epid].sub != 0)) {
17281 + /* Get the active URB for this epid */
17282 + urb = activeUrbList[epid];
17284 + if(urb == NULL) {
17285 + tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid);
17289 + /* Sanity checks */
17290 + ASSERT(usb_pipein(urb->pipe));
17291 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17292 + ASSERT(urb_priv);
17293 + if (phys_to_virt(TxCtrlEPList[epid].sub) != urb_priv->last_sb) {
17294 + tc_err("Endpoint got disabled before reaching last sb\n");
17297 + epid_data = etrax_epid_get(epid);
17298 + if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
17299 + IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
17300 + /* This means that the endpoint has no error, is disabled
17301 + and had inserted traffic, i.e. transfer successfully completed. */
17303 + /* Check if RX-interrupt for In Ctrl has been processed before
17304 + finishing the URB */
17305 + if(urb_priv->ctrl_rx_done) {
17306 + tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
17307 + (unsigned int)urb, urb_priv->urb_num);
17308 + tc_finish_urb(hcd, urb, 0);
17310 + /* If we get zout descriptor interrupt before RX was done for a
17311 + In Ctrl transfer, then we flag that and it will be finished
17312 + in the RX-Interrupt */
17313 + urb_priv->ctrl_zout_done = 1;
17314 + tc_dbg("Got zout descr interrupt before RX interrupt\n");
17317 + /* Shouldn't happen. We expect errors to be caught by epid
17319 + tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid, (unsigned int)urb, urb_priv->urb_num, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
17320 + __dump_ep_desc(&(TxCtrlEPList[epid]));
17321 + __dump_ept_data(epid);
17325 + local_irq_restore(flags);
17328 +/* This function goes through all epids that are setup for Out Isoc transfers
17329 + and marks (isoc_out_done) all queued URBs that the DMA has finished
17331 + No URB completion is done here to make interrupt routine return quickly.
17332 + URBs are completed later with help of complete_isoc_bottom_half() that
17333 + is scheduled when this function is finished. */
17334 +static void check_finished_isoc_tx_epids(void) {
17335 + unsigned long flags;
17338 + struct crisv10_urb_priv * urb_priv;
17339 + struct USB_SB_Desc* sb_desc;
17342 + /* Protect TxIsocEPList */
17343 + local_irq_save(flags);
17345 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
17346 + if (TxIsocEPList[epid].sub == 0 || epid == INVALID_EPID ||
17347 + !epid_out_traffic(epid)) {
17348 + /* Nothing here to see. */
17351 + ASSERT(epid_inuse(epid));
17352 + ASSERT(epid_isoc(epid));
17354 + sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
17355 + /* Find the last descriptor of the currently active URB for this ep.
17356 + This is the first descriptor in the sub list marked for a descriptor
17358 + while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
17359 + sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
17363 + isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
17364 + epid, (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
17365 + (unsigned int)sb_desc);
17367 + urb = activeUrbList[epid];
17368 + if(urb == NULL) {
17369 + isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid);
17374 + while(urb && !epid_done) {
17375 + /* Sanity check. */
17376 + ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
17377 + ASSERT(usb_pipeout(urb->pipe));
17379 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17380 + ASSERT(urb_priv);
17381 + ASSERT(urb_priv->urb_state == STARTED ||
17382 + urb_priv->urb_state == UNLINK);
17384 + if (sb_desc != urb_priv->last_sb) {
17385 + /* This urb has been sent. */
17386 + urb_priv->isoc_out_done = 1;
17388 + } else { /* Found URB that has last_sb as the interrupt reason */
17390 + /* Check if EP has been disabled, meaning that all transfers are done*/
17391 + if(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
17392 + ASSERT((sb_desc->command & IO_MASK(USB_SB_command, eol)) ==
17393 + IO_STATE(USB_SB_command, eol, yes));
17394 + ASSERT(sb_desc->next == 0);
17395 + urb_priv->isoc_out_done = 1;
17397 + isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
17398 + (unsigned int)urb, urb_priv->urb_num);
17400 + /* Stop looking any further in queue */
17404 + if (!epid_done) {
17405 + if(urb == activeUrbList[epid]) {
17406 + urb = urb_list_first(epid);
17408 + urb = urb_list_next(urb, epid);
17411 + } /* END: while(urb && !epid_done) */
17414 + local_irq_restore(flags);
17418 +/* This is where the Out Isoc URBs are really completed. This function is
17419 +   scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
17420 +   are done. This function completes all URBs earlier marked with
17421 +   isoc_out_done by fast interrupt routine check_finished_isoc_tx_epids() */
17423 +static void complete_isoc_bottom_half(void *data) {
17424 + struct crisv10_isoc_complete_data *comp_data;
17425 + struct usb_iso_packet_descriptor *packet;
17426 + struct crisv10_urb_priv * urb_priv;
17427 + unsigned long flags;
17433 + comp_data = (struct crisv10_isoc_complete_data*)data;
17435 + local_irq_save(flags);
17437 + for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
17438 + if(!epid_inuse(epid) || !epid_isoc(epid) || !epid_out_traffic(epid) || epid == DUMMY_EPID) {
17439 + /* Only check valid Out Isoc epids */
17443 + isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid,
17444 + (unsigned int)phys_to_virt(TxIsocEPList[epid].sub));
17446 + /* The descriptor interrupt handler has marked all transmitted Out Isoc
17447 + URBs with isoc_out_done. Now we traverse all epids and for all that
17448 + have out Isoc traffic we traverse its URB list and complete the
17449 + transmitted URBs. */
17451 + while (!epid_done) {
17453 + /* Get the active urb (if any) */
17454 + urb = activeUrbList[epid];
17456 + isoc_dbg("No active URB on epid:%d anymore\n", epid);
17461 + /* Sanity check. */
17462 + ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
17463 + ASSERT(usb_pipeout(urb->pipe));
17465 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17466 + ASSERT(urb_priv);
17468 + if (!(urb_priv->isoc_out_done)) {
17469 + /* We have reached URB that isn't flagged done yet, stop traversing. */
17470 + isoc_dbg("Stoped traversing Out Isoc URBs on epid:%d"
17471 + " before not yet flaged URB:0x%x[%d]\n",
17472 + epid, (unsigned int)urb, urb_priv->urb_num);
17477 + /* This urb has been sent. */
17478 + isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n",
17479 + (unsigned int)urb, urb_priv->urb_num);
17481 + /* Set ok on transferred packets for this URB and finish it */
17482 + for (i = 0; i < urb->number_of_packets; i++) {
17483 + packet = &urb->iso_frame_desc[i];
17484 + packet->status = 0;
17485 + packet->actual_length = packet->length;
17487 + urb_priv->isoc_packet_counter = urb->number_of_packets;
17488 + tc_finish_urb(comp_data->hcd, urb, 0);
17490 + } /* END: while(!epid_done) */
17491 + } /* END: for(epid...) */
17493 + local_irq_restore(flags);
17494 + kmem_cache_free(isoc_compl_cache, comp_data);
17498 +static void check_finished_intr_tx_epids(struct usb_hcd *hcd) {
17499 + unsigned long flags;
17502 + struct crisv10_urb_priv * urb_priv;
17503 + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
17504 + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
17506 + /* Protect TxintrEPList */
17507 + local_irq_save(flags);
17509 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
17510 + if(!epid_inuse(epid) || !epid_intr(epid) || !epid_out_traffic(epid)) {
17511 + /* Nothing to see on this epid. Only check valid Out Intr epids */
17515 + urb = activeUrbList[epid];
17517 + intr_warn("Found Out Intr epid:%d with no active URB\n", epid);
17521 + /* Sanity check. */
17522 + ASSERT(usb_pipetype(urb->pipe) == PIPE_INTERRUPT);
17523 + ASSERT(usb_pipeout(urb->pipe));
17525 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17526 + ASSERT(urb_priv);
17528 + /* Go through EPs between first and second sof-EP. It's here Out Intr EPs
17530 + curr_ep = &TxIntrEPList[0];
17532 + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
17533 + if(next_ep == urb_priv->intr_ep_pool[0]) {
17534 + /* We found the Out Intr EP for this epid */
17536 + /* Disable it so it doesn't get processed again */
17537 + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
17539 + /* Finish the active Out Intr URB with status OK */
17540 + tc_finish_urb(hcd, urb, 0);
17542 + curr_ep = phys_to_virt(curr_ep->next);
17543 + } while (curr_ep != &TxIntrEPList[1]);
17546 + local_irq_restore(flags);
17549 +/* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */
17550 +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc) {
17551 + struct usb_hcd *hcd = (struct usb_hcd*)vhc;
17554 + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
17555 + /* Clear this interrupt */
17556 + *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
17557 + restart_dma8_sub0();
17560 + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
17561 + /* Clear this interrupt */
17562 + *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
17563 + check_finished_ctrl_tx_epids(hcd);
17566 + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
17567 + /* Clear this interrupt */
17568 + *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
17569 + check_finished_intr_tx_epids(hcd);
17572 + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
17573 + struct crisv10_isoc_complete_data* comp_data;
17575 + /* Flag done Out Isoc for later completion */
17576 + check_finished_isoc_tx_epids();
17578 + /* Clear this interrupt */
17579 + *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
17580 + /* Schedule bottom half of Out Isoc completion function. This function
17581 + finishes the URBs marked with isoc_out_done */
17582 + comp_data = (struct crisv10_isoc_complete_data*)
17583 + kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC);
17584 + ASSERT(comp_data != NULL);
17585 + comp_data ->hcd = hcd;
17587 + INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half, comp_data);
17588 + schedule_work(&comp_data->usb_bh);
17591 + return IRQ_HANDLED;
17594 +/* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */
17595 +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc) {
17596 + unsigned long flags;
17598 + struct usb_hcd *hcd = (struct usb_hcd*)vhc;
17599 + struct crisv10_urb_priv *urb_priv;
17605 + /* Clear this interrupt. */
17606 + *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);
17608 + /* Custom clear interrupt for this interrupt */
17609 + /* The reason we cli here is that we call the driver's callback functions. */
17610 + local_irq_save(flags);
17612 + /* Note that this while loop assumes that all packets span only
17613 + one rx descriptor. */
17614 + while(myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
17615 + epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
17616 + /* Get the active URB for this epid */
17617 + urb = activeUrbList[epid];
17619 + ASSERT(epid_inuse(epid));
17621 + dma_err("No urb for epid %d in rx interrupt\n", epid);
17625 + /* Check if any errors on epid */
17627 + if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
17628 + __u32 r_usb_ept_data;
17630 + if (usb_pipeisoc(urb->pipe)) {
17631 + r_usb_ept_data = etrax_epid_iso_get(epid);
17632 + if((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
17633 + (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
17634 + (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
17635 + /* Not an error, just a failure to receive an expected iso
17636 + in packet in this frame. This is not documented
17637 + in the designers reference. Continue processing.
17639 + } else real_error = 1;
17640 + } else real_error = 1;
17644 + dma_err("Error in RX descr on epid:%d for URB 0x%x",
17645 + epid, (unsigned int)urb);
17646 + dump_ept_data(epid);
17647 + dump_in_desc(myNextRxDesc);
17651 + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
17652 + ASSERT(urb_priv);
17653 + ASSERT(urb_priv->urb_state == STARTED ||
17654 + urb_priv->urb_state == UNLINK);
17656 + if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
17657 + (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
17658 + (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
17660 + /* We get nodata for empty data transactions, and the rx descriptor's
17661 + hw_len field is not valid in that case. No data to copy in other
17663 + if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
17664 + /* No data to copy */
17667 + dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
17668 + (unsigned int)urb, epid, myNextRxDesc->hw_len,
17669 + urb_priv->rx_offset);
17671 + /* Only copy data if URB isn't flagged to be unlinked */
17672 + if(urb_priv->urb_state != UNLINK) {
17673 + /* Make sure the data fits in the buffer. */
17674 + if(urb_priv->rx_offset + myNextRxDesc->hw_len
17675 + <= urb->transfer_buffer_length) {
17677 + /* Copy the data to URBs buffer */
17678 + memcpy(urb->transfer_buffer + urb_priv->rx_offset,
17679 + phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
17680 + urb_priv->rx_offset += myNextRxDesc->hw_len;
17682 + /* Signal overflow when returning URB */
17683 + urb->status = -EOVERFLOW;
17684 + tc_finish_urb_later(hcd, urb, urb->status);
17689 + /* Check if it was the last packet in the transfer */
17690 + if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
17691 + /* Special handling for In Ctrl URBs. */
17692 + if(usb_pipecontrol(urb->pipe) && usb_pipein(urb->pipe) &&
17693 + !(urb_priv->ctrl_zout_done)) {
17694 + /* Flag that RX part of Ctrl transfer is done. Because the zout descr
17695 + interrupt hasn't happened yet, the URB will be finished in the
17697 + urb_priv->ctrl_rx_done = 1;
17698 + tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
17699 + " for zout\n", (unsigned int)urb);
17701 + tc_finish_urb(hcd, urb, 0);
17704 + } else { /* ISOC RX */
17706 + isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
17707 + epid, (unsigned int)urb);
17710 + struct usb_iso_packet_descriptor *packet;
17712 + if (urb_priv->urb_state == UNLINK) {
17713 + isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
17715 + } else if (urb_priv->urb_state == NOT_STARTED) {
17716 + isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
17720 + packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
17722 + packet->status = 0;
17724 + if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
17725 + /* We get nodata for empty data transactions, and the rx descriptor's
17726 + hw_len field is not valid in that case. We copy 0 bytes however to
17727 + stay in synch. */
17728 + packet->actual_length = 0;
17730 + packet->actual_length = myNextRxDesc->hw_len;
17731 + /* Make sure the data fits in the buffer. */
17732 + ASSERT(packet->actual_length <= packet->length);
17733 + memcpy(urb->transfer_buffer + packet->offset,
17734 + phys_to_virt(myNextRxDesc->buf), packet->actual_length);
17735 + if(packet->actual_length > 0)
17736 + isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
17737 + packet->actual_length, urb_priv->isoc_packet_counter,
17738 + (unsigned int)urb, urb_priv->urb_num);
17741 + /* Increment the packet counter. */
17742 + urb_priv->isoc_packet_counter++;
17744 + /* Note that we don't care about the eot field in the rx descriptor's
17745 + status. It will always be set for isoc traffic. */
17746 + if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
17747 + /* Complete the urb with status OK. */
17748 + tc_finish_urb(hcd, urb, 0);
17753 + myNextRxDesc->status = 0;
17754 + myNextRxDesc->command |= IO_MASK(USB_IN_command, eol);
17755 + myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
17756 + myLastRxDesc = myNextRxDesc;
17757 + myNextRxDesc = phys_to_virt(myNextRxDesc->next);
17758 + flush_etrax_cache();
17759 + *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, restart);
17762 + local_irq_restore(flags);
17764 + return IRQ_HANDLED;
17767 +static void tc_bulk_start_timer_func(unsigned long dummy) {
17768 + /* We might enable an EP descriptor behind the current DMA position when
17769 + it's about to decide that there are no more bulk traffic and it should
17770 + stop the bulk channel.
17771 + Therefore we periodically check if the bulk channel is stopped and there
17772 + is an enabled bulk EP descriptor, in which case we start the bulk
17775 + if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
17778 + timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");
17780 + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
17781 + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
17782 + timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
17784 + restart_dma8_sub0();
17786 + /* Restart the bulk eot timer since we just started the bulk channel.*/
17787 + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
17789 + /* No need to search any further. */
17794 + timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
17798 +static void tc_bulk_eot_timer_func(unsigned long dummy) {
17799 + struct usb_hcd *hcd = (struct usb_hcd*)dummy;
17801 + /* Because of a race condition in the top half, we might miss a bulk eot.
17802 + This timer "simulates" a bulk eot if we don't get one for a while,
17803 + hopefully correcting the situation. */
17804 + timer_dbg("bulk_eot_timer timed out.\n");
17805 + check_finished_bulk_tx_epids(hcd, 1);
17809 +/*************************************************************/
17810 +/*************************************************************/
17811 +/* Device driver block */
17812 +/*************************************************************/
17813 +/*************************************************************/
17815 +/* Forward declarations for device driver functions */
17816 +static int devdrv_hcd_probe(struct device *);
17817 +static int devdrv_hcd_remove(struct device *);
17819 +static int devdrv_hcd_suspend(struct device *, u32, u32);
17820 +static int devdrv_hcd_resume(struct device *, u32);
17821 +#endif /* CONFIG_PM */
17824 +static struct platform_device *devdrv_hc_platform_device;
17826 +/* device driver interface */
17827 +static struct device_driver devdrv_hc_device_driver = {
17828 + .name = (char *) hc_name,
17829 + .bus = &platform_bus_type,
17831 + .probe = devdrv_hcd_probe,
17832 + .remove = devdrv_hcd_remove,
17835 + .suspend = devdrv_hcd_suspend,
17836 + .resume = devdrv_hcd_resume,
17837 +#endif /* CONFIG_PM */
17840 +/* initialize the host controller and driver */
17841 +static int __init_or_module devdrv_hcd_probe(struct device *dev)
17843 + struct usb_hcd *hcd;
17844 + struct crisv10_hcd *crisv10_hcd;
17847 + /* Check DMA burst length */
17848 + if(IO_EXTRACT(R_BUS_CONFIG, dma_burst, *R_BUS_CONFIG) !=
17849 + IO_STATE(R_BUS_CONFIG, dma_burst, burst32)) {
17850 + devdrv_err("Invalid DMA burst length in Etrax 100LX,"
17851 + " needs to be 32\n");
17855 + hcd = usb_create_hcd(&crisv10_hc_driver, dev, dev->bus_id);
17859 + crisv10_hcd = hcd_to_crisv10_hcd(hcd);
17860 + spin_lock_init(&crisv10_hcd->lock);
17861 + crisv10_hcd->num_ports = num_ports();
17862 + crisv10_hcd->running = 0;
17864 + dev_set_drvdata(dev, crisv10_hcd);
17866 + devdrv_dbg("ETRAX USB IRQs HC:%d RX:%d TX:%d\n", ETRAX_USB_HC_IRQ,
17867 + ETRAX_USB_RX_IRQ, ETRAX_USB_TX_IRQ);
17869 + /* Print out chip version read from registers */
17870 + int rev_maj = *R_USB_REVISION & IO_MASK(R_USB_REVISION, major);
17871 + int rev_min = *R_USB_REVISION & IO_MASK(R_USB_REVISION, minor);
17872 + if(rev_min == 0) {
17873 + devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj);
17875 + devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj, rev_min);
17878 + devdrv_info("Bulk timer interval, start:%d eot:%d\n",
17879 + BULK_START_TIMER_INTERVAL,
17880 + BULK_EOT_TIMER_INTERVAL);
17883 + /* Init root hub data structures */
17885 + devdrv_err("Failed init data for Root Hub\n");
17886 + retval = -ENOMEM;
17889 + if(port_in_use(0)) {
17890 + if (cris_request_io_interface(if_usb_1, "ETRAX100LX USB-HCD")) {
17891 + printk(KERN_CRIT "usb-host: request IO interface usb1 failed");
17895 + devdrv_info("Claimed interface for USB physical port 1\n");
17897 + if(port_in_use(1)) {
17898 + if (cris_request_io_interface(if_usb_2, "ETRAX100LX USB-HCD")) {
17899 + /* Free first interface if second failed to be claimed */
17900 + if(port_in_use(0)) {
17901 + cris_free_io_interface(if_usb_1);
17903 + printk(KERN_CRIT "usb-host: request IO interface usb2 failed");
17907 + devdrv_info("Claimed interface for USB physical port 2\n");
17910 + /* Init transfer controller structs and locks */
17911 + if((retval = tc_init(hcd)) != 0) {
17915 + /* Attach interrupt functions for DMA and init DMA controller */
17916 + if((retval = tc_dma_init(hcd)) != 0) {
17920 + /* Attach the top IRQ handler for USB controller interrupts */
17921 + if (request_irq(ETRAX_USB_HC_IRQ, crisv10_hcd_top_irq, 0,
17922 + "ETRAX 100LX built-in USB (HC)", hcd)) {
17923 + err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
17928 + /* iso_eof is only enabled when isoc traffic is running. */
17929 + *R_USB_IRQ_MASK_SET =
17930 + /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
17931 + IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
17932 + IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
17933 + IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
17934 + IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
17937 + crisv10_ready_wait();
17938 + /* Reset the USB interface. */
17940 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
17941 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
17942 + IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
17944 + /* Designer's Reference, p. 8 - 10 says we should Initiate R_USB_FM_PSTART to
17945 + 0x2A30 (10800), to guarantee that control traffic gets 10% of the
17946 + bandwidth, and periodic transfer may allocate the rest (90%).
17947 + This doesn't work though.
17948 + The value 11960 is chosen to be just after the SOF token, with a couple
17949 + of bit times extra for possible bit stuffing. */
17950 + *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
17952 + crisv10_ready_wait();
17953 + /* Configure the USB interface as a host controller. */
17955 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
17956 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
17957 + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
17960 + /* Check so controller not busy before enabling ports */
17961 + crisv10_ready_wait();
17963 + /* Enable selected USB ports */
17964 + if(port_in_use(0)) {
17965 + *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
17967 + *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
17969 + if(port_in_use(1)) {
17970 + *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
17972 + *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
17975 + crisv10_ready_wait();
17976 + /* Start processing of USB traffic. */
17978 + IO_STATE(R_USB_COMMAND, port_sel, nop) |
17979 + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
17980 + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
17982 + /* Do not continue probing initialization before USB interface is done */
17983 + crisv10_ready_wait();
17985 + /* Register our Host Controller to USB Core
17986 + * Finish the remaining parts of generic HCD initialization: allocate the
17987 + * buffers of consistent memory, register the bus
17988 + * and call the driver's reset() and start() routines. */
17989 + retval = usb_add_hcd(hcd, ETRAX_USB_HC_IRQ, IRQF_DISABLED);
17990 + if (retval != 0) {
17991 + devdrv_err("Failed registering HCD driver\n");
17998 + devdrv_hcd_remove(dev);
18003 +/* cleanup after the host controller and driver */
18004 +static int __init_or_module devdrv_hcd_remove(struct device *dev)
18006 + struct crisv10_hcd *crisv10_hcd = dev_get_drvdata(dev);
18007 + struct usb_hcd *hcd;
18009 + if (!crisv10_hcd)
18011 + hcd = crisv10_hcd_to_hcd(crisv10_hcd);
18014 + /* Stop USB Controller in Etrax 100LX */
18015 + crisv10_hcd_reset(hcd);
18017 + usb_remove_hcd(hcd);
18018 + devdrv_dbg("Removed HCD from USB Core\n");
18020 + /* Free USB Controller IRQ */
18021 + free_irq(ETRAX_USB_HC_IRQ, NULL);
18023 + /* Free resources */
18024 + tc_dma_destroy();
18028 + if(port_in_use(0)) {
18029 + cris_free_io_interface(if_usb_1);
18031 + if(port_in_use(1)) {
18032 + cris_free_io_interface(if_usb_2);
18035 + devdrv_dbg("Freed all claimed resources\n");
18043 +static int devdrv_hcd_suspend(struct usb_hcd *hcd, u32 state, u32 level)
18045 + return 0; /* no-op for now */
18048 +static int devdrv_hcd_resume(struct usb_hcd *hcd, u32 level)
18050 + return 0; /* no-op for now */
18053 +#endif /* CONFIG_PM */
18057 +/*************************************************************/
18058 +/*************************************************************/
18059 +/* Module block */
18060 +/*************************************************************/
18061 +/*************************************************************/
18063 +/* register driver */
18064 +static int __init module_hcd_init(void)
18067 + if (usb_disabled())
18070 + /* Here we select enabled ports by following defines created from
18072 +#ifndef CONFIG_ETRAX_USB_HOST_PORT1
18073 + ports &= ~(1<<0);
18075 +#ifndef CONFIG_ETRAX_USB_HOST_PORT2
18076 + ports &= ~(1<<1);
18079 + printk(KERN_INFO "%s version "VERSION" "COPYRIGHT"\n", product_desc);
18081 + devdrv_hc_platform_device =
18082 + platform_device_register_simple((char *) hc_name, 0, NULL, 0);
18084 + if (IS_ERR(devdrv_hc_platform_device))
18085 + return PTR_ERR(devdrv_hc_platform_device);
18086 + return driver_register(&devdrv_hc_device_driver);
18088 + * Note that we do not set the DMA mask for the device,
18089 + * i.e. we pretend that we will use PIO, since no specific
18090 + * allocation routines are needed for DMA buffers. This will
18091 + * cause the HCD buffer allocation routines to fall back to
18096 +/* unregister driver */
18097 +static void __exit module_hcd_exit(void)
18099 + driver_unregister(&devdrv_hc_device_driver);
18103 +/* Module hooks */
18104 +module_init(module_hcd_init);
18105 +module_exit(module_hcd_exit);
18106 --- linux-2.6.19.2.orig/drivers/usb/host/hc_crisv10.h 2007-01-10 20:10:37.000000000 +0100
18107 +++ linux-2.6.19.2.dev/drivers/usb/host/hc_crisv10.h 1970-01-01 01:00:00.000000000 +0100
18109 -#ifndef __LINUX_ETRAX_USB_H
18110 -#define __LINUX_ETRAX_USB_H
18112 -#include <linux/types.h>
18113 -#include <linux/list.h>
18115 -typedef struct USB_IN_Desc {
18116 - volatile __u16 sw_len;
18117 - volatile __u16 command;
18118 - volatile unsigned long next;
18119 - volatile unsigned long buf;
18120 - volatile __u16 hw_len;
18121 - volatile __u16 status;
18124 -typedef struct USB_SB_Desc {
18125 - volatile __u16 sw_len;
18126 - volatile __u16 command;
18127 - volatile unsigned long next;
18128 - volatile unsigned long buf;
18132 -typedef struct USB_EP_Desc {
18133 - volatile __u16 hw_len;
18134 - volatile __u16 command;
18135 - volatile unsigned long sub;
18136 - volatile unsigned long next;
18140 -struct virt_root_hub {
18147 - struct timer_list rh_int_timer;
18148 - volatile __u16 wPortChange_1;
18149 - volatile __u16 wPortChange_2;
18150 - volatile __u16 prev_wPortStatus_1;
18151 - volatile __u16 prev_wPortStatus_2;
18154 -struct etrax_usb_intr_traffic {
18157 - struct wait_queue *wq;
18160 -typedef struct etrax_usb_hc {
18161 - struct usb_bus *bus;
18162 - struct virt_root_hub rh;
18163 - struct etrax_usb_intr_traffic intr;
18171 - WAITING_FOR_DESCR_INTR
18172 -} etrax_usb_urb_state_t;
18176 -typedef struct etrax_usb_urb_priv {
18177 - /* The first_sb field is used for freeing all SB descriptors belonging
18178 - to an urb. The corresponding ep descriptor's sub pointer cannot be
18179 - used for this since the DMA advances the sub pointer as it processes
18181 - USB_SB_Desc_t *first_sb;
18182 - /* The last_sb field referes to the last SB descriptor that belongs to
18183 - this urb. This is important to know so we can free the SB descriptors
18184 - that ranges between first_sb and last_sb. */
18185 - USB_SB_Desc_t *last_sb;
18187 - /* The rx_offset field is used in ctrl and bulk traffic to keep track
18188 - of the offset in the urb's transfer_buffer where incoming data should be
18192 - /* Counter used in isochronous transfers to keep track of the
18193 - number of packets received/transmitted. */
18194 - __u32 isoc_packet_counter;
18196 - /* This field is used to pass information about the urb's current state between
18197 - the various interrupt handlers (thus marked volatile). */
18198 - volatile etrax_usb_urb_state_t urb_state;
18200 - /* Connection between the submitted urb and ETRAX epid number */
18203 - /* The rx_data_list field is used for periodic traffic, to hold
18204 - received data for later processing in the the complete_urb functions,
18205 - where the data us copied to the urb's transfer_buffer. Basically, we
18206 - use this intermediate storage because we don't know when it's safe to
18207 - reuse the transfer_buffer (FIXME?). */
18208 - struct list_head rx_data_list;
18209 -} etrax_urb_priv_t;
18211 -/* This struct is for passing data from the top half to the bottom half. */
18212 -typedef struct usb_interrupt_registers
18215 - __u32 r_usb_epid_attn;
18216 - __u8 r_usb_status;
18217 - __u16 r_usb_rh_port_status_1;
18218 - __u16 r_usb_rh_port_status_2;
18219 - __u32 r_usb_irq_mask_read;
18220 - __u32 r_usb_fm_number;
18221 - struct work_struct usb_bh;
18222 -} usb_interrupt_registers_t;
18224 -/* This struct is for passing data from the isoc top half to the isoc bottom half. */
18225 -typedef struct usb_isoc_complete_data
18228 - struct work_struct usb_bh;
18229 -} usb_isoc_complete_data_t;
18231 -/* This struct holds data we get from the rx descriptors for DMA channel 9
18232 - for periodic traffic (intr and isoc). */
18233 -typedef struct rx_data
18237 - struct list_head list;
18240 -typedef struct urb_entry
18243 - struct list_head list;
18246 -/* ---------------------------------------------------------------------------
18248 - ------------------------------------------------------------------------- */
18249 -/* destination of request */
18250 -#define RH_INTERFACE 0x01
18251 -#define RH_ENDPOINT 0x02
18252 -#define RH_OTHER 0x03
18254 -#define RH_CLASS 0x20
18255 -#define RH_VENDOR 0x40
18257 -/* Requests: bRequest << 8 | bmRequestType */
18258 -#define RH_GET_STATUS 0x0080
18259 -#define RH_CLEAR_FEATURE 0x0100
18260 -#define RH_SET_FEATURE 0x0300
18261 -#define RH_SET_ADDRESS 0x0500
18262 -#define RH_GET_DESCRIPTOR 0x0680
18263 -#define RH_SET_DESCRIPTOR 0x0700
18264 -#define RH_GET_CONFIGURATION 0x0880
18265 -#define RH_SET_CONFIGURATION 0x0900
18266 -#define RH_GET_STATE 0x0280
18267 -#define RH_GET_INTERFACE 0x0A80
18268 -#define RH_SET_INTERFACE 0x0B00
18269 -#define RH_SYNC_FRAME 0x0C80
18270 -/* Our Vendor Specific Request */
18271 -#define RH_SET_EP 0x2000
18274 -/* Hub port features */
18275 -#define RH_PORT_CONNECTION 0x00
18276 -#define RH_PORT_ENABLE 0x01
18277 -#define RH_PORT_SUSPEND 0x02
18278 -#define RH_PORT_OVER_CURRENT 0x03
18279 -#define RH_PORT_RESET 0x04
18280 -#define RH_PORT_POWER 0x08
18281 -#define RH_PORT_LOW_SPEED 0x09
18282 -#define RH_C_PORT_CONNECTION 0x10
18283 -#define RH_C_PORT_ENABLE 0x11
18284 -#define RH_C_PORT_SUSPEND 0x12
18285 -#define RH_C_PORT_OVER_CURRENT 0x13
18286 -#define RH_C_PORT_RESET 0x14
18288 -/* Hub features */
18289 -#define RH_C_HUB_LOCAL_POWER 0x00
18290 -#define RH_C_HUB_OVER_CURRENT 0x01
18292 -#define RH_DEVICE_REMOTE_WAKEUP 0x00
18293 -#define RH_ENDPOINT_STALL 0x01
18295 -/* Our Vendor Specific feature */
18296 -#define RH_REMOVE_EP 0x00
18299 -#define RH_ACK 0x01
18300 -#define RH_REQ_ERR -1
18301 -#define RH_NACK 0x00
18303 -/* Field definitions for */
18305 -#define USB_IN_command__eol__BITNR 0 /* command macros */
18306 -#define USB_IN_command__eol__WIDTH 1
18307 -#define USB_IN_command__eol__no 0
18308 -#define USB_IN_command__eol__yes 1
18310 -#define USB_IN_command__intr__BITNR 3
18311 -#define USB_IN_command__intr__WIDTH 1
18312 -#define USB_IN_command__intr__no 0
18313 -#define USB_IN_command__intr__yes 1
18315 -#define USB_IN_status__eop__BITNR 1 /* status macros. */
18316 -#define USB_IN_status__eop__WIDTH 1
18317 -#define USB_IN_status__eop__no 0
18318 -#define USB_IN_status__eop__yes 1
18320 -#define USB_IN_status__eot__BITNR 5
18321 -#define USB_IN_status__eot__WIDTH 1
18322 -#define USB_IN_status__eot__no 0
18323 -#define USB_IN_status__eot__yes 1
18325 -#define USB_IN_status__error__BITNR 6
18326 -#define USB_IN_status__error__WIDTH 1
18327 -#define USB_IN_status__error__no 0
18328 -#define USB_IN_status__error__yes 1
18330 -#define USB_IN_status__nodata__BITNR 7
18331 -#define USB_IN_status__nodata__WIDTH 1
18332 -#define USB_IN_status__nodata__no 0
18333 -#define USB_IN_status__nodata__yes 1
18335 -#define USB_IN_status__epid__BITNR 8
18336 -#define USB_IN_status__epid__WIDTH 5
18338 -#define USB_EP_command__eol__BITNR 0
18339 -#define USB_EP_command__eol__WIDTH 1
18340 -#define USB_EP_command__eol__no 0
18341 -#define USB_EP_command__eol__yes 1
18343 -#define USB_EP_command__eof__BITNR 1
18344 -#define USB_EP_command__eof__WIDTH 1
18345 -#define USB_EP_command__eof__no 0
18346 -#define USB_EP_command__eof__yes 1
18348 -#define USB_EP_command__intr__BITNR 3
18349 -#define USB_EP_command__intr__WIDTH 1
18350 -#define USB_EP_command__intr__no 0
18351 -#define USB_EP_command__intr__yes 1
18353 -#define USB_EP_command__enable__BITNR 4
18354 -#define USB_EP_command__enable__WIDTH 1
18355 -#define USB_EP_command__enable__no 0
18356 -#define USB_EP_command__enable__yes 1
18358 -#define USB_EP_command__hw_valid__BITNR 5
18359 -#define USB_EP_command__hw_valid__WIDTH 1
18360 -#define USB_EP_command__hw_valid__no 0
18361 -#define USB_EP_command__hw_valid__yes 1
18363 -#define USB_EP_command__epid__BITNR 8
18364 -#define USB_EP_command__epid__WIDTH 5
18366 -#define USB_SB_command__eol__BITNR 0 /* command macros. */
18367 -#define USB_SB_command__eol__WIDTH 1
18368 -#define USB_SB_command__eol__no 0
18369 -#define USB_SB_command__eol__yes 1
18371 -#define USB_SB_command__eot__BITNR 1
18372 -#define USB_SB_command__eot__WIDTH 1
18373 -#define USB_SB_command__eot__no 0
18374 -#define USB_SB_command__eot__yes 1
18376 -#define USB_SB_command__intr__BITNR 3
18377 -#define USB_SB_command__intr__WIDTH 1
18378 -#define USB_SB_command__intr__no 0
18379 -#define USB_SB_command__intr__yes 1
18381 -#define USB_SB_command__tt__BITNR 4
18382 -#define USB_SB_command__tt__WIDTH 2
18383 -#define USB_SB_command__tt__zout 0
18384 -#define USB_SB_command__tt__in 1
18385 -#define USB_SB_command__tt__out 2
18386 -#define USB_SB_command__tt__setup 3
18389 -#define USB_SB_command__rem__BITNR 8
18390 -#define USB_SB_command__rem__WIDTH 6
18392 -#define USB_SB_command__full__BITNR 6
18393 -#define USB_SB_command__full__WIDTH 1
18394 -#define USB_SB_command__full__no 0
18395 -#define USB_SB_command__full__yes 1
18398 --- linux-2.6.19.2.orig/drivers/usb/host/hc-crisv10.h 1970-01-01 01:00:00.000000000 +0100
18399 +++ linux-2.6.19.2.dev/drivers/usb/host/hc-crisv10.h 2006-01-27 13:59:58.000000000 +0100
18401 +#ifndef __LINUX_ETRAX_USB_H
18402 +#define __LINUX_ETRAX_USB_H
18404 +#include <linux/types.h>
18405 +#include <linux/list.h>
18407 +struct USB_IN_Desc {
18408 + volatile __u16 sw_len;
18409 + volatile __u16 command;
18410 + volatile unsigned long next;
18411 + volatile unsigned long buf;
18412 + volatile __u16 hw_len;
18413 + volatile __u16 status;
18416 +struct USB_SB_Desc {
18417 + volatile __u16 sw_len;
18418 + volatile __u16 command;
18419 + volatile unsigned long next;
18420 + volatile unsigned long buf;
18423 +struct USB_EP_Desc {
18424 + volatile __u16 hw_len;
18425 + volatile __u16 command;
18426 + volatile unsigned long sub;
18427 + volatile unsigned long next;
18431 +/* Root Hub port status struct */
18432 +struct crisv10_rh {
18433 + volatile __u16 wPortChange[2];
18434 + volatile __u16 wPortStatusPrev[2];
18437 +/* HCD description */
18438 +struct crisv10_hcd {
18445 +/* Endpoint HC private data description */
18446 +struct crisv10_ep_priv {
18450 +/* Additional software state info for a USB Controller epid */
18451 +struct etrax_epid {
18452 + __u8 inuse; /* !0 = setup in Etrax and used for a endpoint */
18453 + __u8 disabled; /* !0 = Temporarly disabled to avoid resubmission */
18454 + __u8 type; /* Setup as: PIPE_BULK, PIPE_CONTROL ... */
18455 + __u8 out_traffic; /* !0 = This epid is for out traffic */
18458 +/* Struct to hold information of scheduled later URB completion */
18459 +struct urb_later_data {
18460 + struct work_struct ws;
18461 + struct usb_hcd *hcd;
18472 +} crisv10_urb_state_t;
18475 +struct crisv10_urb_priv {
18476 + /* Sequence number for this URB. Every new submited URB gets this from
18477 + a incrementing counter. Used when a URB is scheduled for later finish to
18478 + be sure that the intended URB hasn't already been completed (device
18479 + drivers has a tendency to reuse URBs once they are completed, causing us
18480 + to not be able to single old ones out only based on the URB pointer.) */
18483 + /* The first_sb field is used for freeing all SB descriptors belonging
18484 + to an urb. The corresponding ep descriptor's sub pointer cannot be
18485 + used for this since the DMA advances the sub pointer as it processes
18487 + struct USB_SB_Desc *first_sb;
18489 + /* The last_sb field referes to the last SB descriptor that belongs to
18490 + this urb. This is important to know so we can free the SB descriptors
18491 + that ranges between first_sb and last_sb. */
18492 + struct USB_SB_Desc *last_sb;
18494 + /* The rx_offset field is used in ctrl and bulk traffic to keep track
18495 + of the offset in the urb's transfer_buffer where incoming data should be
18499 + /* Counter used in isochronous transfers to keep track of the
18500 + number of packets received/transmitted. */
18501 + __u32 isoc_packet_counter;
18503 + /* Flag that marks if this Isoc Out URB has finished it's transfer. Used
18504 + because several URBs can be finished before list is processed */
18505 + __u8 isoc_out_done;
18507 + /* This field is used to pass information about the urb's current state
18508 + between the various interrupt handlers (thus marked volatile). */
18509 + volatile crisv10_urb_state_t urb_state;
18511 + /* In Ctrl transfers consist of (at least) 3 packets: SETUP, IN and ZOUT.
18512 + When DMA8 sub-channel 2 has processed the SB list for this sequence we
18513 + get a interrupt. We also get a interrupt for In transfers and which
18514 + one of these interrupts that comes first depends of data size and device.
18515 + To be sure that we have got both interrupts before we complete the URB
18516 + we have these to flags that shows which part that has completed.
18517 + We can then check when we get one of the interrupts that if the other has
18518 + occured it's safe for us to complete the URB, otherwise we set appropriate
18519 + flag and do the completion when we get the other interrupt. */
18520 + volatile unsigned char ctrl_zout_done;
18521 + volatile unsigned char ctrl_rx_done;
18523 + /* Connection between the submitted urb and ETRAX epid number */
18526 + /* The rx_data_list field is used for periodic traffic, to hold
18527 + received data for later processing in the the complete_urb functions,
18528 + where the data us copied to the urb's transfer_buffer. Basically, we
18529 + use this intermediate storage because we don't know when it's safe to
18530 + reuse the transfer_buffer (FIXME?). */
18531 + struct list_head rx_data_list;
18534 + /* The interval time rounded up to closest 2^N */
18537 + /* Pool of EP descriptors needed if it's a INTR transfer.
18538 + Amount of EPs in pool correspons to how many INTR that should
18539 + be inserted in TxIntrEPList (max 128, defined by MAX_INTR_INTERVAL) */
18540 + struct USB_EP_Desc* intr_ep_pool[128];
18542 + /* The mount of EPs allocated for this INTR URB */
18543 + int intr_ep_pool_length;
18545 + /* Pointer to info struct if URB is scheduled to be finished later */
18546 + struct urb_later_data* later_data;
18550 +/* This struct is for passing data from the top half to the bottom half irq
18552 +struct crisv10_irq_reg {
18553 + struct usb_hcd* hcd;
18554 + __u32 r_usb_epid_attn;
18555 + __u8 r_usb_status;
18556 + __u16 r_usb_rh_port_status_1;
18557 + __u16 r_usb_rh_port_status_2;
18558 + __u32 r_usb_irq_mask_read;
18559 + __u32 r_usb_fm_number;
18560 + struct work_struct usb_bh;
18564 +/* This struct is for passing data from the isoc top half to the isoc bottom
18566 +struct crisv10_isoc_complete_data {
18567 + struct usb_hcd *hcd;
18569 + struct work_struct usb_bh;
18572 +/* Entry item for URB lists for each endpint */
18573 +typedef struct urb_entry
18576 + struct list_head list;
18579 +/* ---------------------------------------------------------------------------
18581 + ------------------------------------------------------------------------- */
18582 +/* destination of request */
18583 +#define RH_INTERFACE 0x01
18584 +#define RH_ENDPOINT 0x02
18585 +#define RH_OTHER 0x03
18587 +#define RH_CLASS 0x20
18588 +#define RH_VENDOR 0x40
18590 +/* Requests: bRequest << 8 | bmRequestType */
18591 +#define RH_GET_STATUS 0x0080
18592 +#define RH_CLEAR_FEATURE 0x0100
18593 +#define RH_SET_FEATURE 0x0300
18594 +#define RH_SET_ADDRESS 0x0500
18595 +#define RH_GET_DESCRIPTOR 0x0680
18596 +#define RH_SET_DESCRIPTOR 0x0700
18597 +#define RH_GET_CONFIGURATION 0x0880
18598 +#define RH_SET_CONFIGURATION 0x0900
18599 +#define RH_GET_STATE 0x0280
18600 +#define RH_GET_INTERFACE 0x0A80
18601 +#define RH_SET_INTERFACE 0x0B00
18602 +#define RH_SYNC_FRAME 0x0C80
18603 +/* Our Vendor Specific Request */
18604 +#define RH_SET_EP 0x2000
18607 +/* Hub port features */
18608 +#define RH_PORT_CONNECTION 0x00
18609 +#define RH_PORT_ENABLE 0x01
18610 +#define RH_PORT_SUSPEND 0x02
18611 +#define RH_PORT_OVER_CURRENT 0x03
18612 +#define RH_PORT_RESET 0x04
18613 +#define RH_PORT_POWER 0x08
18614 +#define RH_PORT_LOW_SPEED 0x09
18615 +#define RH_C_PORT_CONNECTION 0x10
18616 +#define RH_C_PORT_ENABLE 0x11
18617 +#define RH_C_PORT_SUSPEND 0x12
18618 +#define RH_C_PORT_OVER_CURRENT 0x13
18619 +#define RH_C_PORT_RESET 0x14
18621 +/* Hub features */
18622 +#define RH_C_HUB_LOCAL_POWER 0x00
18623 +#define RH_C_HUB_OVER_CURRENT 0x01
18625 +#define RH_DEVICE_REMOTE_WAKEUP 0x00
18626 +#define RH_ENDPOINT_STALL 0x01
18628 +/* Our Vendor Specific feature */
18629 +#define RH_REMOVE_EP 0x00
18632 +#define RH_ACK 0x01
18633 +#define RH_REQ_ERR -1
18634 +#define RH_NACK 0x00
18636 +/* Field definitions for */
18638 +#define USB_IN_command__eol__BITNR 0 /* command macros */
18639 +#define USB_IN_command__eol__WIDTH 1
18640 +#define USB_IN_command__eol__no 0
18641 +#define USB_IN_command__eol__yes 1
18643 +#define USB_IN_command__intr__BITNR 3
18644 +#define USB_IN_command__intr__WIDTH 1
18645 +#define USB_IN_command__intr__no 0
18646 +#define USB_IN_command__intr__yes 1
18648 +#define USB_IN_status__eop__BITNR 1 /* status macros. */
18649 +#define USB_IN_status__eop__WIDTH 1
18650 +#define USB_IN_status__eop__no 0
18651 +#define USB_IN_status__eop__yes 1
18653 +#define USB_IN_status__eot__BITNR 5
18654 +#define USB_IN_status__eot__WIDTH 1
18655 +#define USB_IN_status__eot__no 0
18656 +#define USB_IN_status__eot__yes 1
18658 +#define USB_IN_status__error__BITNR 6
18659 +#define USB_IN_status__error__WIDTH 1
18660 +#define USB_IN_status__error__no 0
18661 +#define USB_IN_status__error__yes 1
18663 +#define USB_IN_status__nodata__BITNR 7
18664 +#define USB_IN_status__nodata__WIDTH 1
18665 +#define USB_IN_status__nodata__no 0
18666 +#define USB_IN_status__nodata__yes 1
18668 +#define USB_IN_status__epid__BITNR 8
18669 +#define USB_IN_status__epid__WIDTH 5
18671 +#define USB_EP_command__eol__BITNR 0
18672 +#define USB_EP_command__eol__WIDTH 1
18673 +#define USB_EP_command__eol__no 0
18674 +#define USB_EP_command__eol__yes 1
18676 +#define USB_EP_command__eof__BITNR 1
18677 +#define USB_EP_command__eof__WIDTH 1
18678 +#define USB_EP_command__eof__no 0
18679 +#define USB_EP_command__eof__yes 1
18681 +#define USB_EP_command__intr__BITNR 3
18682 +#define USB_EP_command__intr__WIDTH 1
18683 +#define USB_EP_command__intr__no 0
18684 +#define USB_EP_command__intr__yes 1
18686 +#define USB_EP_command__enable__BITNR 4
18687 +#define USB_EP_command__enable__WIDTH 1
18688 +#define USB_EP_command__enable__no 0
18689 +#define USB_EP_command__enable__yes 1
18691 +#define USB_EP_command__hw_valid__BITNR 5
18692 +#define USB_EP_command__hw_valid__WIDTH 1
18693 +#define USB_EP_command__hw_valid__no 0
18694 +#define USB_EP_command__hw_valid__yes 1
18696 +#define USB_EP_command__epid__BITNR 8
18697 +#define USB_EP_command__epid__WIDTH 5
18699 +#define USB_SB_command__eol__BITNR 0 /* command macros. */
18700 +#define USB_SB_command__eol__WIDTH 1
18701 +#define USB_SB_command__eol__no 0
18702 +#define USB_SB_command__eol__yes 1
18704 +#define USB_SB_command__eot__BITNR 1
18705 +#define USB_SB_command__eot__WIDTH 1
18706 +#define USB_SB_command__eot__no 0
18707 +#define USB_SB_command__eot__yes 1
18709 +#define USB_SB_command__intr__BITNR 3
18710 +#define USB_SB_command__intr__WIDTH 1
18711 +#define USB_SB_command__intr__no 0
18712 +#define USB_SB_command__intr__yes 1
18714 +#define USB_SB_command__tt__BITNR 4
18715 +#define USB_SB_command__tt__WIDTH 2
18716 +#define USB_SB_command__tt__zout 0
18717 +#define USB_SB_command__tt__in 1
18718 +#define USB_SB_command__tt__out 2
18719 +#define USB_SB_command__tt__setup 3
18722 +#define USB_SB_command__rem__BITNR 8
18723 +#define USB_SB_command__rem__WIDTH 6
18725 +#define USB_SB_command__full__BITNR 6
18726 +#define USB_SB_command__full__WIDTH 1
18727 +#define USB_SB_command__full__no 0
18728 +#define USB_SB_command__full__yes 1
18731 diff -urN linux-2.6.19.2.orig/drivers/net/cris/Makefile linux-2.6.19.2.dev/drivers/net/cris/Makefile
18732 --- linux-2.6.19.2.orig/drivers/net/cris/Makefile 2007-01-10 20:10:37.000000000 +0100
18733 +++ linux-2.6.19.2.dev/drivers/net/cris/Makefile 2005-01-04 13:09:12.000000000 +0100
18735 obj-$(CONFIG_ETRAX_ARCH_V10) += eth_v10.o
18736 +obj-$(CONFIG_ETRAX_ARCH_V32) += eth_v32.o
18737 diff -urN linux-2.6.19.2.orig/drivers/net/cris/eth_v10.c linux-2.6.19.2.dev/drivers/net/cris/eth_v10.c
18738 --- linux-2.6.19.2.orig/drivers/net/cris/eth_v10.c 2007-01-10 20:10:37.000000000 +0100
18739 +++ linux-2.6.19.2.dev/drivers/net/cris/eth_v10.c 2007-01-15 16:35:48.000000000 +0100
18741 -/* $Id: ethernet.c,v 1.31 2004/10/18 14:49:03 starvik Exp $
18743 - * e100net.c: A network driver for the ETRAX 100LX network controller.
18745 + * Driver for the ETRAX 100LX network controller.
18747 - * Copyright (c) 1998-2002 Axis Communications AB.
18748 + * Copyright (c) 1998-2006 Axis Communications AB.
18750 * The outline of this driver comes from skeleton.c.
18752 - * $Log: ethernet.c,v $
18753 - * Revision 1.31 2004/10/18 14:49:03 starvik
18754 - * Use RX interrupt as random source
18756 - * Revision 1.30 2004/09/29 10:44:04 starvik
18757 - * Enabed MAC-address output again
18759 - * Revision 1.29 2004/08/24 07:14:05 starvik
18760 - * Make use of generic MDIO interface and constants.
18762 - * Revision 1.28 2004/08/20 09:37:11 starvik
18763 - * Added support for Intel LXT972A. Creds to Randy Scarborough.
18765 - * Revision 1.27 2004/08/16 12:37:22 starvik
18766 - * Merge of Linux 2.6.8
18768 - * Revision 1.25 2004/06/21 10:29:57 starvik
18769 - * Merge of Linux 2.6.7
18771 - * Revision 1.23 2004/06/09 05:29:22 starvik
18772 - * Avoid any race where R_DMA_CH1_FIRST is NULL (may trigger cache bug).
18774 - * Revision 1.22 2004/05/14 07:58:03 starvik
18775 - * Merge of changes from 2.4
18777 - * Revision 1.20 2004/03/11 11:38:40 starvik
18778 - * Merge of Linux 2.6.4
18780 - * Revision 1.18 2003/12/03 13:45:46 starvik
18781 - * Use hardware pad for short packets to prevent information leakage.
18783 - * Revision 1.17 2003/07/04 08:27:37 starvik
18784 - * Merge of Linux 2.5.74
18786 - * Revision 1.16 2003/04/24 08:28:22 starvik
18787 - * New LED behaviour: LED off when no link
18789 - * Revision 1.15 2003/04/09 05:20:47 starvik
18790 - * Merge of Linux 2.5.67
18792 - * Revision 1.13 2003/03/06 16:11:01 henriken
18793 - * Off by one error in group address register setting.
18795 - * Revision 1.12 2003/02/27 17:24:19 starvik
18796 - * Corrected Rev to Revision
18798 - * Revision 1.11 2003/01/24 09:53:21 starvik
18799 - * Oops. Initialize GA to 0, not to 1
18801 - * Revision 1.10 2003/01/24 09:50:55 starvik
18802 - * Initialize GA_0 and GA_1 to 0 to avoid matching of unwanted packets
18804 - * Revision 1.9 2002/12/13 07:40:58 starvik
18805 - * Added basic ethtool interface
18806 - * Handled out of memory when allocating new buffers
18808 - * Revision 1.8 2002/12/11 13:13:57 starvik
18809 - * Added arch/ to v10 specific includes
18810 - * Added fix from Linux 2.4 in serial.c (flush_to_flip_buffer)
18812 - * Revision 1.7 2002/11/26 09:41:42 starvik
18813 - * Added e100_set_config (standard interface to set media type)
18814 - * Added protection against preemptive scheduling
18815 - * Added standard MII ioctls
18817 - * Revision 1.6 2002/11/21 07:18:18 starvik
18818 - * Timers must be initialized in 2.5.48
18820 - * Revision 1.5 2002/11/20 11:56:11 starvik
18821 - * Merge of Linux 2.5.48
18823 - * Revision 1.4 2002/11/18 07:26:46 starvik
18824 - * Linux 2.5 port of latest Linux 2.4 ethernet driver
18826 - * Revision 1.33 2002/10/02 20:16:17 hp
18827 - * SETF, SETS: Use underscored IO_x_ macros rather than incorrect token concatenation
18829 - * Revision 1.32 2002/09/16 06:05:58 starvik
18830 - * Align memory returned by dev_alloc_skb
18831 - * Moved handling of sent packets to interrupt to avoid reference counting problem
18833 - * Revision 1.31 2002/09/10 13:28:23 larsv
18834 - * Return -EINVAL for unknown ioctls to avoid confusing tools that tests
18835 - * for supported functionality by issuing special ioctls, i.e. wireless
18838 - * Revision 1.30 2002/05/07 18:50:08 johana
18839 - * Correct spelling in comments.
18841 - * Revision 1.29 2002/05/06 05:38:49 starvik
18842 - * Performance improvements:
18843 - * Large packets are not copied (breakpoint set to 256 bytes)
18844 - * The cache bug workaround is delayed until half of the receive list
18846 - * Added transmit list
18847 - * Transmit interrupts are only enabled when transmit queue is full
18849 - * Revision 1.28.2.1 2002/04/30 08:15:51 starvik
18850 - * Performance improvements:
18851 - * Large packets are not copied (breakpoint set to 256 bytes)
18852 - * The cache bug workaround is delayed until half of the receive list
18854 - * Added transmit list
18855 - * Transmit interrupts are only enabled when transmit queue is full
18857 - * Revision 1.28 2002/04/22 11:47:21 johana
18858 - * Fix according to 2.4.19-pre7. time_after/time_before and
18859 - * missing end of comment.
18860 - * The patch has a typo for ethernet.c in e100_clear_network_leds(),
18861 - * that is fixed here.
18863 - * Revision 1.27 2002/04/12 11:55:11 bjornw
18866 - * Revision 1.26 2002/03/15 17:11:02 bjornw
18867 - * Use prepare_rx_descriptor after the CPU has touched the receiving descs
18869 - * Revision 1.25 2002/03/08 13:07:53 bjornw
18870 - * Unnecessary spinlock removed
18872 - * Revision 1.24 2002/02/20 12:57:43 fredriks
18873 - * Replaced MIN() with min().
18875 - * Revision 1.23 2002/02/20 10:58:14 fredriks
18876 - * Strip the Ethernet checksum (4 bytes) before forwarding a frame to upper layers.
18878 - * Revision 1.22 2002/01/30 07:48:22 matsfg
18879 - * Initiate R_NETWORK_TR_CTRL
18881 - * Revision 1.21 2001/11/23 11:54:49 starvik
18882 - * Added IFF_PROMISC and IFF_ALLMULTI handling in set_multicast_list
18883 - * Removed compiler warnings
18885 - * Revision 1.20 2001/11/12 19:26:00 pkj
18886 - * * Corrected e100_negotiate() to not assign half to current_duplex when
18887 - * it was supposed to compare them...
18888 - * * Cleaned up failure handling in e100_open().
18889 - * * Fixed compiler warnings.
18891 - * Revision 1.19 2001/11/09 07:43:09 starvik
18892 - * Added full duplex support
18893 - * Added ioctl to set speed and duplex
18894 - * Clear LED timer only runs when LED is lit
18896 - * Revision 1.18 2001/10/03 14:40:43 jonashg
18897 - * Update rx_bytes counter.
18899 - * Revision 1.17 2001/06/11 12:43:46 olof
18900 - * Modified defines for network LED behavior
18902 - * Revision 1.16 2001/05/30 06:12:46 markusl
18903 - * TxDesc.next should not be set to NULL
18905 - * Revision 1.15 2001/05/29 10:27:04 markusl
18906 - * Updated after review remarks:
18907 - * +Use IO_EXTRACT
18908 - * +Handle underrun
18910 - * Revision 1.14 2001/05/29 09:20:14 jonashg
18911 - * Use driver name on printk output so one can tell which driver that complains.
18913 - * Revision 1.13 2001/05/09 12:35:59 johana
18914 - * Use DMA_NBR and IRQ_NBR defines from dma.h and irq.h
18916 - * Revision 1.12 2001/04/05 11:43:11 tobiasa
18917 - * Check dev before panic.
18919 - * Revision 1.11 2001/04/04 11:21:05 markusl
18920 - * Updated according to review remarks
18922 - * Revision 1.10 2001/03/26 16:03:06 bjornw
18923 - * Needs linux/config.h
18925 - * Revision 1.9 2001/03/19 14:47:48 pkj
18926 - * * Make sure there is always a pause after the network LEDs are
18927 - * changed so they will not look constantly lit during heavy traffic.
18928 - * * Always use HZ when setting times relative to jiffies.
18929 - * * Use LED_NETWORK_SET() when setting the network LEDs.
18931 - * Revision 1.8 2001/02/27 13:52:48 bjornw
18932 - * malloc.h -> slab.h
18934 - * Revision 1.7 2001/02/23 13:46:38 bjornw
18935 - * Spellling check
18937 - * Revision 1.6 2001/01/26 15:21:04 starvik
18938 - * Don't disable interrupts while reading MDIO registers (MDIO is slow)
18939 - * Corrected promiscuous mode
18940 - * Improved deallocation of IRQs ("ifconfig eth0 down" now works)
18942 - * Revision 1.5 2000/11/29 17:22:22 bjornw
18943 - * Get rid of the udword types legacy stuff
18945 - * Revision 1.4 2000/11/22 16:36:09 bjornw
18946 - * Please marketing by using the correct case when spelling Etrax.
18948 - * Revision 1.3 2000/11/21 16:43:04 bjornw
18949 - * Minor short->int change
18951 - * Revision 1.2 2000/11/08 14:27:57 bjornw
18954 - * Revision 1.1 2000/11/06 13:56:00 bjornw
18955 - * Verbatim copy of the 1.24 version of e100net.c from elinux
18957 - * Revision 1.24 2000/10/04 15:55:23 bjornw
18958 - * * Use virt_to_phys etc. for DMA addresses
18959 - * * Removed bogus CHECKSUM_UNNECESSARY
18966 #include <asm/bitops.h>
18967 #include <asm/ethernet.h>
18968 #include <asm/cache.h>
18969 +#include <asm/arch/io_interface_mux.h>
18974 * by this lock as well.
18978 + spinlock_t led_lock; /* Protect LED state */
18979 + spinlock_t transceiver_lock; /* Protect transceiver state. */
18982 typedef struct etrax_eth_descr
18984 void (*check_duplex)(struct net_device* dev);
18987 -struct transceiver_ops* transceiver;
18989 /* Duplex settings */
18994 /* Dma descriptors etc. */
18996 -#define MAX_MEDIA_DATA_SIZE 1518
18997 +#define MAX_MEDIA_DATA_SIZE 1522
18999 #define MIN_PACKET_LEN 46
19000 #define ETHER_HEAD_LEN 14
19001 @@ -332,9 +123,9 @@
19002 #define MDIO_TDK_DIAGNOSTIC_DPLX 0x800
19004 /*Intel LXT972A specific*/
19005 -#define MDIO_INT_STATUS_REG_2 0x0011
19006 -#define MDIO_INT_FULL_DUPLEX_IND ( 1 << 9 )
19007 -#define MDIO_INT_SPEED ( 1 << 14 )
19008 +#define MDIO_INT_STATUS_REG_2 0x0011
19009 +#define MDIO_INT_FULL_DUPLEX_IND (1 << 9)
19010 +#define MDIO_INT_SPEED (1 << 14)
19012 /* Network flash constants */
19013 #define NET_FLASH_TIME (HZ/50) /* 20 ms */
19014 @@ -345,8 +136,8 @@
19015 #define NO_NETWORK_ACTIVITY 0
19016 #define NETWORK_ACTIVITY 1
19018 -#define NBR_OF_RX_DESC 64
19019 -#define NBR_OF_TX_DESC 256
19020 +#define NBR_OF_RX_DESC 32
19021 +#define NBR_OF_TX_DESC 16
19023 /* Large packets are sent directly to upper layers while small packets are */
19024 /* copied (to reduce memory waste). The following constant decides the breakpoint */
19025 @@ -368,7 +159,6 @@
19026 static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor to
19028 static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */
19029 -static etrax_eth_descr *myPrevRxDesc; /* The descriptor right before myNextRxDesc */
19031 static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
19033 @@ -378,7 +168,6 @@
19034 static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
19036 static unsigned int network_rec_config_shadow = 0;
19037 -static unsigned int mdio_phy_addr; /* Transciever address */
19039 static unsigned int network_tr_ctrl_shadow = 0;
19041 @@ -412,7 +201,7 @@
19042 static void e100_tx_timeout(struct net_device *dev);
19043 static struct net_device_stats *e100_get_stats(struct net_device *dev);
19044 static void set_multicast_list(struct net_device *dev);
19045 -static void e100_hardware_send_packet(char *buf, int length);
19046 +static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
19047 static void update_rx_stats(struct net_device_stats *);
19048 static void update_tx_stats(struct net_device_stats *);
19049 static int e100_probe_transceiver(struct net_device* dev);
19050 @@ -435,7 +224,10 @@
19051 static void e100_set_network_leds(int active);
19053 static const struct ethtool_ops e100_ethtool_ops;
19055 +#if defined(CONFIG_ETRAX_NO_PHY)
19056 +static void dummy_check_speed(struct net_device* dev);
19057 +static void dummy_check_duplex(struct net_device* dev);
19059 static void broadcom_check_speed(struct net_device* dev);
19060 static void broadcom_check_duplex(struct net_device* dev);
19061 static void tdk_check_speed(struct net_device* dev);
19062 @@ -444,16 +236,29 @@
19063 static void intel_check_duplex(struct net_device* dev);
19064 static void generic_check_speed(struct net_device* dev);
19065 static void generic_check_duplex(struct net_device* dev);
19067 +#ifdef CONFIG_NET_POLL_CONTROLLER
19068 +static void e100_netpoll(struct net_device* dev);
19071 +static int autoneg_normal = 1;
19073 struct transceiver_ops transceivers[] =
19075 +#if defined(CONFIG_ETRAX_NO_PHY)
19076 + {0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */
19078 {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
19079 {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */
19080 {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */
19081 - {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/
19082 + {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/
19083 {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */
19087 +struct transceiver_ops* transceiver = &transceivers[0];
19088 +static unsigned int mdio_phy_addr = 0; /* PHY address on MDIO bus */
19090 #define tx_done(dev) (*R_DMA_CH0_CMD == 0)
19093 @@ -468,18 +273,26 @@
19094 etrax_ethernet_init(void)
19096 struct net_device *dev;
19097 - struct net_local* np;
19098 + struct net_local* np;
19102 - "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 2000-2003 Axis Communications AB\n");
19104 + "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2006 Axis Communications AB\n");
19106 + if (cris_request_io_interface(if_eth, cardname)) {
19107 + printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
19111 dev = alloc_etherdev(sizeof(struct net_local));
19117 + np = netdev_priv(dev);
19119 + /* we do our own locking */
19120 + dev->features |= NETIF_F_LLTX;
19122 dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
19124 /* now setup our etrax specific stuff */
19125 @@ -495,18 +308,26 @@
19126 dev->get_stats = e100_get_stats;
19127 dev->set_multicast_list = set_multicast_list;
19128 dev->set_mac_address = e100_set_mac_address;
19129 - dev->ethtool_ops = &e100_ethtool_ops;
19130 + dev->ethtool_ops = &e100_ethtool_ops;
19131 dev->do_ioctl = e100_ioctl;
19132 - dev->set_config = e100_set_config;
19133 + dev->set_config = e100_set_config;
19134 dev->tx_timeout = e100_tx_timeout;
19135 +#ifdef CONFIG_NET_POLL_CONTROLLER
19136 + dev->poll_controller = e100_netpoll;
19139 + spin_lock_init(&np->lock);
19140 + spin_lock_init(&np->led_lock);
19141 + spin_lock_init(&np->transceiver_lock);
19143 /* Initialise the list of Etrax DMA-descriptors */
19145 /* Initialise receive descriptors */
19147 for (i = 0; i < NBR_OF_RX_DESC; i++) {
19148 - /* Allocate two extra cachelines to make sure that buffer used by DMA
19149 - * does not share cacheline with any other data (to avoid cache bug)
19150 + /* Allocate two extra cachelines to make sure that buffer used
19151 + * by DMA does not share cacheline with any other data (to
19152 + * avoid cache bug)
19154 RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
19155 if (!RxDescList[i].skb)
19156 @@ -517,6 +338,7 @@
19157 RxDescList[i].descr.buf = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data));
19158 RxDescList[i].descr.status = 0;
19159 RxDescList[i].descr.hw_len = 0;
19161 prepare_rx_descriptor(&RxDescList[i].descr);
19164 @@ -542,7 +364,6 @@
19166 myNextRxDesc = &RxDescList[0];
19167 myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
19168 - myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
19169 myFirstTxDesc = &TxDescList[0];
19170 myNextTxDesc = &TxDescList[0];
19171 myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1];
19172 @@ -563,18 +384,19 @@
19173 current_speed = 10;
19174 current_speed_selection = 0; /* Auto */
19175 speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
19176 - duplex_timer.data = (unsigned long)dev;
19177 + speed_timer.data = (unsigned long)dev;
19178 speed_timer.function = e100_check_speed;
19180 clear_led_timer.function = e100_clear_network_leds;
19181 + clear_led_timer.data = (unsigned long)dev;
19184 current_duplex = autoneg;
19185 duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
19186 - duplex_timer.data = (unsigned long)dev;
19187 + duplex_timer.data = (unsigned long)dev;
19188 duplex_timer.function = e100_check_duplex;
19190 - /* Initialize mii interface */
19191 + /* Initialize mii interface */
19192 np->mii_if.phy_id = mdio_phy_addr;
19193 np->mii_if.phy_id_mask = 0x1f;
19194 np->mii_if.reg_num_mask = 0x1f;
19195 @@ -586,6 +408,9 @@
19196 /* unwanted addresses are matched */
19197 *R_NETWORK_GA_0 = 0x00000000;
19198 *R_NETWORK_GA_1 = 0x00000000;
19200 + /* Initialize next time the led can flash */
19201 + led_next_time = jiffies;
19205 @@ -596,7 +421,7 @@
19207 e100_set_mac_address(struct net_device *dev, void *p)
19209 - struct net_local *np = (struct net_local *)dev->priv;
19210 + struct net_local *np = netdev_priv(dev);
19211 struct sockaddr *addr = p;
19214 @@ -680,17 +505,36 @@
19215 /* allocate the irq corresponding to the transmitting DMA */
19217 if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0,
19218 - cardname, (void *)dev)) {
19219 + cardname, (void *)dev)) {
19223 /* allocate the irq corresponding to the network errors etc */
19225 if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
19226 - cardname, (void *)dev)) {
19227 + cardname, (void *)dev)) {
19232 + * Always allocate the DMA channels after the IRQ,
19233 + * and clean up on failure.
19236 + if (cris_request_dma(NETWORK_TX_DMA_NBR,
19238 + DMA_VERBOSE_ON_ERROR,
19240 + goto grace_exit3;
19243 + if (cris_request_dma(NETWORK_RX_DMA_NBR,
19245 + DMA_VERBOSE_ON_ERROR,
19247 + goto grace_exit4;
19250 /* give the HW an idea of what MAC address we want */
19252 *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
19253 @@ -705,6 +549,7 @@
19255 *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
19257 + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
19258 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
19259 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
19260 SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
19261 @@ -724,8 +569,7 @@
19262 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
19263 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
19265 - save_flags(flags);
19267 + local_irq_save(flags);
19269 /* enable the irq's for ethernet DMA */
19271 @@ -757,12 +601,13 @@
19273 *R_DMA_CH0_FIRST = 0;
19274 *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
19275 + netif_start_queue(dev);
19277 - restore_flags(flags);
19278 + local_irq_restore(flags);
19280 /* Probe for transceiver */
19281 if (e100_probe_transceiver(dev))
19282 - goto grace_exit3;
19283 + goto grace_exit5;
19285 /* Start duplex/speed timers */
19286 add_timer(&speed_timer);
19287 @@ -771,10 +616,14 @@
19288 /* We are now ready to accept transmit requeusts from
19289 * the queueing layer of the networking.
19291 - netif_start_queue(dev);
19292 + netif_carrier_on(dev);
19297 + cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
19299 + cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
19301 free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
19303 @@ -785,7 +634,13 @@
19308 +#if defined(CONFIG_ETRAX_NO_PHY)
19310 +dummy_check_speed(struct net_device* dev)
19312 + current_speed = 100;
19316 generic_check_speed(struct net_device* dev)
19318 @@ -821,15 +676,18 @@
19319 data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
19320 current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
19325 e100_check_speed(unsigned long priv)
19327 struct net_device* dev = (struct net_device*)priv;
19328 + struct net_local *np = netdev_priv(dev);
19329 static int led_initiated = 0;
19330 unsigned long data;
19331 int old_speed = current_speed;
19333 + spin_lock(&np->transceiver_lock);
19335 data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR);
19336 if (!(data & BMSR_LSTATUS)) {
19338 @@ -837,14 +695,22 @@
19339 transceiver->check_speed(dev);
19342 + spin_lock(&np->led_lock);
19343 if ((old_speed != current_speed) || !led_initiated) {
19345 e100_set_network_leds(NO_NETWORK_ACTIVITY);
19346 + if (current_speed)
19347 + netif_carrier_on(dev);
19349 + netif_carrier_off(dev);
19351 + spin_unlock(&np->led_lock);
19353 /* Reinitialize the timer. */
19354 speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
19355 add_timer(&speed_timer);
19357 + spin_unlock(&np->transceiver_lock);
19361 @@ -857,7 +723,7 @@
19362 ADVERTISE_10HALF | ADVERTISE_10FULL);
19364 switch (current_speed_selection) {
19367 if (current_duplex == full)
19368 data |= ADVERTISE_10FULL;
19369 else if (current_duplex == half)
19370 @@ -866,7 +732,7 @@
19371 data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
19376 if (current_duplex == full)
19377 data |= ADVERTISE_100FULL;
19378 else if (current_duplex == half)
19379 @@ -875,45 +741,54 @@
19380 data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
19383 - case 0 : /* Auto */
19384 + case 0: /* Auto */
19385 if (current_duplex == full)
19386 data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
19387 else if (current_duplex == half)
19388 data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
19390 data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
19391 - ADVERTISE_100HALF | ADVERTISE_100FULL;
19392 + ADVERTISE_100HALF | ADVERTISE_100FULL;
19395 - default : /* assume autoneg speed and duplex */
19396 + default: /* assume autoneg speed and duplex */
19397 data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
19398 - ADVERTISE_100HALF | ADVERTISE_100FULL;
19399 + ADVERTISE_100HALF | ADVERTISE_100FULL;
19403 e100_set_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE, data);
19405 /* Renegotiate with link partner */
19406 - data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
19407 - data |= BMCR_ANENABLE | BMCR_ANRESTART;
19409 + if (autoneg_normal) {
19410 + data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
19411 + data |= BMCR_ANENABLE | BMCR_ANRESTART;
19413 e100_set_mdio_reg(dev, mdio_phy_addr, MII_BMCR, data);
19417 e100_set_speed(struct net_device* dev, unsigned long speed)
19419 + struct net_local *np = netdev_priv(dev);
19421 + spin_lock(&np->transceiver_lock);
19422 if (speed != current_speed_selection) {
19423 current_speed_selection = speed;
19424 e100_negotiate(dev);
19426 + spin_unlock(&np->transceiver_lock);
19430 e100_check_duplex(unsigned long priv)
19432 struct net_device *dev = (struct net_device *)priv;
19433 - struct net_local *np = (struct net_local *)dev->priv;
19434 - int old_duplex = full_duplex;
19435 + struct net_local *np = netdev_priv(dev);
19438 + spin_lock(&np->transceiver_lock);
19439 + old_duplex = full_duplex;
19440 transceiver->check_duplex(dev);
19441 if (old_duplex != full_duplex) {
19442 /* Duplex changed */
19443 @@ -925,12 +800,20 @@
19444 duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
19445 add_timer(&duplex_timer);
19446 np->mii_if.full_duplex = full_duplex;
19447 + spin_unlock(&np->transceiver_lock);
19450 +#if defined(CONFIG_ETRAX_NO_PHY)
19452 +dummy_check_duplex(struct net_device* dev)
19458 generic_check_duplex(struct net_device* dev)
19460 unsigned long data;
19462 data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
19463 if ((data & ADVERTISE_10FULL) ||
19464 (data & ADVERTISE_100FULL))
19465 @@ -943,6 +826,7 @@
19466 tdk_check_duplex(struct net_device* dev)
19468 unsigned long data;
19470 data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
19471 full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
19473 @@ -951,6 +835,7 @@
19474 broadcom_check_duplex(struct net_device* dev)
19476 unsigned long data;
19478 data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
19479 full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
19481 @@ -959,26 +844,35 @@
19482 intel_check_duplex(struct net_device* dev)
19484 unsigned long data;
19486 data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
19487 full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
19492 e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
19494 + struct net_local *np = netdev_priv(dev);
19496 + spin_lock(&np->transceiver_lock);
19497 if (new_duplex != current_duplex) {
19498 current_duplex = new_duplex;
19499 e100_negotiate(dev);
19501 + spin_unlock(&np->transceiver_lock);
19505 e100_probe_transceiver(struct net_device* dev)
19507 +#if !defined(CONFIG_ETRAX_NO_PHY)
19508 unsigned int phyid_high;
19509 unsigned int phyid_low;
19511 struct transceiver_ops* ops = NULL;
19512 + struct net_local *np = netdev_priv(dev);
19514 + spin_lock(&np->transceiver_lock);
19516 /* Probe MDIO physical address */
19517 for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) {
19518 @@ -986,7 +880,7 @@
19521 if (mdio_phy_addr == 32)
19525 /* Get manufacturer */
19526 phyid_high = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID1);
19527 @@ -999,6 +893,8 @@
19531 + spin_unlock(&np->transceiver_lock);
19536 @@ -1006,7 +902,7 @@
19537 e100_get_mdio_reg(struct net_device *dev, int phy_id, int location)
19539 unsigned short cmd; /* Data to be sent on MDIO port */
19540 - int data; /* Data read from MDIO */
19541 + int data; /* Data read from MDIO */
19544 /* Start of frame, OP Code, Physical Address, Register Address */
19545 @@ -1082,6 +978,7 @@
19546 e100_receive_mdio_bit()
19550 *R_NETWORK_MGM_CTRL = 0;
19551 bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
19553 @@ -1117,7 +1014,7 @@
19555 e100_tx_timeout(struct net_device *dev)
19557 - struct net_local *np = (struct net_local *)dev->priv;
19558 + struct net_local *np = netdev_priv(dev);
19559 unsigned long flags;
19561 spin_lock_irqsave(&np->lock, flags);
19562 @@ -1139,8 +1036,7 @@
19563 e100_reset_transceiver(dev);
19565 /* and get rid of the packets that never got an interrupt */
19566 - while (myFirstTxDesc != myNextTxDesc)
19568 + while (myFirstTxDesc != myNextTxDesc) {
19569 dev_kfree_skb(myFirstTxDesc->skb);
19570 myFirstTxDesc->skb = 0;
19571 myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
19572 @@ -1166,7 +1062,7 @@
19574 e100_send_packet(struct sk_buff *skb, struct net_device *dev)
19576 - struct net_local *np = (struct net_local *)dev->priv;
19577 + struct net_local *np = netdev_priv(dev);
19578 unsigned char *buf = skb->data;
19579 unsigned long flags;
19581 @@ -1179,7 +1075,7 @@
19583 dev->trans_start = jiffies;
19585 - e100_hardware_send_packet(buf, skb->len);
19586 + e100_hardware_send_packet(np, buf, skb->len);
19588 myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
19590 @@ -1202,13 +1098,15 @@
19591 e100rxtx_interrupt(int irq, void *dev_id)
19593 struct net_device *dev = (struct net_device *)dev_id;
19594 - struct net_local *np = (struct net_local *)dev->priv;
19595 - unsigned long irqbits = *R_IRQ_MASK2_RD;
19596 + struct net_local *np = netdev_priv(dev);
19597 + unsigned long irqbits;
19599 - /* Disable RX/TX IRQs to avoid reentrancy */
19600 - *R_IRQ_MASK2_CLR =
19601 - IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
19602 - IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
19604 + * Note that both rx and tx interrupts are blocked at this point,
19605 + * regardless of which got us here.
19608 + irqbits = *R_IRQ_MASK2_RD;
19610 /* Handle received packets */
19611 if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
19612 @@ -1224,7 +1122,7 @@
19613 * allocate a new buffer to put a packet in.
19616 - ((struct net_local *)dev->priv)->stats.rx_packets++;
19617 + np->stats.rx_packets++;
19618 /* restart/continue on the channel, for safety */
19619 *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
19620 /* clear dma channel 1 eop/descr irq bits */
19621 @@ -1239,8 +1137,7 @@
19623 /* Report any packets that have been sent */
19624 while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) &&
19625 - myFirstTxDesc != myNextTxDesc)
19627 + (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
19628 np->stats.tx_bytes += myFirstTxDesc->skb->len;
19629 np->stats.tx_packets++;
19631 @@ -1249,19 +1146,15 @@
19632 dev_kfree_skb_irq(myFirstTxDesc->skb);
19633 myFirstTxDesc->skb = 0;
19634 myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
19635 + /* Wake up queue. */
19636 + netif_wake_queue(dev);
19639 if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
19640 - /* acknowledge the eop interrupt and wake up queue */
19641 + /* acknowledge the eop interrupt. */
19642 *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
19643 - netif_wake_queue(dev);
19646 - /* Enable RX/TX IRQs again */
19647 - *R_IRQ_MASK2_SET =
19648 - IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
19649 - IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
19651 return IRQ_HANDLED;
19654 @@ -1269,7 +1162,7 @@
19655 e100nw_interrupt(int irq, void *dev_id)
19657 struct net_device *dev = (struct net_device *)dev_id;
19658 - struct net_local *np = (struct net_local *)dev->priv;
19659 + struct net_local *np = netdev_priv(dev);
19660 unsigned long irqbits = *R_IRQ_MASK0_RD;
19662 /* check for underrun irq */
19663 @@ -1291,7 +1184,6 @@
19664 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
19665 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
19666 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
19667 - *R_NETWORK_TR_CTRL = IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr);
19668 np->stats.tx_errors++;
19669 D(printk("ethernet excessive collisions!\n"));
19671 @@ -1304,12 +1196,13 @@
19673 struct sk_buff *skb;
19675 - struct net_local *np = (struct net_local *)dev->priv;
19676 + struct net_local *np = netdev_priv(dev);
19677 unsigned char *skb_data_ptr;
19682 + etrax_eth_descr *prevRxDesc; /* The descriptor right before myNextRxDesc */
19683 + spin_lock(&np->led_lock);
19684 if (!led_active && time_after(jiffies, led_next_time)) {
19685 /* light the network leds depending on the current speed. */
19686 e100_set_network_leds(NETWORK_ACTIVITY);
19687 @@ -1319,9 +1212,10 @@
19689 mod_timer(&clear_led_timer, jiffies + HZ/10);
19691 + spin_unlock(&np->led_lock);
19693 length = myNextRxDesc->descr.hw_len - 4;
19694 - ((struct net_local *)dev->priv)->stats.rx_bytes += length;
19695 + np->stats.rx_bytes += length;
19698 printk("Got a packet of length %d:\n", length);
19699 @@ -1341,7 +1235,7 @@
19701 np->stats.rx_errors++;
19702 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
19704 + goto update_nextrxdesc;
19707 skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */
19708 @@ -1358,15 +1252,15 @@
19710 /* Large packet, send directly to upper layers and allocate new
19711 * memory (aligned to cache line boundary to avoid bug).
19712 - * Before sending the skb to upper layers we must make sure that
19713 - * skb->data points to the aligned start of the packet.
19714 + * Before sending the skb to upper layers we must make sure
19715 + * that skb->data points to the aligned start of the packet.
19718 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
19720 np->stats.rx_errors++;
19721 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
19723 + goto update_nextrxdesc;
19725 skb = myNextRxDesc->skb;
19726 align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
19727 @@ -1382,9 +1276,10 @@
19728 /* Send the packet to the upper layers */
19731 + update_nextrxdesc:
19732 /* Prepare for next packet */
19733 myNextRxDesc->descr.status = 0;
19734 - myPrevRxDesc = myNextRxDesc;
19735 + prevRxDesc = myNextRxDesc;
19736 myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
19739 @@ -1392,9 +1287,9 @@
19740 /* Check if descriptors should be returned */
19741 if (rx_queue_len == RX_QUEUE_THRESHOLD) {
19742 flush_etrax_cache();
19743 - myPrevRxDesc->descr.ctrl |= d_eol;
19744 + prevRxDesc->descr.ctrl |= d_eol;
19745 myLastRxDesc->descr.ctrl &= ~d_eol;
19746 - myLastRxDesc = myPrevRxDesc;
19747 + myLastRxDesc = prevRxDesc;
19751 @@ -1403,7 +1298,7 @@
19753 e100_close(struct net_device *dev)
19755 - struct net_local *np = (struct net_local *)dev->priv;
19756 + struct net_local *np = netdev_priv(dev);
19758 printk(KERN_INFO "Closing %s.\n", dev->name);
19760 @@ -1431,6 +1326,9 @@
19761 free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
19762 free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
19764 + cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
19765 + cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
19767 /* Update the statistics here. */
19769 update_rx_stats(&np->stats);
19770 @@ -1448,46 +1346,56 @@
19772 struct mii_ioctl_data *data = if_mii(ifr);
19773 struct net_local *np = netdev_priv(dev);
19777 spin_lock(&np->lock); /* Preempt protection */
19779 - case SIOCGMIIPHY: /* Get PHY address */
19780 + case SIOCGMIIPHY: /* Get PHY address */
19781 data->phy_id = mdio_phy_addr;
19783 - case SIOCGMIIREG: /* Read MII register */
19784 + case SIOCGMIIREG: /* Read MII register */
19785 data->val_out = e100_get_mdio_reg(dev, mdio_phy_addr, data->reg_num);
19787 - case SIOCSMIIREG: /* Write MII register */
19788 + case SIOCSMIIREG: /* Write MII register */
19789 e100_set_mdio_reg(dev, mdio_phy_addr, data->reg_num, data->val_in);
19792 /* The ioctls below should be considered obsolete but are */
19793 /* still present for compatability with old scripts/apps */
19794 - case SET_ETH_SPEED_10: /* 10 Mbps */
19795 + case SET_ETH_SPEED_10: /* 10 Mbps */
19796 e100_set_speed(dev, 10);
19798 - case SET_ETH_SPEED_100: /* 100 Mbps */
19799 + case SET_ETH_SPEED_100: /* 100 Mbps */
19800 e100_set_speed(dev, 100);
19802 - case SET_ETH_SPEED_AUTO: /* Auto negotiate speed */
19803 + case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */
19804 e100_set_speed(dev, 0);
19806 - case SET_ETH_DUPLEX_HALF: /* Half duplex. */
19807 + case SET_ETH_DUPLEX_HALF: /* Half duplex */
19808 e100_set_duplex(dev, half);
19810 - case SET_ETH_DUPLEX_FULL: /* Full duplex. */
19811 + case SET_ETH_DUPLEX_FULL: /* Full duplex */
19812 e100_set_duplex(dev, full);
19814 - case SET_ETH_DUPLEX_AUTO: /* Autonegotiate duplex*/
19815 + case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */
19816 e100_set_duplex(dev, autoneg);
19818 + case SET_ETH_AUTONEG:
19819 + old_autoneg = autoneg_normal;
19820 + autoneg_normal = *(int*)data;
19821 + if (autoneg_normal != old_autoneg)
19822 + e100_negotiate(dev);
19825 + spin_unlock(&np->lock);
19828 spin_unlock(&np->lock);
19833 -static int e100_set_settings(struct net_device *dev,
19834 +static int e100_get_settings(struct net_device *dev,
19835 struct ethtool_cmd *ecmd)
19837 ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |
19838 @@ -1565,7 +1473,8 @@
19840 e100_set_config(struct net_device *dev, struct ifmap *map)
19842 - struct net_local *np = (struct net_local *)dev->priv;
19843 + struct net_local *np = netdev_priv(dev);
19845 spin_lock(&np->lock); /* Preempt protection */
19847 switch(map->port) {
19848 @@ -1574,21 +1483,25 @@
19849 e100_set_speed(dev, 0);
19850 e100_set_duplex(dev, autoneg);
19853 case IF_PORT_10BASET:
19854 e100_set_speed(dev, 10);
19855 e100_set_duplex(dev, autoneg);
19858 case IF_PORT_100BASET:
19859 case IF_PORT_100BASETX:
19860 e100_set_speed(dev, 100);
19861 e100_set_duplex(dev, autoneg);
19864 case IF_PORT_100BASEFX:
19865 case IF_PORT_10BASE2:
19867 spin_unlock(&np->lock);
19868 return -EOPNOTSUPP;
19872 printk(KERN_ERR "%s: Invalid media selected", dev->name);
19873 spin_unlock(&np->lock);
19874 @@ -1602,6 +1515,7 @@
19875 update_rx_stats(struct net_device_stats *es)
19877 unsigned long r = *R_REC_COUNTERS;
19879 /* update stats relevant to reception errors */
19880 es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r);
19881 es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r);
19882 @@ -1613,11 +1527,11 @@
19883 update_tx_stats(struct net_device_stats *es)
19885 unsigned long r = *R_TR_COUNTERS;
19887 /* update stats relevant to transmission errors */
19889 IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
19890 IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
19891 - es->tx_errors += IO_EXTRACT(R_TR_COUNTERS, deferred, r);
19895 @@ -1627,8 +1541,9 @@
19896 static struct net_device_stats *
19897 e100_get_stats(struct net_device *dev)
19899 - struct net_local *lp = (struct net_local *)dev->priv;
19900 + struct net_local *lp = netdev_priv(dev);
19901 unsigned long flags;
19903 spin_lock_irqsave(&lp->lock, flags);
19905 update_rx_stats(&lp->stats);
19906 @@ -1640,21 +1555,21 @@
19909 * Set or clear the multicast filter for this adaptor.
19910 - * num_addrs == -1 Promiscuous mode, receive all packets
19911 - * num_addrs == 0 Normal mode, clear multicast list
19912 - * num_addrs > 0 Multicast mode, receive normal and MC packets,
19913 - * and do best-effort filtering.
19914 + * num_addrs == -1 Promiscuous mode, receive all packets
19915 + * num_addrs == 0 Normal mode, clear multicast list
19916 + * num_addrs > 0 Multicast mode, receive normal and MC packets,
19917 + * and do best-effort filtering.
19920 set_multicast_list(struct net_device *dev)
19922 - struct net_local *lp = (struct net_local *)dev->priv;
19923 + struct net_local *lp = netdev_priv(dev);
19924 int num_addr = dev->mc_count;
19925 unsigned long int lo_bits;
19926 unsigned long int hi_bits;
19928 spin_lock(&lp->lock);
19929 - if (dev->flags & IFF_PROMISC)
19931 + if (dev->flags & IFF_PROMISC) {
19932 /* promiscuous mode */
19933 lo_bits = 0xfffffffful;
19934 hi_bits = 0xfffffffful;
19935 @@ -1684,9 +1599,10 @@
19936 struct dev_mc_list *dmi = dev->mc_list;
19940 lo_bits = 0x00000000ul;
19941 hi_bits = 0x00000000ul;
19942 - for (i=0; i<num_addr; i++) {
19943 + for (i = 0; i < num_addr; i++) {
19944 /* Calculate the hash index for the GA registers */
19947 @@ -1713,8 +1629,7 @@
19949 if (hash_ix >= 32) {
19950 hi_bits |= (1 << (hash_ix-32));
19954 lo_bits |= (1 << hash_ix);
19957 @@ -1729,10 +1644,11 @@
19961 -e100_hardware_send_packet(char *buf, int length)
19962 +e100_hardware_send_packet(struct net_local *np, char *buf, int length)
19964 D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
19966 + spin_lock(&np->led_lock);
19967 if (!led_active && time_after(jiffies, led_next_time)) {
19968 /* light the network leds depending on the current speed. */
19969 e100_set_network_leds(NETWORK_ACTIVITY);
19970 @@ -1742,15 +1658,16 @@
19972 mod_timer(&clear_led_timer, jiffies + HZ/10);
19974 + spin_unlock(&np->led_lock);
19976 /* configure the tx dma descriptor */
19977 myNextTxDesc->descr.sw_len = length;
19978 myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait;
19979 myNextTxDesc->descr.buf = virt_to_phys(buf);
19981 - /* Move end of list */
19982 - myLastTxDesc->descr.ctrl &= ~d_eol;
19983 - myLastTxDesc = myNextTxDesc;
19984 + /* Move end of list */
19985 + myLastTxDesc->descr.ctrl &= ~d_eol;
19986 + myLastTxDesc = myNextTxDesc;
19988 /* Restart DMA channel */
19989 *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart);
19990 @@ -1759,6 +1676,11 @@
19992 e100_clear_network_leds(unsigned long dummy)
19994 + struct net_device *dev = (struct net_device *)dummy;
19995 + struct net_local *np = netdev_priv(dev);
19997 + spin_lock(&np->led_lock);
19999 if (led_active && time_after(jiffies, led_next_time)) {
20000 e100_set_network_leds(NO_NETWORK_ACTIVITY);
20002 @@ -1766,6 +1688,8 @@
20003 led_next_time = jiffies + NET_FLASH_PAUSE;
20007 + spin_unlock(&np->led_lock);
20011 @@ -1786,19 +1710,25 @@
20013 LED_NETWORK_SET(LED_OFF);
20016 - else if (light_leds) {
20017 + } else if (light_leds) {
20018 if (current_speed == 10) {
20019 LED_NETWORK_SET(LED_ORANGE);
20021 LED_NETWORK_SET(LED_GREEN);
20026 LED_NETWORK_SET(LED_OFF);
20030 +#ifdef CONFIG_NET_POLL_CONTROLLER
20032 +e100_netpoll(struct net_device* netdev)
 20034 + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
20039 etrax_init_module(void)
20041 diff -urN linux-2.6.19.2.orig/drivers/net/cris/eth_v32.c linux-2.6.19.2.dev/drivers/net/cris/eth_v32.c
20042 --- linux-2.6.19.2.orig/drivers/net/cris/eth_v32.c 1970-01-01 01:00:00.000000000 +0100
20043 +++ linux-2.6.19.2.dev/drivers/net/cris/eth_v32.c 2007-02-06 11:10:37.000000000 +0100
20046 + * Driver for the ETRAX FS network controller.
20048 + * Copyright (c) 2003-2006 Axis Communications AB.
20051 +#include <linux/module.h>
20053 +#include <linux/kernel.h>
20054 +#include <linux/sched.h>
20055 +#include <linux/delay.h>
20056 +#include <linux/types.h>
20057 +#include <linux/fcntl.h>
20058 +#include <linux/interrupt.h>
20059 +#include <linux/ptrace.h>
20060 +#include <linux/ioport.h>
20061 +#include <linux/in.h>
20062 +#include <linux/slab.h>
20063 +#include <linux/string.h>
20064 +#include <linux/spinlock.h>
20065 +#include <linux/errno.h>
20066 +#include <linux/init.h>
20067 +#include <linux/cpufreq.h>
20069 +#include <linux/netdevice.h>
20070 +#include <linux/etherdevice.h>
20071 +#include <linux/skbuff.h>
20072 +#include <linux/ethtool.h>
20073 +#include <linux/mii.h>
20075 +#include <asm/io.h> /* LED_* I/O functions */
20076 +#include <asm/irq.h>
20077 +#include <asm/arch/hwregs/reg_map.h>
20078 +#include <asm/arch/hwregs/reg_rdwr.h>
20079 +#include <asm/arch/hwregs/dma.h>
20080 +#include <asm/arch/hwregs/eth_defs.h>
20081 +#include <asm/arch/hwregs/config_defs.h>
20082 +#include <asm/arch/hwregs/intr_vect_defs.h>
20083 +#include <asm/system.h>
20084 +#include <asm/bitops.h>
20085 +#include <asm/ethernet.h>
20086 +#include <asm/arch/dma.h>
20087 +#include <asm/arch/intmem.h>
20088 +#include <asm/arch/pinmux.h>
20090 +#include "eth_v32.h"
20093 +#define GET_BIT(bit,val) (((val) >> (bit)) & 0x01)
20095 +/* Toggle network LEDs on/off at runtime */
20096 +static int use_network_leds = 1;
20098 +static void update_rx_stats(struct crisv32_ethernet_local *np);
20099 +static void update_tx_stats(struct crisv32_ethernet_local *np);
20100 +static void crisv32_eth_setup_controller(struct net_device *dev);
20101 +static int crisv32_eth_request_irqdma(struct net_device *dev);
20102 +static void crisv32_eth_init_rings(struct net_device *dev);
20103 +static void crisv32_eth_reset_rings(struct net_device *dev);
20104 +static void crisv32_ethernet_bug(struct net_device *dev);
20107 + * The name of the card. Is used for messages and in the requests for
20108 + * io regions, irqs and dma channels.
20110 +static const char *cardname = "ETRAX FS built-in ethernet controller";
20112 +static int autoneg_normal = 1;
20114 +/* Some chipset needs special care. */
20115 +struct transceiver_ops transceivers[] = {
20116 + {0x1018, broadcom_check_speed, broadcom_check_duplex},
20117 + /* TDK 2120 and TDK 2120C */
20118 + {0xC039, tdk_check_speed, tdk_check_duplex},
20119 + {0x039C, tdk_check_speed, tdk_check_duplex},
20120 + /* Intel LXT972A*/
20121 + {0x04de, intel_check_speed, intel_check_duplex},
20122 + /* National Semiconductor DP83865 */
20123 + {0x0017, national_check_speed, national_check_duplex},
20124 + /* Generic, must be last. */
20125 + {0x0000, generic_check_speed, generic_check_duplex}
20128 +static struct net_device *crisv32_dev[2];
20129 +static struct crisv32_eth_leds *crisv32_leds[3];
20131 +#ifdef CONFIG_CPU_FREQ
20133 +crisv32_ethernet_freq_notifier(struct notifier_block *nb, unsigned long val,
20136 +static struct notifier_block crisv32_ethernet_freq_notifier_block = {
20137 + .notifier_call = crisv32_ethernet_freq_notifier
20142 + * mask in and out tx/rx interrupts.
20144 +static inline void crisv32_disable_tx_ints(struct crisv32_ethernet_local *np)
20146 + reg_dma_rw_intr_mask intr_mask_tx = { .data = regk_dma_no };
20147 + REG_WR(dma, np->dma_out_inst, rw_intr_mask, intr_mask_tx);
20150 +static inline void crisv32_enable_tx_ints(struct crisv32_ethernet_local *np)
20152 + reg_dma_rw_intr_mask intr_mask_tx = { .data = regk_dma_yes };
20153 + REG_WR(dma, np->dma_out_inst, rw_intr_mask, intr_mask_tx);
20156 +static inline void crisv32_disable_rx_ints(struct crisv32_ethernet_local *np)
20158 + reg_dma_rw_intr_mask intr_mask_rx = { .in_eop = regk_dma_no };
20159 + REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_rx);
20162 +static inline void crisv32_enable_rx_ints(struct crisv32_ethernet_local *np)
20164 + reg_dma_rw_intr_mask intr_mask_rx = { .in_eop = regk_dma_yes };
20165 + REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_rx);
20168 +/* start/stop receiver */
20169 +static inline void crisv32_start_receiver(struct crisv32_ethernet_local *np)
20171 + reg_eth_rw_rec_ctrl rec_ctrl;
20173 + rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl);
20174 + rec_ctrl.ma0 = regk_eth_yes;
20175 + rec_ctrl.broadcast = regk_eth_rec;
20176 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
20179 +static inline void crisv32_stop_receiver(struct crisv32_ethernet_local *np)
20181 + reg_eth_rw_rec_ctrl rec_ctrl;
20183 + rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl);
20184 + rec_ctrl.ma0 = regk_eth_no;
20185 + rec_ctrl.broadcast = regk_eth_discard;
20186 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
20190 +crisv32_eth_request_irqdma(struct net_device *dev)
20192 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20194 + /* Allocate IRQs and DMAs. */
20195 + if (np->eth_inst == regi_eth0) {
20196 + if (request_irq(DMA0_INTR_VECT, crisv32tx_eth_interrupt,
20197 + 0, cardname, dev)) {
20201 + if (request_irq(DMA1_INTR_VECT, crisv32rx_eth_interrupt,
20202 + IRQF_SAMPLE_RANDOM, cardname, dev)) {
20206 + if (crisv32_request_dma(0, cardname, DMA_VERBOSE_ON_ERROR,
20207 + 12500000, dma_eth0))
20210 + if (crisv32_request_dma(1, cardname, DMA_VERBOSE_ON_ERROR,
20211 + 12500000, dma_eth0))
20214 + if (request_irq(ETH0_INTR_VECT, crisv32nw_eth_interrupt, 0,
20215 + cardname, dev)) {
20216 + crisv32_free_dma(1);
20218 + crisv32_free_dma(0);
20220 + free_irq(DMA1_INTR_VECT, dev);
20222 + free_irq(DMA0_INTR_VECT, dev);
20226 + if (request_irq(DMA6_INTR_VECT, crisv32tx_eth_interrupt,
20227 + 0, cardname, dev))
20230 + if (request_irq(DMA7_INTR_VECT, crisv32rx_eth_interrupt,
20231 + IRQF_SAMPLE_RANDOM, cardname, dev))
20234 + if (crisv32_request_dma(6, cardname, DMA_VERBOSE_ON_ERROR,
20238 + if (crisv32_request_dma(7, cardname, DMA_VERBOSE_ON_ERROR,
20242 + if (request_irq(ETH1_INTR_VECT, crisv32nw_eth_interrupt, 0,
20243 + cardname, dev)) {
20244 + crisv32_free_dma(7);
20246 + crisv32_free_dma(6);
20248 + free_irq(DMA7_INTR_VECT, dev);
20250 + free_irq(DMA6_INTR_VECT, dev);
20257 +static void __init
20258 +crisv32_eth_setup_controller(struct net_device *dev)
20260 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20262 + reg_config_rw_pad_ctrl pad_ctrl;
20264 + reg_eth_rw_tr_ctrl tr_ctrl = {
20265 + .retry = regk_eth_yes,
20266 + .pad = regk_eth_yes,
20267 + .crc = regk_eth_yes
20270 + reg_eth_rw_rec_ctrl rec_ctrl = {
20271 + .ma0 = regk_eth_no, /* enable at open() */
20272 + .broadcast = regk_eth_no,
20273 + .max_size = regk_eth_size1522
20276 + reg_eth_rw_ga_lo ga_lo = { 0 };
20277 + reg_eth_rw_ga_hi ga_hi = { 0 };
20279 + reg_eth_rw_gen_ctrl gen_ctrl = {
20280 + .phy = regk_eth_mii_clk,
20281 + .flow_ctrl = regk_eth_yes
20285 + * Initialize group address registers to make sure that no
20286 + * unwanted addresses are matched.
20288 + REG_WR(eth, np->eth_inst, rw_ga_lo, ga_lo);
20289 + REG_WR(eth, np->eth_inst, rw_ga_hi, ga_hi);
20291 + /* Configure receiver and transmitter */
20292 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
20293 + REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl);
20295 + /* Enable ethernet controller with mii clk. */
20296 + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
20297 + gen_ctrl.en = regk_eth_yes;
20298 + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
20300 + /* keep reset low (RESET_LEN) */
20304 + pad_ctrl = REG_RD(config, regi_config, rw_pad_ctrl);
20305 + pad_ctrl.phyrst_n = 1;
20306 + REG_WR(config, regi_config, rw_pad_ctrl, pad_ctrl);
20308 + /* Let the PHY reset (RESET_WAIT) */
20311 + crisv32_eth_probe_transceiver(dev);
20314 +static void __init
20315 +crisv32_eth_init_rings(struct net_device *dev)
20317 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20320 + /* Initialise receive descriptors for interface. */
20321 + for (i = 0; i < NBR_RX_DESC; i++) {
20322 + struct sk_buff *skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
20324 + np->dma_rx_descr_list[i].skb = skb;
20325 + np->dma_rx_descr_list[i].descr.buf =
20326 + (char*)virt_to_phys(skb->data);
20327 + np->dma_rx_descr_list[i].descr.after =
20328 + (char*)virt_to_phys(skb->data + MAX_MEDIA_DATA_SIZE);
20330 + np->dma_rx_descr_list[i].descr.eol = 0;
20331 + np->dma_rx_descr_list[i].descr.in_eop = 0;
20332 + np->dma_rx_descr_list[i].descr.next =
20333 + (void *) virt_to_phys(&np->dma_rx_descr_list[i + 1].descr);
20335 + /* bend the list into a ring */
20336 + np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.next =
20337 + (void *) virt_to_phys(&np->dma_rx_descr_list[0].descr);
20339 + /* Initialize transmit descriptors. */
20340 + for (i = 0; i < NBR_TX_DESC; i++) {
20341 + np->dma_tx_descr_list[i].descr.wait = 1;
20342 + np->dma_tx_descr_list[i].descr.eol = 0;
20343 + np->dma_tx_descr_list[i].descr.out_eop = 0;
20344 + np->dma_tx_descr_list[i].descr.next =
20345 + (void*)virt_to_phys(&np->dma_tx_descr_list[i+1].descr);
20347 + /* bend the list into a ring */
20348 + np->dma_tx_descr_list[NBR_TX_DESC - 1].descr.next =
20349 + (void *) virt_to_phys(&np->dma_tx_descr_list[0].descr);
20351 + crisv32_eth_reset_rings(dev);
20355 +crisv32_eth_reset_rings(struct net_device *dev)
20357 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20360 + /* free un-handled tx packets */
20361 + while(np->txpackets
20362 + || np->catch_tx_desc != np->active_tx_desc) {
20364 + if (np->catch_tx_desc->skb)
20365 + dev_kfree_skb(np->catch_tx_desc->skb);
20367 + np->catch_tx_desc->skb = 0;
20368 + np->catch_tx_desc =
20369 + phys_to_virt((int)np->catch_tx_desc->descr.next);
20370 + } while (np->catch_tx_desc != np->active_tx_desc);
20371 + WARN_ON(np->txpackets != 0);
20372 + np->txpackets = 0;
20374 + /* cleanup the rx-ring */
20375 + for (i = 0; i < NBR_RX_DESC; i++) {
20376 + struct sk_buff *skb;
20377 + skb = np->dma_rx_descr_list[i].skb;
20379 + || (np->dma_rx_descr_list[i].descr.buf !=
20380 + (void *)virt_to_phys(skb->data)))
20382 + printk("%s:%d: damaged rx-ring! "
20383 + "i=%d skb=%p %lx %lx %p %p\n",
20384 + __func__, __LINE__, i,
20386 + virt_to_phys(skb->data),
20387 + virt_to_phys(skb->data + MAX_MEDIA_DATA_SIZE),
20388 + np->dma_rx_descr_list[i].descr.buf,
20389 + np->dma_rx_descr_list[i].descr.after);
20391 + crisv32_ethernet_bug(dev);
20393 + dev_kfree_skb(skb);
20394 + skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
20395 + np->dma_rx_descr_list[i].skb = skb;
20396 + np->dma_rx_descr_list[i].descr.buf =
20397 + (char*)virt_to_phys(skb->data);
20399 + np->dma_rx_descr_list[i].descr.after =
20400 + (char*)virt_to_phys(skb->data
20401 + + MAX_MEDIA_DATA_SIZE);
20402 + np->dma_rx_descr_list[i].descr.eol = 0;
20403 + np->dma_rx_descr_list[i].descr.in_eop = 0;
20404 + /* Workaround cache bug */
20405 + flush_dma_descr(&np->dma_rx_descr_list[i].descr, 1);
20408 + /* reset rx-ring */
20409 + np->active_rx_desc = &np->dma_rx_descr_list[0];
20410 + np->prev_rx_desc = &np->dma_rx_descr_list[NBR_RX_DESC - 1];
20411 + np->last_rx_desc = np->prev_rx_desc;
20412 + np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.eol = 1;
20414 + /* reset tx-ring */
20415 + np->dma_tx_descr_list[0].descr.buf =
20416 + np->dma_tx_descr_list[0].descr.after = 0;
 20417 + np->dma_tx_descr_list[0].descr.out_eop = 0;
20418 + np->dma_tx_descr_list[0].descr.eol = 1;
20420 + np->active_tx_desc = &np->dma_tx_descr_list[0];
20421 + np->prev_tx_desc = &np->dma_tx_descr_list[NBR_TX_DESC - 1];
20422 + np->catch_tx_desc = &np->dma_tx_descr_list[0];
20424 + /* Fill context descriptors. */
20425 + np->ctxt_in.next = 0;
20426 + np->ctxt_in.saved_data =
20427 + (void *)virt_to_phys(&np->active_rx_desc->descr);
20428 + np->ctxt_in.saved_data_buf = np->active_rx_desc->descr.buf;
20430 + np->ctxt_out.next = 0;
20431 + np->ctxt_out.saved_data =
20432 + (void *)virt_to_phys(&np->dma_tx_descr_list[0].descr);
20435 +static void __init
20436 +crisv32_init_leds(int ledgrp, struct net_device* dev)
20438 + struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0);
20439 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20441 + /* Use already allocated led grp if initialized */
20442 + if (crisv32_leds[ledgrp] != NULL) {
20443 + np->leds = crisv32_leds[ledgrp];
20447 + crisv32_leds[ledgrp] = kmalloc(sizeof(struct crisv32_eth_leds),GFP_KERNEL);
20449 + crisv32_leds[ledgrp]->ledgrp = ledgrp;
20450 + crisv32_leds[ledgrp]->led_active = 0;
20451 + /* NOTE: Should this value be set to zero as the jiffies timer can wrap? */
20452 + crisv32_leds[ledgrp]->led_next_time = jiffies;
20454 + crisv32_leds[ledgrp]->clear_led_timer = timer_init;
20455 + crisv32_leds[ledgrp]->clear_led_timer.function = crisv32_clear_network_leds;
20456 + crisv32_leds[ledgrp]->clear_led_timer.data = (unsigned long) dev;
20458 + spin_lock_init(&crisv32_leds[ledgrp]->led_lock);
20460 + np->leds = crisv32_leds[ledgrp];
20464 +crisv32_ethernet_init(void)
20466 + struct crisv32_ethernet_local *np;
20469 + printk("ETRAX FS 10/100MBit ethernet v0.01 (c)"
20470 + " 2003 Axis Communications AB\n");
20472 +#ifdef CONFIG_ETRAX_ETHERNET_IFACE0
20475 + /* Default MAC address for interface 0.
20476 + * The real one will be set later. */
20477 + static struct sockaddr default_mac_iface0 =
20478 + {0, {0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00}};
20480 + if (!(crisv32_dev[iface0] = alloc_etherdev(sizeof *np)))
20483 + ret |= crisv32_ethernet_device_init(crisv32_dev[iface0]);
20485 +#if defined(CONFIG_ETRAX_ETH0_USE_LEDGRP0)
20486 + crisv32_init_leds(LED_GRP_0,crisv32_dev[iface0]);
20487 +#elif defined(CONFIG_ETRAX_ETH0_USE_LEDGRP1)
20488 + crisv32_init_leds(LED_GRP_1,crisv32_dev[iface0]);
20490 + crisv32_init_leds(LED_GRP_NONE,crisv32_dev[iface0]);
20493 + np = (struct crisv32_ethernet_local *) crisv32_dev[iface0]->priv;
20494 + np->eth_inst = regi_eth0;
20495 + np->dma_out_inst = regi_dma0;
20496 + np->dma_in_inst = regi_dma1;
20498 + register_netdev(crisv32_dev[iface0]);
20500 + /* Set up default MAC address */
20501 + memcpy(crisv32_dev[iface0]->dev_addr, default_mac_iface0.sa_data, 6);
20502 + crisv32_eth_set_mac_address(crisv32_dev[iface0], &default_mac_iface0);
20503 + if (crisv32_eth_request_irqdma(crisv32_dev[iface0]))
20504 + printk("%s: eth0 unable to allocate IRQ and DMA resources\n",
20506 + np->txpackets = 0;
20507 + crisv32_eth_init_rings(crisv32_dev[iface0]);
20508 + crisv32_eth_setup_controller(crisv32_dev[iface0]);
20510 +#endif /* CONFIG_ETRAX_ETHERNET_IFACE0 */
20512 +#ifdef CONFIG_ETRAX_ETHERNET_IFACE1
20515 + /* Default MAC address for interface 1.
20516 + * The real one will be set later. */
20517 + static struct sockaddr default_mac_iface1 =
20518 + {0, {0x00, 0x40, 0x8C, 0xCD, 0x00, 0x01}};
20520 + if (crisv32_pinmux_alloc_fixed(pinmux_eth1))
20521 + panic("Eth pinmux\n");
20523 + /* Increase index to device array if interface 0 is enabled as well.*/
20524 +#ifdef CONFIG_ETRAX_ETHERNET_IFACE0
20527 + if (!(crisv32_dev[iface1] = alloc_etherdev(sizeof *np)))
20530 + ret |= crisv32_ethernet_device_init(crisv32_dev[iface1]);
20532 +#if defined(CONFIG_ETRAX_ETH1_USE_LEDGRP0)
20533 + crisv32_init_leds(LED_GRP_0,crisv32_dev[iface1]);
20534 +#elif defined(CONFIG_ETRAX_ETH1_USE_LEDGRP1)
20535 + crisv32_init_leds(LED_GRP_1,crisv32_dev[iface1]);
20537 + crisv32_init_leds(LED_GRP_NONE,crisv32_dev[iface1]);
20540 + np = (struct crisv32_ethernet_local *) crisv32_dev[iface1]->priv;
20541 + np->eth_inst = regi_eth1;
20542 + np->dma_out_inst = regi_dma6;
20543 + np->dma_in_inst = regi_dma7;
20545 + register_netdev(crisv32_dev[iface1]);
20547 + /* Set up default MAC address */
20548 + memcpy(crisv32_dev[iface1]->dev_addr, default_mac_iface1.sa_data, 6);
20549 + crisv32_eth_set_mac_address(crisv32_dev[iface1], &default_mac_iface1);
20551 + if (crisv32_eth_request_irqdma(crisv32_dev[iface1]))
20552 + printk("%s: eth1 unable to allocate IRQ and DMA resources\n",
20554 + np->txpackets = 0;
20555 + crisv32_eth_init_rings(crisv32_dev[iface1]);
20556 + crisv32_eth_setup_controller(crisv32_dev[iface1]);
20558 +#endif /* CONFIG_ETRAX_ETHERNET_IFACE1 */
20560 +#ifdef CONFIG_CPU_FREQ
20561 + cpufreq_register_notifier(&crisv32_ethernet_freq_notifier_block,
20562 + CPUFREQ_TRANSITION_NOTIFIER);
20569 +crisv32_ethernet_device_init(struct net_device* dev)
20571 + struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0);
20572 + struct crisv32_ethernet_local *np;
20574 + dev->base_addr = 0; /* Just to have something to show. */
20576 + /* we do our own locking */
20577 + dev->features |= NETIF_F_LLTX;
20579 + /* We use several IRQs and DMAs so just report 0 here. */
20584 + * Fill in our handlers so the network layer can talk to us in the
20587 + dev->open = crisv32_eth_open;
20588 + dev->hard_start_xmit = crisv32_eth_send_packet;
20589 + dev->stop = crisv32_eth_close;
20590 + dev->get_stats = crisv32_get_stats;
20591 + dev->set_multicast_list = crisv32_eth_set_multicast_list;
20592 + dev->set_mac_address = crisv32_eth_set_mac_address;
20593 + dev->ethtool_ops = &crisv32_ethtool_ops;
20594 + dev->do_ioctl = crisv32_eth_ioctl;
20595 + dev->set_config = crisv32_eth_set_config;
20596 + dev->tx_timeout = crisv32_eth_tx_timeout;
20597 +#ifdef CONFIG_NET_POLL_CONTROLLER
20598 + dev->poll_controller = crisv32_netpoll;
20601 + np = netdev_priv(dev);
20603 + spin_lock_init(&np->lock);
20604 + spin_lock_init(&np->transceiver_lock);
20606 + /* Initialize speed indicator stuff. */
20607 + np->current_speed = 10;
20608 + np->current_speed_selection = 0; /* Auto. */
20609 + np->speed_timer = timer_init;
20610 + np->speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
20611 + np->speed_timer.data = (unsigned long) dev;
20612 + np->speed_timer.function = crisv32_eth_check_speed;
20614 + np->full_duplex = 0;
20615 + np->current_duplex = autoneg;
20616 + np->duplex_timer = timer_init;
20617 + np->duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
20618 + np->duplex_timer.data = (unsigned long) dev;
20619 + np->duplex_timer.function = crisv32_eth_check_duplex;
20625 +crisv32_eth_open(struct net_device *dev)
20627 + struct sockaddr mac_addr;
20628 + reg_dma_rw_ack_intr ack_intr = { .data = 1,.in_eop = 1 };
20629 + reg_dma_rw_cfg dma_cfg = { .en = 1 };
20630 + reg_eth_rw_clr_err clr_err = {.clr = regk_eth_yes};
20631 + int intr_mask_nw = 0x1cff;
20632 + int eth_ack_intr = 0xffff;
20633 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20635 + spin_lock(&np->lock);
20636 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
20637 + np->gigabit_mode = 0;
20639 + crisv32_disable_tx_ints(np);
20640 + crisv32_disable_rx_ints(np);
20642 + REG_WR(eth, np->eth_inst, rw_clr_err, clr_err);
20643 + REG_WR_INT(eth, np->eth_inst, rw_ack_intr, eth_ack_intr);
20644 + REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw);
20645 + crisv32_eth_reset_rings(dev);
20647 + /* Give the hardware an idea of what MAC address we want. */
20648 + memcpy(mac_addr.sa_data, dev->dev_addr, dev->addr_len);
20649 + crisv32_eth_set_mac_address(dev, &mac_addr);
20651 + /* Enable irq and make sure that the irqs are cleared. */
20652 + REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
20653 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
20655 + /* Prepare input DMA. */
20656 + DMA_RESET(np->dma_in_inst);
20657 + DMA_ENABLE(np->dma_in_inst);
20658 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
20659 + DMA_WR_CMD(np->dma_in_inst, regk_dma_set_w_size2);
20661 + DMA_START_CONTEXT( np->dma_in_inst, virt_to_phys(&np->ctxt_in));
20662 + DMA_CONTINUE(np->dma_in_inst);
20663 + crisv32_enable_rx_ints(np);
20664 + crisv32_start_receiver(np);
20666 + /* Prepare output DMA. */
20667 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
20668 + DMA_WR_CMD(np->dma_out_inst, regk_dma_set_w_size4);
20670 + REG_WR(dma, np->dma_out_inst, rw_cfg, dma_cfg);
20671 + netif_start_queue(dev);
20672 + crisv32_enable_tx_ints(np);
20674 + /* Start duplex/speed timers */
20675 + add_timer(&np->speed_timer);
20676 + add_timer(&np->duplex_timer);
20678 + spin_unlock(&np->lock);
20680 + * We are now ready to accept transmit requeusts from the queueing
20681 + * layer of the networking.
20683 + netif_carrier_on(dev);
20689 +crisv32_eth_close(struct net_device *dev)
20691 + reg_dma_rw_ack_intr ack_intr = {0};
20693 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20694 + unsigned long flags;
20696 + printk(KERN_INFO "Closing %s.\n", dev->name);
20698 + /* stop the receiver before the DMA channels to avoid overruns. */
20699 + crisv32_stop_receiver(np);
20701 + spin_lock_irqsave(&np->lock, flags);
20702 + netif_stop_queue(dev);
20704 + /* Reset the TX DMA in case it has hung on something. */
20705 + DMA_RESET(np->dma_in_inst);
20708 + DMA_STOP(np->dma_in_inst);
20709 + DMA_STOP(np->dma_out_inst);
20711 + /* Disable irq and make sure that the irqs are cleared. */
20712 + crisv32_disable_tx_ints(np);
20713 + ack_intr.data = 1;
20714 + REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
20716 + crisv32_disable_rx_ints(np);
20717 + ack_intr.in_eop = 1;
20718 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
20720 + np->sender_started = 0;
20721 + spin_unlock_irqrestore(&np->lock, flags);
20723 + /* Update the statistics. */
20724 + update_rx_stats(np);
20725 + update_tx_stats(np);
20727 + /* Stop speed/duplex timers */
20728 + del_timer(&np->speed_timer);
20729 + del_timer(&np->duplex_timer);
20735 +crisv32_eth_set_mac_address(struct net_device *dev, void *vpntr)
20738 + unsigned char *addr = ((struct sockaddr*)vpntr)->sa_data;
20740 + reg_eth_rw_ma0_lo ma0_lo =
20741 + { addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24)};
20743 + reg_eth_rw_ma0_hi ma0_hi = { addr[4] | (addr[5] << 8) };
20745 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20747 + /* Remember the address. */
20748 + memcpy(dev->dev_addr, addr, dev->addr_len);
20751 + * Write the address to the hardware.
20752 + * Note the way the address is wrapped:
20753 + * ma0_l0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
20754 + * ma0_hi = a0_4 | (a0_5 << 8);
20756 + REG_WR(eth, np->eth_inst, rw_ma0_lo, ma0_lo);
20757 + REG_WR(eth, np->eth_inst, rw_ma0_hi, ma0_hi);
20759 + printk(KERN_INFO "%s: changed MAC to ", dev->name);
20761 + for (i = 0; i < 5; i++)
20762 + printk("%02X:", dev->dev_addr[i]);
20764 + printk("%02X\n", dev->dev_addr[i]);
20769 +static irqreturn_t
20770 +crisv32rx_eth_interrupt(int irq, void *dev_id)
20772 + reg_dma_r_masked_intr masked_in;
20773 + reg_dma_rw_cmd cmd = {0};
20774 + reg_dma_rw_ack_intr ack_intr = {0};
20775 + struct net_device *dev = (struct net_device *) dev_id;
20776 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20778 + masked_in = REG_RD(dma, np->dma_in_inst, r_masked_intr);
20780 + if (masked_in.in_eop) {
20781 + DEBUG(printk("EOP_IN interrupt\n"));
20783 + /* Acknowledge input dma interrupt. */
20784 + ack_intr.in_eop = 1;
20785 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
20787 + np->new_rx_package = 1;
20788 + /* Check if complete packets were indeed received. */
20789 + while (np->active_rx_desc->descr.in_eop == 1
20790 + && np->new_rx_package) {
20792 + * Take out the buffer and give it to the OS, then
20793 + * allocate a new buffer to put a packet in.
20795 + crisv32_eth_receive_packet(dev);
20797 + /* Update number of packets received. */
20798 + np->stats.rx_packets++;
20800 + /* Restarts input dma. */
20801 + cmd.cont_data = 1;
20802 + REG_WR(dma, np->dma_in_inst, rw_cmd, cmd);
20804 + /* Acknowledge input dma interrupt. */
20805 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
20808 + return IRQ_HANDLED;
20811 +static irqreturn_t
20812 +crisv32tx_eth_interrupt(int irq, void *dev_id)
20814 + reg_dma_rw_stat stat;
20815 + dma_descr_data *dma_pos;
20816 + reg_dma_rw_ack_intr ack_intr = { .data = 1 };
20817 + reg_dma_r_masked_intr masked_out;
20819 + struct net_device *dev = (struct net_device *) dev_id;
20820 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20821 + unsigned long flags;
20823 + masked_out = REG_RD(dma, np->dma_out_inst, r_masked_intr);
20825 + /* Get the current output dma position. */
20826 + stat = REG_RD(dma, np->dma_out_inst, rw_stat);
20827 + if (stat.list_state == regk_dma_data_at_eol)
20828 + dma_pos = &np->active_tx_desc->descr;
20830 + dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_out_inst,
20833 + /* ack the interrupt */
20834 + REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
20836 + /* protect against ethernet excessive-col interrupts */
20837 + spin_lock_irqsave(&np->lock, flags);
20839 + /* Take care of transmited dma descriptors and report sent packet. */
20840 + while (np->txpackets && ((&np->catch_tx_desc->descr != dma_pos)
20841 + || netif_queue_stopped(dev))) {
20842 + /* Update sent packet statistics. */
20843 + np->stats.tx_bytes += np->catch_tx_desc->skb->len;
20844 + np->stats.tx_packets++;
20846 + dev_kfree_skb_irq(np->catch_tx_desc->skb);
20847 + np->catch_tx_desc->skb = 0;
20849 + np->catch_tx_desc =
20850 + phys_to_virt((int)np->catch_tx_desc->descr.next);
20851 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
20852 + if (np->gigabit_mode) {
20853 + np->intmem_tx_buf_catch->free = 1;
20854 + np->intmem_tx_buf_catch = np->intmem_tx_buf_catch->next;
20857 + netif_wake_queue(dev);
20859 + spin_unlock_irqrestore(&np->lock, flags);
20860 + return IRQ_HANDLED;
20864 +/* Update receive errors. */
20866 +update_rx_stats(struct crisv32_ethernet_local *np)
20868 + reg_eth_rs_rec_cnt r;
20869 + reg_eth_rs_phy_cnt rp;
20871 + r = REG_RD(eth, np->eth_inst, rs_rec_cnt);
20872 + rp = REG_RD(eth, np->eth_inst, rs_phy_cnt);
20874 + np->stats.rx_fifo_errors += r.congestion;
20875 + np->stats.rx_crc_errors += r.crc_err;
20876 + np->stats.rx_frame_errors += r.align_err;
20877 + np->stats.rx_length_errors += r.oversize;
20880 +/* Update transmit errors. */
20882 +update_tx_stats(struct crisv32_ethernet_local *np)
20884 + reg_eth_rs_tr_cnt r;
20886 + r = REG_RD(eth, np->eth_inst, rs_tr_cnt);
20888 + np->stats.collisions += r.single_col + r.mult_col;
20889 + np->stats.tx_errors += r.deferred;
20892 +/* Get current statistics. */
20893 +static struct net_device_stats *
20894 +crisv32_get_stats(struct net_device *dev)
20896 + unsigned long flags;
20897 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20899 + spin_lock_irqsave(&np->lock, flags);
20901 + update_rx_stats(np);
20902 + update_tx_stats(np);
20904 + spin_unlock_irqrestore(&np->lock, flags);
20906 + return &np->stats;
20909 +/* Check for network errors. This acknowledge the received interrupt. */
20910 +static irqreturn_t
20911 +crisv32nw_eth_interrupt(int irq, void *dev_id)
20913 + struct net_device *dev = (struct net_device *) dev_id;
20914 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20915 + reg_eth_r_masked_intr intr_mask;
20916 + int ack_intr = 0xffff;
20917 + reg_eth_rw_clr_err clr_err;
20919 + intr_mask = REG_RD(eth, np->eth_inst, r_masked_intr);
20922 + * Check for underrun and/or excessive collisions. Note that the
20923 + * rw_clr_err register clears both underrun and excessive collision
20924 + * errors, so there's no need to check them separately.
20926 + if (np->sender_started
20927 + && (intr_mask.urun || intr_mask.exc_col)) {
20928 + unsigned long flags;
20929 + dma_descr_data *dma_pos;
20930 + reg_dma_rw_stat stat;
20932 + /* Get the current output dma position. */
20933 + stat = REG_RD(dma, np->dma_out_inst, rw_stat);
20934 + if (stat.list_state == regk_dma_data_at_eol)
20935 + dma_pos = &np->active_tx_desc->descr;
20937 + dma_pos = phys_to_virt(REG_RD_INT(dma,
20938 + np->dma_out_inst,
20942 + * Protect against the tx-interrupt messing with
20945 + spin_lock_irqsave(&np->lock, flags);
20947 + * If we have more than one packet in the tx-ring
20948 + * drop one and move ahead. Upper layers rely on
20949 + * packeloss when doing congestion control.
20951 + if (intr_mask.exc_col && np->txpackets > 1) {
20952 + dev_kfree_skb_irq(np->catch_tx_desc->skb);
20953 + np->catch_tx_desc->skb = 0;
20954 + np->catch_tx_desc =
20955 + phys_to_virt((int)
20956 + np->catch_tx_desc->descr.next);
20958 + netif_wake_queue(dev);
20960 + np->ctxt_out.next = 0;
20961 + if (np->txpackets) {
20962 + np->ctxt_out.saved_data = (void *)
20963 + virt_to_phys(&np->catch_tx_desc->descr);
20964 + np->ctxt_out.saved_data_buf =
20965 + np->catch_tx_desc->descr.buf;
20967 + /* restart the DMA */
20968 + DMA_START_CONTEXT(np->dma_out_inst,
20969 + (int) virt_to_phys(&np->ctxt_out));
20972 + /* let the next packet restart the DMA */
20973 + np->ctxt_out.saved_data = (void *)
20974 + virt_to_phys(&np->active_tx_desc->descr);
20975 + np->sender_started = 0;
20978 + spin_unlock_irqrestore(&np->lock, flags);
20979 + np->stats.tx_errors++;
20982 + REG_WR_INT(eth, np->eth_inst, rw_ack_intr, ack_intr);
20984 + REG_WR(eth, np->eth_inst, rw_clr_err, clr_err);
20986 + update_rx_stats(np);
20987 + update_tx_stats(np);
20989 + return IRQ_HANDLED;
20992 +/* We have a good packet(s), get it/them out of the buffers. */
20994 +crisv32_eth_receive_packet(struct net_device *dev)
20997 + struct sk_buff *skb;
20998 + struct crisv32_ethernet_local *np = netdev_priv(dev);
20999 + struct sk_buff *tmp;
21000 + unsigned long flags;
21002 + DEBUG(printk("crisv32_receive_packet\n"));
21004 + /* Activate LED */
21005 + spin_lock_irqsave(&np->leds->led_lock, flags);
21006 + if (!np->leds->led_active && time_after(jiffies, np->leds->led_next_time)) {
21007 + /* light the network leds depending on the current speed. */
21008 + crisv32_set_network_leds(LED_ACTIVITY, dev);
21010 + /* Set the earliest time we may clear the LED */
21011 + np->leds->led_next_time = jiffies + NET_FLASH_TIME;
21012 + np->leds->led_active = 1;
21013 + np->leds->clear_led_timer.data = (unsigned long) dev;
21014 + mod_timer(&np->leds->clear_led_timer, jiffies + HZ/10);
21016 + spin_unlock_irqrestore(&np->leds->led_lock, flags);
21018 + /* Discard CRC (4 bytes). */
21019 + length = (np->active_rx_desc->descr.after) -
21020 + (np->active_rx_desc->descr.buf) - 4;
21022 + /* Update received packet statistics. */
21023 + np->stats.rx_bytes += length;
21025 + if (np->active_rx_desc != np->last_rx_desc) {
21026 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21027 + if (np->gigabit_mode) {
21028 + skb = dev_alloc_skb(length);
21030 + np->stats.rx_errors++;
21031 + printk(KERN_NOTICE "%s: memory squeeze,"
21032 + " dropping packet.", dev->name);
21035 + /* Allocate room for the packet body. */
21036 + skb_put(skb, length - ETHER_HEAD_LEN);
21037 + /* Allocate room for the header and copy the data to
21039 + memcpy(skb_push(skb, ETHER_HEAD_LEN),
21040 + crisv32_intmem_phys_to_virt((unsigned long)np->active_rx_desc->descr.buf), length);
21042 + skb->protocol = eth_type_trans(skb, dev);
21043 + skb->ip_summed = CHECKSUM_NONE;
21044 + /* Send the packet to the upper layer. */
21046 + np->last_rx_desc =
21047 + (void *) phys_to_virt(np->last_rx_desc->descr.next);
21050 + tmp = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
21052 + np->stats.rx_errors++;
21053 + printk(KERN_NOTICE "%s: memory squeeze,"
21054 + " dropping packet.",
21058 + skb = np->active_rx_desc->skb;
21059 + np->active_rx_desc->skb = tmp;
21060 + skb_put(skb, length);
21062 + np->active_rx_desc->descr.buf =
21063 + (void *) virt_to_phys(np->active_rx_desc->skb->data);
21064 + np->active_rx_desc->descr.after =
21065 + np->active_rx_desc->descr.buf + MAX_MEDIA_DATA_SIZE;
21068 + skb->protocol = eth_type_trans(skb, dev);
21069 + skb->ip_summed = CHECKSUM_NONE;
21071 + /* Send the packet to the upper layer. */
21073 + np->last_rx_desc =
21074 + phys_to_virt((int)
21075 + np->last_rx_desc->descr.next);
21077 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21081 + * When the input DMA reaches eol precaution must be taken, otherwise
21082 + * the DMA could stop. The problem occurs if the eol flag is re-placed
21083 + * on the descriptor that the DMA stands on before the DMA proceed to
21084 + * the next descriptor. This case could, for example, happen if there
21085 + * is a traffic burst and then the network goes silent. To prevent this
21086 + * we make sure that we do not set the eol flag on the descriptor that
21087 + * the DMA stands on.
21089 + if(virt_to_phys(&np->active_rx_desc->descr) !=
21090 + REG_RD_INT(dma, np->dma_in_inst, rw_saved_data)) {
21091 + np->active_rx_desc->descr.after =
21092 + np->active_rx_desc->descr.buf + MAX_MEDIA_DATA_SIZE;
21093 + np->active_rx_desc->descr.eol = 1;
21094 + np->active_rx_desc->descr.in_eop = 0;
21095 + np->active_rx_desc =
21096 + phys_to_virt((int)np->active_rx_desc->descr.next);
21098 + np->prev_rx_desc->descr.eol = 0;
21099 + flush_dma_descr(&np->prev_rx_desc->descr, 0); // Workaround cache bug
21100 + np->prev_rx_desc =
21101 + phys_to_virt((int)np->prev_rx_desc->descr.next);
21102 + flush_dma_descr(&np->prev_rx_desc->descr, 1); // Workaround cache bug
21104 + np->new_rx_package = 0;
21109 + * This function (i.e. hard_start_xmit) is protected from concurent calls by a
21110 + * spinlock (xmit_lock) in the net_device structure.
21113 +crisv32_eth_send_packet(struct sk_buff *skb, struct net_device *dev)
21115 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21116 + unsigned char *buf = skb->data;
21117 + unsigned long flags;
21119 + dev->trans_start = jiffies;
21121 + spin_lock_irqsave(&np->leds->led_lock, flags);
21122 + if (!np->leds->led_active && time_after(jiffies, np->leds->led_next_time)) {
21123 + /* light the network leds depending on the current speed. */
21124 + crisv32_set_network_leds(LED_ACTIVITY, dev);
21126 + /* Set the earliest time we may clear the LED */
21127 + np->leds->led_next_time = jiffies + NET_FLASH_TIME;
21128 + np->leds->led_active = 1;
21129 + np->leds->clear_led_timer.data = (unsigned long) dev;
21130 + mod_timer(&np->leds->clear_led_timer, jiffies + HZ/10);
21132 + spin_unlock_irqrestore(&np->leds->led_lock, flags);
21135 + * Need to disable irq to avoid updating pointer in interrupt while
21136 + * sending packets.
21138 + spin_lock_irqsave(&np->lock, flags);
21140 + np->active_tx_desc->skb = skb;
21141 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21142 + if (np->gigabit_mode) {
21143 + if(np->intmem_tx_buf_active->free) {
21144 + memcpy(np->intmem_tx_buf_active->buf,
21145 + skb->data, skb->len);
21146 + np->intmem_tx_buf_active->free = 0;
21147 + crisv32_eth_hw_send_packet(
21148 + np->intmem_tx_buf_active->buf, skb->len, np);
21149 + np->intmem_tx_buf_active =
21150 + np->intmem_tx_buf_active->next;
21152 + printk("%s: Internal tx memory buffer not free!\n\r",
21154 + spin_unlock_irqrestore(&np->lock, flags);
21161 + crisv32_eth_hw_send_packet(buf, skb->len, np);
21163 + /* Stop queue if full. */
21164 + if (np->active_tx_desc == np->catch_tx_desc)
21165 + netif_stop_queue(dev);
21168 + spin_unlock_irqrestore(&np->lock, flags);
21175 +crisv32_eth_hw_send_packet(unsigned char *buf, int length, void *priv)
21177 + struct crisv32_ethernet_local *np =
21178 + (struct crisv32_ethernet_local *) priv;
21180 + /* Configure the tx dma descriptor. */
21181 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21182 + if (np->gigabit_mode) {
21183 + np->active_tx_desc->descr.buf = (unsigned char *) crisv32_intmem_virt_to_phys(buf);
21187 + np->active_tx_desc->descr.buf = (unsigned char *) virt_to_phys(buf);
21190 + np->active_tx_desc->descr.after = np->active_tx_desc->descr.buf +
21192 + np->active_tx_desc->descr.intr = 1;
21193 + np->active_tx_desc->descr.out_eop = 1;
21196 + np->active_tx_desc->descr.eol = 1;
21197 + np->prev_tx_desc->descr.eol = 0;
21200 + /* Update pointers. */
21201 + np->prev_tx_desc = np->active_tx_desc;
21202 + np->active_tx_desc = phys_to_virt((int)np->active_tx_desc->descr.next);
21205 + crisv32_start_dma_out(np);
21209 +crisv32_start_dma_out(struct crisv32_ethernet_local* np)
21211 + if (!np->sender_started) {
21212 + /* Start DMA for the first time. */
21213 + np->ctxt_out.saved_data_buf = np->prev_tx_desc->descr.buf;
21214 + REG_WR(dma, np->dma_out_inst, rw_group_down,
21215 + (int) virt_to_phys(&np->ctxt_out));
21216 + DMA_WR_CMD(np->dma_out_inst, regk_dma_load_c);
21217 + DMA_WR_CMD(np->dma_out_inst, regk_dma_load_d | regk_dma_burst);
21218 + np->sender_started = 1;
21220 + DMA_CONTINUE_DATA(np->dma_out_inst);
21225 + * Called by upper layers if they decide it took too long to complete sending
21226 + * a packet - we need to reset and stuff.
21229 +crisv32_eth_tx_timeout(struct net_device *dev)
21231 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21232 + reg_dma_rw_cfg cfg = {0};
21233 + reg_dma_rw_stat stat = {0};
21234 + unsigned long flags;
21236 + printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
21239 + spin_lock_irqsave(&np->lock, flags);
21240 + crisv32_ethernet_bug(dev);
21242 + np->txpackets = 0;
21243 + /* Update error stats. */
21244 + np->stats.tx_errors++;
21246 + /* Reset the TX DMA in case it has hung on something. */
21248 + REG_WR(dma, np->dma_out_inst, rw_cfg, cfg);
21251 + stat = REG_RD(dma, np->dma_out_inst, rw_stat);
21252 + } while (stat.mode != regk_dma_rst);
21254 + /* Reset the tranceiver. */
21255 + crisv32_eth_reset_tranceiver(dev);
21257 + /* Get rid of the packets that never got an interrupt. */
21259 + if (np->catch_tx_desc->skb)
21260 + dev_kfree_skb(np->catch_tx_desc->skb);
21262 + np->catch_tx_desc->skb = 0;
21263 + np->catch_tx_desc =
21264 + phys_to_virt((int)np->catch_tx_desc->descr.next);
21265 + } while (np->catch_tx_desc != np->active_tx_desc);
21268 + /* Start output DMA. */
21269 + REG_WR(dma, np->dma_out_inst, rw_group_down,
21270 + (int) virt_to_phys(&np->ctxt_out));
21271 + DMA_WR_CMD(np->dma_out_inst, regk_dma_load_c);
21272 + DMA_WR_CMD(np->dma_out_inst, regk_dma_load_d | regk_dma_burst);
21273 + spin_unlock_irqrestore(&np->lock, flags);
21275 + /* Tell the upper layers we're ok again. */
21276 + netif_wake_queue(dev);
21280 + * Set or clear the multicast filter for this adaptor.
21281 + * num_addrs == -1 Promiscuous mode, receive all packets
21282 + * num_addrs == 0 Normal mode, clear multicast list
21283 + * num_addrs > 0 Multicast mode, receive normal and MC packets,
21284 + * and do best-effort filtering.
21287 +crisv32_eth_set_multicast_list(struct net_device *dev)
21289 + int num_addr = dev->mc_count;
21290 + unsigned long int lo_bits;
21291 + unsigned long int hi_bits;
21292 + reg_eth_rw_rec_ctrl rec_ctrl = {0};
21293 + reg_eth_rw_ga_lo ga_lo = {0};
21294 + reg_eth_rw_ga_hi ga_hi = {0};
21295 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21297 + if (dev->flags & IFF_PROMISC) {
21298 + /* Promiscuous mode. */
21299 + lo_bits = 0xfffffffful;
21300 + hi_bits = 0xfffffffful;
21302 + /* Enable individual receive. */
21303 + rec_ctrl = (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst,
21305 + rec_ctrl.individual = regk_eth_yes;
21306 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
21307 + } else if (dev->flags & IFF_ALLMULTI) {
21308 + /* Enable all multicasts. */
21309 + lo_bits = 0xfffffffful;
21310 + hi_bits = 0xfffffffful;
21312 + /* Disable individual receive */
21314 + (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
21315 + rec_ctrl.individual = regk_eth_no;
21316 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
21317 + } else if (num_addr == 0) {
21318 + /* Normal, clear the mc list. */
21319 + lo_bits = 0x00000000ul;
21320 + hi_bits = 0x00000000ul;
21322 + /* Disable individual receive */
21324 + (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
21325 + rec_ctrl.individual = regk_eth_no;
21326 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
21328 + /* MC mode, receive normal and MC packets. */
21330 + struct dev_mc_list *dmi = dev->mc_list;
21333 + lo_bits = 0x00000000ul;
21334 + hi_bits = 0x00000000ul;
21336 + for (i = 0; i < num_addr; i++) {
21337 + /* Calculate the hash index for the GA registers. */
21339 + baddr = dmi->dmi_addr;
21340 + hash_ix ^= (*baddr) & 0x3f;
21341 + hash_ix ^= ((*baddr) >> 6) & 0x03;
21343 + hash_ix ^= ((*baddr) << 2) & 0x03c;
21344 + hash_ix ^= ((*baddr) >> 4) & 0xf;
21346 + hash_ix ^= ((*baddr) << 4) & 0x30;
21347 + hash_ix ^= ((*baddr) >> 2) & 0x3f;
21349 + hash_ix ^= (*baddr) & 0x3f;
21350 + hash_ix ^= ((*baddr) >> 6) & 0x03;
21352 + hash_ix ^= ((*baddr) << 2) & 0x03c;
21353 + hash_ix ^= ((*baddr) >> 4) & 0xf;
21355 + hash_ix ^= ((*baddr) << 4) & 0x30;
21356 + hash_ix ^= ((*baddr) >> 2) & 0x3f;
21360 + if (hash_ix > 32)
21361 + hi_bits |= (1 << (hash_ix - 32));
21363 + lo_bits |= (1 << hash_ix);
21368 + /* Disable individual receive. */
21370 + (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
21371 + rec_ctrl.individual = regk_eth_no;
21372 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
21375 + ga_lo.tbl = (unsigned int) lo_bits;
21376 + ga_hi.tbl = (unsigned int) hi_bits;
21378 + REG_WR(eth, np->eth_inst, rw_ga_lo, ga_lo);
21379 + REG_WR(eth, np->eth_inst, rw_ga_hi, ga_hi);
21383 +crisv32_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
21385 + struct mii_ioctl_data *data = if_mii(ifr);
21386 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21389 + spin_lock(&np->lock); /* Preempt protection */
21391 + case SIOCGMIIPHY: /* Get PHY address */
21392 + data->phy_id = np->mdio_phy_addr;
21394 + case SIOCGMIIREG: /* Read MII register */
21395 + data->val_out = crisv32_eth_get_mdio_reg(dev,
21398 + case SIOCSMIIREG: /* Write MII register */
21399 + crisv32_eth_set_mdio_reg(dev, data->reg_num,
21402 + case SET_ETH_ENABLE_LEDS:
21403 + use_network_leds = 1;
21405 + case SET_ETH_DISABLE_LEDS:
21406 + use_network_leds = 0;
21408 + case SET_ETH_AUTONEG:
21409 + old_autoneg = autoneg_normal;
21410 + autoneg_normal = *(int*)data;
21411 + if (autoneg_normal != old_autoneg)
21412 + crisv32_eth_negotiate(dev);
21415 + spin_unlock(&np->lock); /* Preempt protection */
21418 + spin_unlock(&np->lock);
21422 +static int crisv32_eth_get_settings(struct net_device *dev,
21423 + struct ethtool_cmd *ecmd)
21425 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21426 + /* What about GMII and 1000xpause? not included in ethtool.h */
21427 + ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |
21428 + SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
21429 + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
21430 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21431 + ecmd->supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
21433 + ecmd->port = PORT_TP;
21434 + ecmd->transceiver = XCVR_EXTERNAL;
21435 + ecmd->phy_address = np->mdio_phy_addr;
21436 + ecmd->speed = np->current_speed;
21437 + ecmd->duplex = np->full_duplex;
21438 + ecmd->advertising = ADVERTISED_TP;
21440 + if (np->current_duplex == autoneg && np->current_speed_selection == 0)
21441 + ecmd->advertising |= ADVERTISED_Autoneg;
21443 + ecmd->advertising |=
21444 + ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
21445 + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
21446 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21447 + ecmd->advertising |= ADVERTISED_1000baseT_Half |
21448 + ADVERTISED_1000baseT_Full;
21450 + if (np->current_speed_selection == 10)
21451 + ecmd->advertising &= ~(ADVERTISED_100baseT_Half |
21452 + ADVERTISED_100baseT_Full |
21453 + ADVERTISED_1000baseT_Half |
21454 + ADVERTISED_1000baseT_Full);
21456 + else if (np->current_speed_selection == 100)
21457 + ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
21458 + ADVERTISED_10baseT_Full |
21459 + ADVERTISED_1000baseT_Half |
21460 + ADVERTISED_1000baseT_Full);
21462 + else if (np->current_speed_selection == 1000)
21463 + ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
21464 + ADVERTISED_10baseT_Full |
21465 + ADVERTISED_100baseT_Half |
21466 + ADVERTISED_100baseT_Full);
21468 + if (np->current_duplex == half)
21469 + ecmd->advertising &= ~(ADVERTISED_10baseT_Full |
21470 + ADVERTISED_100baseT_Full |
21471 + ADVERTISED_1000baseT_Full);
21472 + else if (np->current_duplex == full)
21473 + ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
21474 + ADVERTISED_100baseT_Half |
21475 + ADVERTISED_1000baseT_Half);
21478 + ecmd->autoneg = AUTONEG_ENABLE;
21482 +static int crisv32_eth_set_settings(struct net_device *dev,
21483 + struct ethtool_cmd *ecmd)
21485 + if (ecmd->autoneg == AUTONEG_ENABLE) {
21486 + crisv32_eth_set_duplex(dev, autoneg);
21487 + crisv32_eth_set_speed(dev, 0);
21489 + crisv32_eth_set_duplex(dev, ecmd->duplex);
21490 + crisv32_eth_set_speed(dev, ecmd->speed);
21496 +static void crisv32_eth_get_drvinfo(struct net_device *dev,
21497 + struct ethtool_drvinfo *info)
21499 + strncpy(info->driver, "ETRAX FS", sizeof(info->driver) - 1);
21500 + strncpy(info->version, "$Revision: 1.96 $", sizeof(info->version) - 1);
21501 + strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1);
21502 + strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1);
21505 +static int crisv32_eth_nway_reset(struct net_device *dev)
21507 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21509 + if (np->current_duplex == autoneg && np->current_speed_selection == 0)
21510 + crisv32_eth_negotiate(dev);
21514 +static struct ethtool_ops crisv32_ethtool_ops = {
21515 + .get_settings = crisv32_eth_get_settings,
21516 + .set_settings = crisv32_eth_set_settings,
21517 + .get_drvinfo = crisv32_eth_get_drvinfo,
21518 + .nway_reset = crisv32_eth_nway_reset,
21519 + .get_link = ethtool_op_get_link,
21522 +/* Is this function really needed? Use ethtool instead? */
21524 +crisv32_eth_set_config(struct net_device *dev, struct ifmap *map)
21526 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21528 + spin_lock(&np->lock); /* Preempt protection */
21530 + switch(map->port) {
21531 + case IF_PORT_UNKNOWN:
21532 + /* Use autoneg */
21533 + crisv32_eth_set_speed(dev, 0);
21534 + crisv32_eth_set_duplex(dev, autoneg);
21536 + case IF_PORT_10BASET:
21537 + crisv32_eth_set_speed(dev, 10);
21538 + crisv32_eth_set_duplex(dev, autoneg);
21540 + case IF_PORT_100BASET:
21541 + case IF_PORT_100BASETX:
21542 + crisv32_eth_set_speed(dev, 100);
21543 + crisv32_eth_set_duplex(dev, autoneg);
21545 + case IF_PORT_100BASEFX:
21546 + case IF_PORT_10BASE2:
21547 + case IF_PORT_AUI:
21548 + spin_unlock(&np->lock);
21549 + return -EOPNOTSUPP;
21552 + printk(KERN_ERR "%s: Invalid media selected",
21554 + spin_unlock(&np->lock);
21557 + spin_unlock(&np->lock);
21561 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21563 + * Switch the behaviour of the tx and rx buffers using
21564 + * external or internal memory. Usage of the internal
21565 + * memory is required for gigabit operation.
21568 +crisv32_eth_switch_intmem_usage(struct net_device *dev)
21570 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21573 + reg_dma_rw_stat stat;
21574 + reg_dma_rw_cfg cfg = {0};
21575 + reg_dma_rw_intr_mask intr_mask_in = { .in_eop = regk_dma_yes };
21576 + reg_dma_rw_ack_intr ack_intr = { .data = 1,.in_eop = 1 };
21577 + unsigned char *intmem_tmp;
21579 + /* Notify the kernel that the interface has stopped */
21580 + netif_stop_queue(dev);
21582 + /* Stop the receiver DMA */
21583 + cfg.en = regk_dma_no;
21584 + REG_WR(dma, np->dma_in_inst, rw_cfg, cfg);
21586 + if (!(np->gigabit_mode)) {
21587 + /* deallocate SKBs in rx_desc */
21588 + for (i = 0; i < NBR_RX_DESC; i++)
21589 + dev_kfree_skb(np->dma_rx_descr_list[i].skb);
21592 + for(i=0; i < NBR_INTMEM_TX_BUF; i++) {
21593 + /* Allocate internal memory */
21594 + intmem_tmp = NULL;
21595 + intmem_tmp = crisv32_intmem_alloc(MAX_MEDIA_DATA_SIZE,
21597 + /* Check that we really got the memory */
21598 + if (intmem_tmp == NULL) {
21599 + printk(KERN_ERR "%s: Can't allocate intmem for"
21600 + " RX buffer nbr: %d\n", dev->name, i);
21603 + /* Setup the list entry */
21604 + np->tx_intmem_buf_list[i].free = 1;
21605 + np->tx_intmem_buf_list[i].buf = intmem_tmp;
21606 + np->tx_intmem_buf_list[i].next = &np->tx_intmem_buf_list[i + 1];
21608 + /* Setup the last list entry */
21609 + np->tx_intmem_buf_list[NBR_INTMEM_TX_BUF - 1].next = &np->tx_intmem_buf_list[0];
21610 + /* Setup initial pointer */
21611 + np->intmem_tx_buf_active = np->tx_intmem_buf_list;
21612 + np->intmem_tx_buf_catch = np->tx_intmem_buf_list;
21615 + for (i=0; i < NBR_INTMEM_RX_DESC; i++) {
21616 + /* Allocate internal memory */
21617 + intmem_tmp = NULL;
21618 + intmem_tmp = crisv32_intmem_alloc(MAX_MEDIA_DATA_SIZE, 32);
21619 + /* Check that we really got the memory */
21620 + if (intmem_tmp == NULL) {
21621 + printk(KERN_ERR "%s: Can't allocate intmem for"
21622 + " desc nbr: %d\n", dev->name, i);
21625 + /* Setup the descriptors*/
21626 + np->dma_rx_descr_list[i].skb = NULL;
21627 + np->dma_rx_descr_list[i].descr.buf =
21628 + (void *) crisv32_intmem_virt_to_phys(intmem_tmp);
21629 + np->dma_rx_descr_list[i].descr.after =
21630 + (void *) crisv32_intmem_virt_to_phys(intmem_tmp + MAX_MEDIA_DATA_SIZE);
21631 + np->dma_rx_descr_list[i].descr.eol = 0;
21632 + np->dma_rx_descr_list[i].descr.in_eop = 0;
21633 + np->dma_rx_descr_list[i].descr.next =
21634 + (void *) virt_to_phys(&np->dma_rx_descr_list[i+1].descr);
21636 + /* Setup the last rx descriptor */
21637 + np->dma_rx_descr_list[NBR_INTMEM_RX_DESC - 1].descr.eol = 1;
21638 + np->dma_rx_descr_list[NBR_INTMEM_RX_DESC - 1].descr.next =
21639 + (void*) virt_to_phys(&np->dma_rx_descr_list[0].descr);
21640 + /* Initialise initial receive pointers. */
21641 + np->active_rx_desc = &np->dma_rx_descr_list[0];
21642 + np->prev_rx_desc = &np->dma_rx_descr_list[NBR_INTMEM_RX_DESC - 1];
21643 + np->last_rx_desc = np->prev_rx_desc;
21645 + np->gigabit_mode = 1;
21647 + /* dealloc TX intmem */
21648 + for(i=0; i < NBR_INTMEM_TX_BUF; i++)
21649 + crisv32_intmem_free(np->tx_intmem_buf_list[i].buf);
21651 + /* dealloc RX intmem */
21652 + for (i=0; i < NBR_INTMEM_RX_DESC; i++)
21653 + crisv32_intmem_free(crisv32_intmem_phys_to_virt((unsigned long)np->dma_rx_descr_list[i].descr.buf));
21655 + /* Setup new rx_desc and alloc SKBs */
21656 + for (i = 0; i < NBR_RX_DESC; i++) {
21657 + struct sk_buff *skb;
21659 + skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
21660 + np->dma_rx_descr_list[i].skb = skb;
21661 + np->dma_rx_descr_list[i].descr.buf =
21662 + (char*)virt_to_phys(skb->data);
21663 + np->dma_rx_descr_list[i].descr.after =
21664 + (char*)virt_to_phys(skb->data + MAX_MEDIA_DATA_SIZE);
21666 + np->dma_rx_descr_list[i].descr.eol = 0;
21667 + np->dma_rx_descr_list[i].descr.in_eop = 0;
21668 + np->dma_rx_descr_list[i].descr.next =
21669 + (void *) virt_to_phys(&np->dma_rx_descr_list[i + 1].descr);
21672 + np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.eol = 1;
21673 + np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.next =
21674 + (void *) virt_to_phys(&np->dma_rx_descr_list[0].descr);
21676 + /* Initialise initial receive pointers. */
21677 + np->active_rx_desc = &np->dma_rx_descr_list[0];
21678 + np->prev_rx_desc = &np->dma_rx_descr_list[NBR_RX_DESC - 1];
21679 + np->last_rx_desc = np->prev_rx_desc;
21681 + np->gigabit_mode = 0;
21684 + /* Fill context descriptors. */
21685 + np->ctxt_in.next = 0;
21686 + np->ctxt_in.saved_data =
21687 + (dma_descr_data *) virt_to_phys(&np->dma_rx_descr_list[0].descr);
21688 + np->ctxt_in.saved_data_buf = np->dma_rx_descr_list[0].descr.buf;
21690 + /* Enable irq and make sure that the irqs are cleared. */
21691 + REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_in);
21692 + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
21694 + /* Start input dma */
21695 + cfg.en = regk_dma_yes;
21696 + REG_WR(dma, np->dma_in_inst, rw_cfg, cfg);
21697 + REG_WR(dma, np->dma_in_inst, rw_group_down,
21698 + (int) virt_to_phys(&np->ctxt_in));
21700 + DMA_WR_CMD(np->dma_in_inst, regk_dma_load_c);
21701 + DMA_WR_CMD(np->dma_in_inst, regk_dma_load_d | regk_dma_burst);
21703 + netif_wake_queue(dev);
21705 + stat = REG_RD(dma, np->dma_in_inst, rw_stat);
21710 +crisv32_eth_negotiate(struct net_device *dev)
21712 + unsigned short data =
21713 + crisv32_eth_get_mdio_reg(dev, MII_ADVERTISE);
21714 + unsigned short ctrl1000 =
21715 + crisv32_eth_get_mdio_reg(dev, MII_CTRL1000);
21716 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21718 + /* Make all capabilities available */
21719 + data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
21720 + ADVERTISE_100HALF | ADVERTISE_100FULL;
21721 + ctrl1000 |= ADVERTISE_1000HALF | ADVERTISE_1000FULL;
21723 +	/* Remove the speed capabilities that we do not want */
21724 + switch (np->current_speed_selection) {
21726 + data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL);
21727 + ctrl1000 &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
21730 + data &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL);
21731 + ctrl1000 &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
21734 + data &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
21735 + ADVERTISE_100HALF | ADVERTISE_100FULL);
21739 +	/* Remove the duplex capabilities that we do not want */
21740 + if (np->current_duplex == full) {
21741 + data &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
21742 + ctrl1000 &= ~(ADVERTISE_1000HALF);
21744 + else if (np->current_duplex == half) {
21745 + data &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
21746 + ctrl1000 &= ~(ADVERTISE_1000FULL);
21749 + crisv32_eth_set_mdio_reg(dev, MII_ADVERTISE, data);
21750 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21751 + crisv32_eth_set_mdio_reg(dev, MII_CTRL1000, ctrl1000);
21754 + /* Renegotiate with link partner */
21755 + if (autoneg_normal) {
21756 + data = crisv32_eth_get_mdio_reg(dev, MII_BMCR);
21757 + data |= BMCR_ANENABLE | BMCR_ANRESTART;
21759 + crisv32_eth_set_mdio_reg(dev, MII_BMCR, data);
21762 +crisv32_eth_check_speed(unsigned long idev)
21764 + static int led_initiated = 0;
21765 + struct net_device *dev = (struct net_device *) idev;
21766 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21768 + unsigned long data;
21770 + unsigned long flags;
21773 + BUG_ON(!np->transceiver);
21775 + spin_lock(&np->transceiver_lock);
21777 + old_speed = np->current_speed;
21778 + data = crisv32_eth_get_mdio_reg(dev, MII_BMSR);
21780 + if (!(data & BMSR_LSTATUS))
21781 + np->current_speed = 0;
21783 + np->transceiver->check_speed(dev);
21785 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
21786 + if ((old_speed != np->current_speed)
21787 + && ((old_speed == 1000) || (np->current_speed == 1000))) {
21788 + /* Switch between mii and gmii */
21789 + reg_eth_rw_gen_ctrl gen_ctrl = REG_RD(eth, np->eth_inst,
21791 + reg_eth_rw_tr_ctrl tr_ctrl = REG_RD(eth, np->eth_inst,
21793 + if (old_speed == 1000) {
21794 + gen_ctrl.phy = regk_eth_mii;
21795 + gen_ctrl.gtxclk_out = regk_eth_no;
21796 + tr_ctrl.carrier_ext = regk_eth_no;
21799 + gen_ctrl.phy = regk_eth_gmii;
21800 + gen_ctrl.gtxclk_out = regk_eth_yes;
21801 + tr_ctrl.carrier_ext = regk_eth_yes;
21803 + REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl);
21804 + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
21806 + crisv32_eth_switch_intmem_usage(dev);
21810 + spin_lock_irqsave(&np->leds->led_lock, flags);
21811 + if ((old_speed != np->current_speed) || !led_initiated) {
21812 + led_initiated = 1;
21813 + np->leds->clear_led_timer.data = (unsigned long) dev;
21814 + if (np->current_speed) {
21815 + netif_carrier_on(dev);
21816 + crisv32_set_network_leds(LED_LINK, dev);
21818 + netif_carrier_off(dev);
21819 + crisv32_set_network_leds(LED_NOLINK, dev);
21822 + spin_unlock_irqrestore(&np->leds->led_lock, flags);
21824 + /* Reinitialize the timer. */
21825 + np->speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
21826 + add_timer(&np->speed_timer);
21828 + spin_unlock(&np->transceiver_lock);
21832 +crisv32_eth_set_speed(struct net_device *dev, unsigned long speed)
21834 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21836 + spin_lock(&np->transceiver_lock);
21837 + if (np->current_speed_selection != speed) {
21838 + np->current_speed_selection = speed;
21839 + crisv32_eth_negotiate(dev);
21841 + spin_unlock(&np->transceiver_lock);
21845 +crisv32_eth_check_duplex(unsigned long idev)
21847 + struct net_device *dev = (struct net_device *) idev;
21848 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21849 + reg_eth_rw_rec_ctrl rec_ctrl;
21850 + int old_duplex = np->full_duplex;
21852 + np->transceiver->check_duplex(dev);
21854 + if (old_duplex != np->full_duplex) {
21855 + /* Duplex changed. */
21856 + rec_ctrl = (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst,
21858 + rec_ctrl.duplex = np->full_duplex;
21859 + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
21862 + /* Reinitialize the timer. */
21863 + np->duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
21864 + add_timer(&np->duplex_timer);
21868 +crisv32_eth_set_duplex(struct net_device *dev, enum duplex new_duplex)
21870 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21871 + spin_lock(&np->transceiver_lock);
21872 + if (np->current_duplex != new_duplex) {
21873 + np->current_duplex = new_duplex;
21874 + crisv32_eth_negotiate(dev);
21876 + spin_unlock(&np->transceiver_lock);
21880 +crisv32_eth_probe_transceiver(struct net_device *dev)
21882 + unsigned int phyid_high;
21883 + unsigned int phyid_low;
21884 + unsigned int oui;
21885 + struct transceiver_ops *ops = NULL;
21886 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21888 + /* Probe MDIO physical address. */
21889 + for (np->mdio_phy_addr = 0;
21890 + np->mdio_phy_addr <= 31; np->mdio_phy_addr++) {
21891 + if (crisv32_eth_get_mdio_reg(dev, MII_BMSR) != 0xffff)
21895 + if (np->mdio_phy_addr == 32)
21898 + /* Get manufacturer. */
21899 + phyid_high = crisv32_eth_get_mdio_reg(dev, MII_PHYSID1);
21900 + phyid_low = crisv32_eth_get_mdio_reg(dev, MII_PHYSID2);
21902 + oui = (phyid_high << 6) | (phyid_low >> 10);
21904 + for (ops = &transceivers[0]; ops->oui; ops++) {
21905 + if (ops->oui == oui)
21909 + np->transceiver = ops;
21914 +generic_check_speed(struct net_device *dev)
21916 + unsigned long data;
21917 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21919 + data = crisv32_eth_get_mdio_reg(dev, MII_ADVERTISE);
21920 + if ((data & ADVERTISE_100FULL) ||
21921 + (data & ADVERTISE_100HALF))
21922 + np->current_speed = 100;
21924 + np->current_speed = 10;
21928 +generic_check_duplex(struct net_device *dev)
21930 + unsigned long data;
21931 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21933 + data = crisv32_eth_get_mdio_reg(dev, MII_ADVERTISE);
21934 + if ((data & ADVERTISE_10FULL) ||
21935 + (data & ADVERTISE_100FULL))
21936 + np->full_duplex = 1;
21938 + np->full_duplex = 0;
21942 +broadcom_check_speed(struct net_device *dev)
21944 + unsigned long data;
21945 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21947 + data = crisv32_eth_get_mdio_reg(dev, MDIO_AUX_CTRL_STATUS_REG);
21948 + np->current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
21952 +broadcom_check_duplex(struct net_device *dev)
21954 + unsigned long data;
21955 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21957 + data = crisv32_eth_get_mdio_reg(dev, MDIO_AUX_CTRL_STATUS_REG);
21958 + np->full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
21962 +tdk_check_speed(struct net_device *dev)
21964 + unsigned long data;
21965 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21967 + data = crisv32_eth_get_mdio_reg(dev, MDIO_TDK_DIAGNOSTIC_REG);
21968 + np->current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
21972 +tdk_check_duplex(struct net_device *dev)
21974 + unsigned long data;
21975 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21977 + data = crisv32_eth_get_mdio_reg(dev, MDIO_TDK_DIAGNOSTIC_REG);
21978 + np->full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
21983 +intel_check_speed(struct net_device *dev)
21985 + unsigned long data;
21986 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21987 + data = crisv32_eth_get_mdio_reg(dev, MDIO_INT_STATUS_REG_2);
21988 + np->current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
21992 +intel_check_duplex(struct net_device *dev)
21994 + unsigned long data;
21995 + struct crisv32_ethernet_local *np = netdev_priv(dev);
21997 + data = crisv32_eth_get_mdio_reg(dev, MDIO_INT_STATUS_REG_2);
21998 + np->full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
22002 +national_check_speed(struct net_device *dev)
22004 + unsigned long data;
22005 + struct crisv32_ethernet_local *np = netdev_priv(dev);
22007 + data = crisv32_eth_get_mdio_reg(dev, MDIO_NAT_LINK_AN_REG);
22008 + if (data & MDIO_NAT_1000)
22009 + np->current_speed = 1000;
22010 + else if (data & MDIO_NAT_100)
22011 + np->current_speed = 100;
22013 + np->current_speed = 10;
22017 +national_check_duplex(struct net_device *dev)
22019 + unsigned long data;
22020 + struct crisv32_ethernet_local *np = netdev_priv(dev);
22022 + data = crisv32_eth_get_mdio_reg(dev, MDIO_NAT_LINK_AN_REG);
22023 + if (data & MDIO_NAT_FULL_DUPLEX_IND)
22024 + np->full_duplex = 1;
22026 + np->full_duplex = 0;
22030 +crisv32_eth_reset_tranceiver(struct net_device *dev)
22033 + unsigned short cmd;
22034 + unsigned short data;
22035 + struct crisv32_ethernet_local *np = netdev_priv(dev);
22037 + data = crisv32_eth_get_mdio_reg(dev, MII_BMCR);
22039 + cmd = (MDIO_START << 14)
22040 + | (MDIO_WRITE << 12)
22041 + | (np->mdio_phy_addr << 7)
22042 + | (MII_BMCR << 2);
22044 + crisv32_eth_send_mdio_cmd(dev, cmd, 1);
22048 + /* Magic value is number of bits. */
22049 + for (i = 15; i >= 0; i--)
22050 + crisv32_eth_send_mdio_bit(dev, GET_BIT(i, data));
22053 +static unsigned short
22054 +crisv32_eth_get_mdio_reg(struct net_device *dev, unsigned char reg_num)
22057 + unsigned short cmd; /* Data to be sent on MDIO port. */
22058 + unsigned short data; /* Data read from MDIO. */
22059 + struct crisv32_ethernet_local *np = netdev_priv(dev);
22061 + /* Start of frame, OP Code, Physical Address, Register Address. */
22062 + cmd = (MDIO_START << 14)
22063 + | (MDIO_READ << 12)
22064 + | (np->mdio_phy_addr << 7)
22065 + | (reg_num << 2);
22067 + crisv32_eth_send_mdio_cmd(dev, cmd, 0);
22071 + /* Receive data. Magic value is number of bits. */
22072 + for (i = 15; i >= 0; i--)
22073 + data |= (crisv32_eth_receive_mdio_bit(dev) << i);
22079 +crisv32_eth_set_mdio_reg(struct net_device *dev, unsigned char reg, int value)
22082 + unsigned short cmd;
22083 + struct crisv32_ethernet_local *np = netdev_priv(dev);
22085 + cmd = (MDIO_START << 14)
22086 + | (MDIO_WRITE << 12)
22087 + | (np->mdio_phy_addr << 7)
22090 + crisv32_eth_send_mdio_cmd(dev, cmd, 1);
22093 + for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
22094 + crisv32_eth_send_mdio_bit(dev, GET_BIT(bitCounter, value));
22099 +crisv32_eth_send_mdio_cmd(struct net_device *dev, unsigned short cmd,
22103 + unsigned char data = 0x2;
22105 + /* Preamble. Magic value is number of bits. */
22106 + for (i = 31; i >= 0; i--)
22107 + crisv32_eth_send_mdio_bit(dev, GET_BIT(i, MDIO_PREAMBLE));
22109 + for (i = 15; i >= 2; i--)
22110 + crisv32_eth_send_mdio_bit(dev, GET_BIT(i, cmd));
22112 + /* Turnaround. */
22113 + for (i = 1; i >= 0; i--)
22115 + crisv32_eth_send_mdio_bit(dev, GET_BIT(i, data));
22117 + crisv32_eth_receive_mdio_bit(dev);
22121 +crisv32_eth_send_mdio_bit(struct net_device *dev, unsigned char bit)
22123 + struct crisv32_ethernet_local *np = netdev_priv(dev);
22125 + reg_eth_rw_mgm_ctrl mgm_ctrl = {
22126 + .mdoe = regk_eth_yes,
22130 + REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
22134 + mgm_ctrl.mdc = 1;
22135 + REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
22140 +static unsigned char
22141 +crisv32_eth_receive_mdio_bit(struct net_device *dev)
22143 + reg_eth_r_stat stat;
22144 + reg_eth_rw_mgm_ctrl mgm_ctrl = {0};
22145 + struct crisv32_ethernet_local *np = netdev_priv(dev);
22147 + REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
22148 + stat = REG_RD(eth, np->eth_inst, r_stat);
22152 + mgm_ctrl.mdc = 1;
22153 + REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
22156 + return stat.mdio;
22160 +crisv32_clear_network_leds(unsigned long priv)
22162 + struct net_device *dev = (struct net_device*)priv;
22163 + struct crisv32_ethernet_local *np = netdev_priv(dev);
22164 + unsigned long flags;
22166 + spin_lock_irqsave(&np->leds->led_lock, flags);
22167 + if (np->leds->led_active && time_after(jiffies, np->leds->led_next_time)) {
22168 + crisv32_set_network_leds(LED_NOACTIVITY, dev);
22170 + /* Set the earliest time we may set the LED */
22171 + np->leds->led_next_time = jiffies + NET_FLASH_PAUSE;
22172 + np->leds->led_active = 0;
22174 + spin_unlock_irqrestore(&np->leds->led_lock, flags);
22178 +crisv32_set_network_leds(int active, struct net_device *dev)
22180 + struct crisv32_ethernet_local *np = netdev_priv(dev);
22181 + int light_leds = 0;
22183 + if (np->leds->ledgrp == LED_GRP_NONE)
22186 + if (active == LED_NOLINK) {
22187 + if (dev == crisv32_dev[0])
22188 + np->leds->ifisup[0] = 0;
22190 + np->leds->ifisup[1] = 0;
22192 + else if (active == LED_LINK) {
22193 + if (dev == crisv32_dev[0])
22194 + np->leds->ifisup[0] = 1;
22196 + np->leds->ifisup[1] = 1;
22197 +#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK)
22200 + light_leds = (active == LED_NOACTIVITY);
22201 +#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
22204 + light_leds = (active == LED_ACTIVITY);
22206 +#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
22210 + if (!use_network_leds) {
22211 + NET_LED_SET(np->leds->ledgrp,LED_OFF);
22215 + if (!np->current_speed) {
22216 + /* Set link down if none of the interfaces that use this led group is up */
22217 + if ((np->leds->ifisup[0] + np->leds->ifisup[1]) == 0) {
22218 +#if defined(CONFIG_ETRAX_NETWORK_RED_ON_NO_CONNECTION)
22219 + /* Make LED red, link is down */
22220 + NET_LED_SET(np->leds->ledgrp,LED_RED);
22222 + NET_LED_SET(np->leds->ledgrp,LED_OFF);
22226 + else if (light_leds) {
22227 + if (np->current_speed == 10) {
22228 + NET_LED_SET(np->leds->ledgrp,LED_ORANGE);
22230 + NET_LED_SET(np->leds->ledgrp,LED_GREEN);
22234 + NET_LED_SET(np->leds->ledgrp,LED_OFF);
22238 +#ifdef CONFIG_NET_POLL_CONTROLLER
22240 +crisv32_netpoll(struct net_device* netdev)
22242 + crisv32rx_eth_interrupt(DMA0_INTR_VECT, netdev, NULL);
22246 +#ifdef CONFIG_CPU_FREQ
22248 +crisv32_ethernet_freq_notifier(struct notifier_block *nb,
22249 + unsigned long val, void *data)
22251 + struct cpufreq_freqs *freqs = data;
22252 + if (val == CPUFREQ_POSTCHANGE) {
22254 + for (i = 0; i < 2; i++) {
22255 + struct net_device* dev = crisv32_dev[i];
22256 + unsigned short data;
22260 + data = crisv32_eth_get_mdio_reg(dev, MII_BMCR);
22261 + if (freqs->new == 200000)
22262 + data &= ~BMCR_PDOWN;
22264 + data |= BMCR_PDOWN;
22265 + crisv32_eth_set_mdio_reg(dev, MII_BMCR, data);
22273 + * Must be called with the np->lock held.
22275 +static void crisv32_ethernet_bug(struct net_device *dev)
22277 + struct crisv32_ethernet_local *np = netdev_priv(dev);
22278 + dma_descr_data *dma_pos;
22279 + dma_descr_data *in_dma_pos;
22280 + reg_dma_rw_stat stat = {0};
22281 + reg_dma_rw_stat in_stat = {0};
22284 + /* Get the current output dma position. */
22285 + stat = REG_RD(dma, np->dma_out_inst, rw_stat);
22286 + dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_out_inst, rw_data));
22287 + in_stat = REG_RD(dma, np->dma_in_inst, rw_stat);
22288 + in_dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_in_inst, rw_data));
22291 + "stat.list_state=%x\n"
22293 + "stat.stream_cmd_src=%x\n"
22295 + "in_stat.list_state=%x\n"
22296 + "in_stat.mode=%x\n"
22297 + "in_stat.stream_cmd_src=%x\n"
22298 + "in_dma_pos=%x\n"
22299 + "catch=%x active=%x\n"
22300 + "packets=%d queue=%d\n"
22301 + "intr_vect.r_vect=%x\n"
22302 + "dma.r_masked_intr=%x dma.rw_ack_intr=%x "
22303 + "dma.r_intr=%x dma.rw_intr_masked=%x\n"
22304 + "eth.r_stat=%x\n",
22306 + stat.list_state, stat.mode, stat.stream_cmd_src,
22307 + (unsigned int)dma_pos,
22308 + in_stat.list_state, in_stat.mode, in_stat.stream_cmd_src,
22309 + (unsigned int)in_dma_pos,
22310 + (unsigned int)&np->catch_tx_desc->descr,
22311 + (unsigned int)&np->active_tx_desc->descr,
22313 + netif_queue_stopped(dev),
22314 + REG_RD_INT(intr_vect, regi_irq, r_vect),
22315 + REG_RD_INT(dma, np->dma_out_inst, r_masked_intr),
22316 + REG_RD_INT(dma, np->dma_out_inst, rw_ack_intr),
22317 + REG_RD_INT(dma, np->dma_out_inst, r_intr),
22318 + REG_RD_INT(dma, np->dma_out_inst, rw_intr_mask),
22319 + REG_RD_INT(eth, np->eth_inst, r_stat));
22321 + printk("tx-descriptors:\n");
22322 + for (i = 0; i < NBR_TX_DESC; i++) {
22323 + printk("txdesc[%d]=0x%x\n", i, (unsigned int)
22324 + virt_to_phys(&np->dma_tx_descr_list[i].descr));
22325 + printk("txdesc[%d].skb=0x%x\n", i,
22326 + (unsigned int)np->dma_tx_descr_list[i].skb);
22327 + printk("txdesc[%d].buf=0x%x\n", i,
22328 + (unsigned int)np->dma_tx_descr_list[i].descr.buf);
22329 + printk("txdesc[%d].after=0x%x\n", i,
22330 + (unsigned int)np->dma_tx_descr_list[i].descr.after);
22331 + printk("txdesc[%d].intr=%x\n", i,
22332 + np->dma_tx_descr_list[i].descr.intr);
22333 + printk("txdesc[%d].eol=%x\n", i,
22334 + np->dma_tx_descr_list[i].descr.eol);
22335 + printk("txdesc[%d].out_eop=%x\n", i,
22336 + np->dma_tx_descr_list[i].descr.out_eop);
22337 + printk("txdesc[%d].wait=%x\n", i,
22338 + np->dma_tx_descr_list[i].descr.wait);
22344 +crisv32_init_module(void)
22346 + return crisv32_ethernet_init();
22349 +module_init(crisv32_init_module);
22350 diff -urN linux-2.6.19.2.orig/drivers/net/cris/eth_v32.h linux-2.6.19.2.dev/drivers/net/cris/eth_v32.h
22351 --- linux-2.6.19.2.orig/drivers/net/cris/eth_v32.h 1970-01-01 01:00:00.000000000 +0100
22352 +++ linux-2.6.19.2.dev/drivers/net/cris/eth_v32.h 2007-02-06 11:10:37.000000000 +0100
22355 + * Definitions for ETRAX FS ethernet driver.
22357 + * Copyright (C) 2003, 2004, 2005 Axis Communications.
22360 +#ifndef _ETRAX_ETHERNET_H_
22361 +#define _ETRAX_ETHERNET_H_
22363 +#include <asm/arch/hwregs/dma.h>
22366 +#define MAX_MEDIA_DATA_SIZE 1522 /* Max packet size. */
22368 +#define NBR_RX_DESC 64 /* Number of RX descriptors. */
22369 +#define NBR_TX_DESC 16 /* Number of TX descriptors. */
22370 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
22371 +#define NBR_INTMEM_RX_DESC 5 /* Number of RX descriptors in int. mem.
22372 + * when running in gigabit mode.
22373 +					 * Should be less than NBR_RX_DESC
22375 +#define NBR_INTMEM_TX_BUF 4 /* Number of TX buffers in int. mem
22376 + * when running in gigabit mode.
22377 + * Should be less than NBR_TX_DESC
22381 +/* Large packets are sent directly to upper layers while small packets
22382 + * are copied (to reduce memory waste). The following constant
22383 + * decides the breakpoint.
22385 +#define RX_COPYBREAK (256)
22387 +#define ETHER_HEAD_LEN (14)
22390 +** MDIO constants.
22392 +#define MDIO_START 0x1
22393 +#define MDIO_READ 0x2
22394 +#define MDIO_WRITE 0x1
22395 +#define MDIO_PREAMBLE 0xfffffffful
22397 +/* Broadcom specific */
22398 +#define MDIO_AUX_CTRL_STATUS_REG 0x18
22399 +#define MDIO_BC_FULL_DUPLEX_IND 0x1
22400 +#define MDIO_BC_SPEED 0x2
22402 +/* TDK specific */
22403 +#define MDIO_TDK_DIAGNOSTIC_REG 18
22404 +#define MDIO_TDK_DIAGNOSTIC_RATE 0x400
22405 +#define MDIO_TDK_DIAGNOSTIC_DPLX 0x800
22407 +/*Intel LXT972A specific*/
22408 +#define MDIO_INT_STATUS_REG_2 0x0011
22409 +#define MDIO_INT_FULL_DUPLEX_IND ( 0x0001 << 9 )
22410 +#define MDIO_INT_SPEED ( 0x0001 << 14 )
22412 +/*National Semiconductor DP83865 specific*/
22413 +#define MDIO_NAT_LINK_AN_REG 0x11
22414 +#define MDIO_NAT_1000 (0x0001 << 4)
22415 +#define MDIO_NAT_100 (0x0001 << 3)
22416 +#define MDIO_NAT_FULL_DUPLEX_IND (0x0001 << 1)
22418 +/* Network flash constants */
22419 +#define NET_FLASH_TIME (HZ/50) /* 20 ms */
22420 +#define NET_FLASH_PAUSE (HZ/100) /* 10 ms */
22421 +#define NET_LINK_UP_CHECK_INTERVAL (2*HZ) /* 2 seconds. */
22422 +#define NET_DUPLEX_CHECK_INTERVAL (2*HZ) /* 2 seconds. */
22424 +/* Duplex settings. */
22431 +/* Some transceivers require special handling. */
22432 +struct transceiver_ops {
22433 + unsigned int oui;
22434 + void (*check_speed) (struct net_device * dev);
22435 + void (*check_duplex) (struct net_device * dev);
22438 +typedef struct crisv32_eth_descr {
22439 + dma_descr_data descr __attribute__ ((__aligned__(32)));
22440 + struct sk_buff *skb;
22441 + unsigned char *linearized_packet;
22442 +} crisv32_eth_descr;
22446 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
22447 +struct tx_buffer_list {
22448 + struct tx_buffer_list *next;
22449 + unsigned char *buf;
22455 +#define LED_GRP_0 0
22456 +#define LED_GRP_1 1
22457 +#define LED_GRP_NONE 2
22459 +#define LED_ACTIVITY 0
22460 +#define LED_NOACTIVITY 1
22461 +#define LED_LINK 2
22462 +#define LED_NOLINK 3
22464 +struct crisv32_eth_leds {
22465 + unsigned int ledgrp;
22467 + unsigned long led_next_time;
22468 + struct timer_list clear_led_timer;
22469 + spinlock_t led_lock; /* Protect LED state */
22473 +#define NET_LED_SET(x,y) \
22475 + if (x == 0) LED_NETWORK_GRP0_SET(y); \
22476 + if (x == 1) LED_NETWORK_GRP1_SET(y); \
22479 +/* Information that need to be kept for each device. */
22480 +struct crisv32_ethernet_local {
22481 + dma_descr_context ctxt_in __attribute__ ((__aligned__(32)));
22482 + dma_descr_context ctxt_out __attribute__ ((__aligned__(32)));
22484 + crisv32_eth_descr *active_rx_desc;
22485 + crisv32_eth_descr *prev_rx_desc;
22486 + crisv32_eth_descr *last_rx_desc;
22488 + crisv32_eth_descr *active_tx_desc;
22489 + crisv32_eth_descr *prev_tx_desc;
22490 + crisv32_eth_descr *catch_tx_desc;
22492 + crisv32_eth_descr dma_rx_descr_list[NBR_RX_DESC];
22493 + crisv32_eth_descr dma_tx_descr_list[NBR_TX_DESC];
22494 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
22495 + struct tx_buffer_list tx_intmem_buf_list[NBR_INTMEM_TX_BUF];
22496 + struct tx_buffer_list *intmem_tx_buf_active;
22497 + struct tx_buffer_list *intmem_tx_buf_catch;
22498 + char gigabit_mode;
22500 + char new_rx_package;
22502 + /* DMA and ethernet registers for the device. */
22505 + int dma_out_inst;
22507 + /* Network speed indication. */
22508 + struct timer_list speed_timer;
22509 + int current_speed; /* Speed read from tranceiver */
22510 + int current_speed_selection; /* Speed selected by user */
22511 + int sender_started;
22514 + struct crisv32_eth_leds *leds;
22517 + struct timer_list duplex_timer;
22519 + enum duplex current_duplex;
22521 + struct net_device_stats stats;
22523 +	/* Transceiver address. */
22524 + unsigned int mdio_phy_addr;
22526 + struct transceiver_ops *transceiver;
22529 + * TX control lock. This protects the transmit buffer ring state along
22530 + * with the "tx full" state of the driver. This means all netif_queue
22531 + * flow control actions are protected by this lock as well.
22534 + spinlock_t transceiver_lock; /* Protect transceiver state. */
22537 +/* Function prototypes. */
22538 +static int crisv32_ethernet_init(void);
22539 +static int crisv32_ethernet_device_init(struct net_device* dev);
22540 +static int crisv32_eth_open(struct net_device *dev);
22541 +static int crisv32_eth_close(struct net_device *dev);
22542 +static int crisv32_eth_set_mac_address(struct net_device *dev, void *vpntr);
22543 +static irqreturn_t crisv32rx_eth_interrupt(int irq, void *dev_id);
22544 +static irqreturn_t crisv32tx_eth_interrupt(int irq, void *dev_id);
22545 +static irqreturn_t crisv32nw_eth_interrupt(int irq, void *dev_id);
22546 +static void crisv32_eth_receive_packet(struct net_device *dev);
22547 +static int crisv32_eth_send_packet(struct sk_buff *skb, struct net_device *dev);
22548 +static void crisv32_eth_hw_send_packet(unsigned char *buf, int length,
22550 +static void crisv32_eth_tx_timeout(struct net_device *dev);
22551 +static void crisv32_eth_set_multicast_list(struct net_device *dev);
22552 +static int crisv32_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
22554 +static int crisv32_eth_set_config(struct net_device* dev, struct ifmap* map);
22555 +#ifdef CONFIG_CRIS_MACH_ARTPEC3
22556 +static void crisv32_eth_switch_intmem_usage(struct net_device *dev);
22558 +static void crisv32_eth_negotiate(struct net_device *dev);
22559 +static void crisv32_eth_check_speed(unsigned long idev);
22560 +static void crisv32_eth_set_speed(struct net_device *dev, unsigned long speed);
22561 +static void crisv32_eth_check_duplex(unsigned long idev);
22562 +static void crisv32_eth_set_duplex(struct net_device *dev, enum duplex);
22563 +static int crisv32_eth_probe_transceiver(struct net_device *dev);
22565 +static struct ethtool_ops crisv32_ethtool_ops;
22567 +static void generic_check_speed(struct net_device *dev);
22568 +static void generic_check_duplex(struct net_device *dev);
22569 +static void broadcom_check_speed(struct net_device *dev);
22570 +static void broadcom_check_duplex(struct net_device *dev);
22571 +static void tdk_check_speed(struct net_device *dev);
22572 +static void tdk_check_duplex(struct net_device *dev);
22573 +static void intel_check_speed(struct net_device* dev);
22574 +static void intel_check_duplex(struct net_device *dev);
22575 +static void national_check_speed(struct net_device* dev);
22576 +static void national_check_duplex(struct net_device *dev);
22578 +#ifdef CONFIG_NET_POLL_CONTROLLER
22579 +static void crisv32_netpoll(struct net_device* dev);
22582 +static void crisv32_clear_network_leds(unsigned long dummy);
22583 +static void crisv32_set_network_leds(int active, struct net_device* dev);
22585 +static void crisv32_eth_reset_tranceiver(struct net_device *dev);
22586 +static unsigned short crisv32_eth_get_mdio_reg(struct net_device *dev,
22587 + unsigned char reg_num);
22588 +static void crisv32_eth_set_mdio_reg(struct net_device *dev,
22589 + unsigned char reg_num,
22591 +static void crisv32_eth_send_mdio_cmd(struct net_device *dev,
22592 + unsigned short cmd, int write_cmd);
22593 +static void crisv32_eth_send_mdio_bit(struct net_device *dev,
22594 + unsigned char bit);
22595 +static unsigned char crisv32_eth_receive_mdio_bit(struct net_device *dev);
22597 +static struct net_device_stats *crisv32_get_stats(struct net_device *dev);
22598 +static void crisv32_start_dma_out(struct crisv32_ethernet_local* np);
22601 +#endif /* _ETRAX_ETHERNET_H_ */