/*
 * ETRAX 100LX USB Host Controller Driver
 *
 * Copyright (C) 2005 - 2008 Axis Communications AB
 *
 * Author: Konrad Eriksson <konrad.eriksson@axis.se>
 */
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/moduleparam.h>
15 #include <linux/spinlock.h>
16 #include <linux/usb.h>
17 #include <linux/platform_device.h>
21 #include <asm/arch/dma.h>
22 #include <asm/arch/io_interface_mux.h>
24 #include "../core/hcd.h"
25 #include "../core/hub.h"
26 #include "hc-crisv10.h"
27 #include "hc-cris-dbg.h"
30 /***************************************************************************/
31 /***************************************************************************/
32 /* Host Controller settings */
33 /***************************************************************************/
34 /***************************************************************************/
36 #define VERSION "1.00-openwrt_diff"
37 #define COPYRIGHT "(c) 2005, 2006 Axis Communications AB"
38 #define DESCRIPTION "ETRAX 100LX USB Host Controller"
40 #define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
41 #define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
42 #define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
44 /* Number of physical ports in Etrax 100LX */
45 #define USB_ROOT_HUB_PORTS 2
47 const char hc_name
[] = "hc-crisv10";
48 const char product_desc
[] = DESCRIPTION
;
50 /* The number of epids is, among other things, used for pre-allocating
51 ctrl, bulk and isoc EP descriptors (one for each epid).
52 Assumed to be > 1 when initiating the DMA lists. */
53 #define NBR_OF_EPIDS 32
55 /* Support interrupt traffic intervals up to 128 ms. */
56 #define MAX_INTR_INTERVAL 128
58 /* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
59 table must be "invalid". By this we mean that we shouldn't care about epid
60 attentions for this epid, or at least handle them differently from epid
   attentions for "valid" epids. This define determines which one to use. */
63 #define INVALID_EPID 31
64 /* A special epid for the bulk dummys. */
69 MODULE_DESCRIPTION(DESCRIPTION
);
70 MODULE_LICENSE("GPL");
71 MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
74 /* Module parameters */
76 /* 0 = No ports enabled
77 1 = Only port 1 enabled (on board ethernet on devboard)
78 2 = Only port 2 enabled (external connector on devboard)
79 3 = Both ports enabled
81 static unsigned int ports
= 3;
82 module_param(ports
, uint
, S_IRUGO
);
83 MODULE_PARM_DESC(ports
, "Bitmask indicating USB ports to use");
86 /***************************************************************************/
87 /***************************************************************************/
88 /* Shared global variables for this module */
89 /***************************************************************************/
90 /***************************************************************************/
92 /* EP descriptor lists for non period transfers. Must be 32-bit aligned. */
93 static volatile struct USB_EP_Desc TxBulkEPList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
95 static volatile struct USB_EP_Desc TxCtrlEPList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
97 /* EP descriptor lists for period transfers. Must be 32-bit aligned. */
98 static volatile struct USB_EP_Desc TxIntrEPList
[MAX_INTR_INTERVAL
] __attribute__ ((aligned (4)));
99 static volatile struct USB_SB_Desc TxIntrSB_zout
__attribute__ ((aligned (4)));
101 static volatile struct USB_EP_Desc TxIsocEPList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
102 static volatile struct USB_SB_Desc TxIsocSB_zout
__attribute__ ((aligned (4)));
104 static volatile struct USB_SB_Desc TxIsocSBList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
106 /* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
107 causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
108 gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
   EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors. */
111 static volatile struct USB_EP_Desc TxBulkDummyEPList
[NBR_OF_EPIDS
][2] __attribute__ ((aligned (4)));
113 /* List of URB pointers, where each points to the active URB for a epid.
114 For Bulk, Ctrl and Intr this means which URB that currently is added to
115 DMA lists (Isoc URBs are all directly added to DMA lists). As soon as
116 URB has completed is the queue examined and the first URB in queue is
117 removed and moved to the activeUrbList while its state change to STARTED and
118 its transfer(s) gets added to DMA list (exception Isoc where URBs enter
119 state STARTED directly and added transfers added to DMA lists). */
120 static struct urb
*activeUrbList
[NBR_OF_EPIDS
];
122 /* Additional software state info for each epid */
123 static struct etrax_epid epid_state
[NBR_OF_EPIDS
];
125 /* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
126 even if there is new data waiting to be processed */
127 static struct timer_list bulk_start_timer
= TIMER_INITIALIZER(NULL
, 0, 0);
128 static struct timer_list bulk_eot_timer
= TIMER_INITIALIZER(NULL
, 0, 0);
130 /* We want the start timer to expire before the eot timer, because the former
   might start traffic, thus making it unnecessary for the latter to time out. */
133 #define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
134 #define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */
136 /* Delay before a URB completion happen when it's scheduled to be delayed */
137 #define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
139 /* Simplifying macros for checking software state info of a epid */
140 /* ----------------------------------------------------------------------- */
141 #define epid_inuse(epid) epid_state[epid].inuse
142 #define epid_out_traffic(epid) epid_state[epid].out_traffic
143 #define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
144 #define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
147 /***************************************************************************/
148 /***************************************************************************/
149 /* DEBUG FUNCTIONS */
150 /***************************************************************************/
151 /***************************************************************************/
152 /* Note that these functions are always available in their "__" variants,
153 for use in error situations. The "__" missing variants are controlled by
154 the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
155 static void __dump_urb(struct urb
* purb
)
157 struct crisv10_urb_priv
*urb_priv
= purb
->hcpriv
;
160 urb_num
= urb_priv
->urb_num
;
162 printk("\nURB:0x%x[%d]\n", (unsigned int)purb
, urb_num
);
163 printk("dev :0x%08lx\n", (unsigned long)purb
->dev
);
164 printk("pipe :0x%08x\n", purb
->pipe
);
165 printk("status :%d\n", purb
->status
);
166 printk("transfer_flags :0x%08x\n", purb
->transfer_flags
);
167 printk("transfer_buffer :0x%08lx\n", (unsigned long)purb
->transfer_buffer
);
168 printk("transfer_buffer_length:%d\n", purb
->transfer_buffer_length
);
169 printk("actual_length :%d\n", purb
->actual_length
);
170 printk("setup_packet :0x%08lx\n", (unsigned long)purb
->setup_packet
);
171 printk("start_frame :%d\n", purb
->start_frame
);
172 printk("number_of_packets :%d\n", purb
->number_of_packets
);
173 printk("interval :%d\n", purb
->interval
);
174 printk("error_count :%d\n", purb
->error_count
);
175 printk("context :0x%08lx\n", (unsigned long)purb
->context
);
176 printk("complete :0x%08lx\n\n", (unsigned long)purb
->complete
);
179 static void __dump_in_desc(volatile struct USB_IN_Desc
*in
)
181 printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in
);
182 printk(" sw_len : 0x%04x (%d)\n", in
->sw_len
, in
->sw_len
);
183 printk(" command : 0x%04x\n", in
->command
);
184 printk(" next : 0x%08lx\n", in
->next
);
185 printk(" buf : 0x%08lx\n", in
->buf
);
186 printk(" hw_len : 0x%04x (%d)\n", in
->hw_len
, in
->hw_len
);
187 printk(" status : 0x%04x\n\n", in
->status
);
190 static void __dump_sb_desc(volatile struct USB_SB_Desc
*sb
)
192 char tt
= (sb
->command
& 0x30) >> 4;
209 tt_string
= "unknown (weird)";
212 printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb
);
213 printk(" command:0x%04x (", sb
->command
);
214 printk("rem:%d ", (sb
->command
& 0x3f00) >> 8);
215 printk("full:%d ", (sb
->command
& 0x40) >> 6);
216 printk("tt:%d(%s) ", tt
, tt_string
);
217 printk("intr:%d ", (sb
->command
& 0x8) >> 3);
218 printk("eot:%d ", (sb
->command
& 0x2) >> 1);
219 printk("eol:%d)", sb
->command
& 0x1);
220 printk(" sw_len:0x%04x(%d)", sb
->sw_len
, sb
->sw_len
);
221 printk(" next:0x%08lx", sb
->next
);
222 printk(" buf:0x%08lx\n", sb
->buf
);
226 static void __dump_ep_desc(volatile struct USB_EP_Desc
*ep
)
228 printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep
);
229 printk(" command:0x%04x (", ep
->command
);
230 printk("ep_id:%d ", (ep
->command
& 0x1f00) >> 8);
231 printk("enable:%d ", (ep
->command
& 0x10) >> 4);
232 printk("intr:%d ", (ep
->command
& 0x8) >> 3);
233 printk("eof:%d ", (ep
->command
& 0x2) >> 1);
234 printk("eol:%d)", ep
->command
& 0x1);
235 printk(" hw_len:0x%04x(%d)", ep
->hw_len
, ep
->hw_len
);
236 printk(" next:0x%08lx", ep
->next
);
237 printk(" sub:0x%08lx\n", ep
->sub
);
240 static inline void __dump_ep_list(int pipe_type
)
242 volatile struct USB_EP_Desc
*ep
;
243 volatile struct USB_EP_Desc
*first_ep
;
244 volatile struct USB_SB_Desc
*sb
;
249 first_ep
= &TxBulkEPList
[0];
252 first_ep
= &TxCtrlEPList
[0];
255 first_ep
= &TxIntrEPList
[0];
257 case PIPE_ISOCHRONOUS
:
258 first_ep
= &TxIsocEPList
[0];
261 warn("Cannot dump unknown traffic type");
266 printk("\n\nDumping EP list...\n\n");
270 /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
271 sb
= ep
->sub
? phys_to_virt(ep
->sub
) : 0;
274 sb
= sb
->next
? phys_to_virt(sb
->next
) : 0;
276 ep
= (volatile struct USB_EP_Desc
*)(phys_to_virt(ep
->next
));
278 } while (ep
!= first_ep
);
281 static inline void __dump_ept_data(int epid
)
284 __u32 r_usb_ept_data
;
286 if (epid
< 0 || epid
> 31) {
287 printk("Cannot dump ept data for invalid epid %d\n", epid
);
291 local_irq_save(flags
);
292 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, epid
);
294 r_usb_ept_data
= *R_USB_EPT_DATA
;
295 local_irq_restore(flags
);
297 printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data
, epid
);
298 if (r_usb_ept_data
== 0) {
299 /* No need for more detailed printing. */
302 printk(" valid : %d\n", (r_usb_ept_data
& 0x80000000) >> 31);
303 printk(" hold : %d\n", (r_usb_ept_data
& 0x40000000) >> 30);
304 printk(" error_count_in : %d\n", (r_usb_ept_data
& 0x30000000) >> 28);
305 printk(" t_in : %d\n", (r_usb_ept_data
& 0x08000000) >> 27);
306 printk(" low_speed : %d\n", (r_usb_ept_data
& 0x04000000) >> 26);
307 printk(" port : %d\n", (r_usb_ept_data
& 0x03000000) >> 24);
308 printk(" error_code : %d\n", (r_usb_ept_data
& 0x00c00000) >> 22);
309 printk(" t_out : %d\n", (r_usb_ept_data
& 0x00200000) >> 21);
310 printk(" error_count_out : %d\n", (r_usb_ept_data
& 0x00180000) >> 19);
311 printk(" max_len : %d\n", (r_usb_ept_data
& 0x0003f800) >> 11);
312 printk(" ep : %d\n", (r_usb_ept_data
& 0x00000780) >> 7);
313 printk(" dev : %d\n", (r_usb_ept_data
& 0x0000003f));
316 static inline void __dump_ept_data_iso(int epid
)
321 if (epid
< 0 || epid
> 31) {
322 printk("Cannot dump ept data for invalid epid %d\n", epid
);
326 local_irq_save(flags
);
327 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, epid
);
329 ept_data
= *R_USB_EPT_DATA_ISO
;
330 local_irq_restore(flags
);
332 printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data
, epid
);
334 /* No need for more detailed printing. */
337 printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, valid
,
339 printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, port
,
341 printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, error_code
,
343 printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, max_len
,
345 printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, ep
,
347 printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, dev
,
351 static inline void __dump_ept_data_list(void)
355 printk("Dumping the whole R_USB_EPT_DATA list\n");
357 for (i
= 0; i
< 32; i
++) {
362 static void debug_epid(int epid
) {
365 if(epid_isoc(epid
)) {
366 __dump_ept_data_iso(epid
);
368 __dump_ept_data(epid
);
372 for(i
= 0; i
< 32; i
++) {
373 if(IO_EXTRACT(USB_EP_command
, epid
, TxBulkEPList
[i
].command
) ==
375 printk("%d: ", i
); __dump_ep_desc(&(TxBulkEPList
[i
]));
380 for(i
= 0; i
< 32; i
++) {
381 if(IO_EXTRACT(USB_EP_command
, epid
, TxCtrlEPList
[i
].command
) ==
383 printk("%d: ", i
); __dump_ep_desc(&(TxCtrlEPList
[i
]));
388 for(i
= 0; i
< MAX_INTR_INTERVAL
; i
++) {
389 if(IO_EXTRACT(USB_EP_command
, epid
, TxIntrEPList
[i
].command
) ==
391 printk("%d: ", i
); __dump_ep_desc(&(TxIntrEPList
[i
]));
396 for(i
= 0; i
< 32; i
++) {
397 if(IO_EXTRACT(USB_EP_command
, epid
, TxIsocEPList
[i
].command
) ==
399 printk("%d: ", i
); __dump_ep_desc(&(TxIsocEPList
[i
]));
403 __dump_ept_data_list();
404 __dump_ep_list(PIPE_INTERRUPT
);
410 char* hcd_status_to_str(__u8 bUsbStatus
) {
411 static char hcd_status_str
[128];
412 hcd_status_str
[0] = '\0';
413 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, ourun
, yes
)) {
414 strcat(hcd_status_str
, "ourun ");
416 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, perror
, yes
)) {
417 strcat(hcd_status_str
, "perror ");
419 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, device_mode
, yes
)) {
420 strcat(hcd_status_str
, "device_mode ");
422 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, host_mode
, yes
)) {
423 strcat(hcd_status_str
, "host_mode ");
425 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, started
, yes
)) {
426 strcat(hcd_status_str
, "started ");
428 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, running
, yes
)) {
429 strcat(hcd_status_str
, "running ");
431 return hcd_status_str
;
435 char* sblist_to_str(struct USB_SB_Desc
* sb_desc
) {
436 static char sblist_to_str_buff
[128];
437 char tmp
[32], tmp2
[32];
438 sblist_to_str_buff
[0] = '\0';
439 while(sb_desc
!= NULL
) {
440 switch(IO_EXTRACT(USB_SB_command
, tt
, sb_desc
->command
)) {
441 case 0: sprintf(tmp
, "zout"); break;
442 case 1: sprintf(tmp
, "in"); break;
443 case 2: sprintf(tmp
, "out"); break;
444 case 3: sprintf(tmp
, "setup"); break;
446 sprintf(tmp2
, "(%s %d)", tmp
, sb_desc
->sw_len
);
447 strcat(sblist_to_str_buff
, tmp2
);
448 if(sb_desc
->next
!= 0) {
449 sb_desc
= phys_to_virt(sb_desc
->next
);
454 return sblist_to_str_buff
;
457 char* port_status_to_str(__u16 wPortStatus
) {
458 static char port_status_str
[128];
459 port_status_str
[0] = '\0';
460 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, connected
, yes
)) {
461 strcat(port_status_str
, "connected ");
463 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
)) {
464 strcat(port_status_str
, "enabled ");
466 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, suspended
, yes
)) {
467 strcat(port_status_str
, "suspended ");
469 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, reset
, yes
)) {
470 strcat(port_status_str
, "reset ");
472 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, speed
, full
)) {
473 strcat(port_status_str
, "full-speed ");
475 strcat(port_status_str
, "low-speed ");
477 return port_status_str
;
481 char* endpoint_to_str(struct usb_endpoint_descriptor
*ed
) {
482 static char endpoint_to_str_buff
[128];
484 int epnum
= ed
->bEndpointAddress
& 0x0F;
485 int dir
= ed
->bEndpointAddress
& 0x80;
486 int type
= ed
->bmAttributes
& 0x03;
487 endpoint_to_str_buff
[0] = '\0';
488 sprintf(endpoint_to_str_buff
, "ep:%d ", epnum
);
491 sprintf(tmp
, " ctrl");
494 sprintf(tmp
, " isoc");
497 sprintf(tmp
, " bulk");
500 sprintf(tmp
, " intr");
503 strcat(endpoint_to_str_buff
, tmp
);
507 sprintf(tmp
, " out");
509 strcat(endpoint_to_str_buff
, tmp
);
511 return endpoint_to_str_buff
;
514 /* Debug helper functions for Transfer Controller */
/* Render a USB pipe word as "dir:<d> type:<t> dev:<n> ep:<n>".
 * Returns a pointer to a static buffer (not reentrant). */
char* pipe_to_str(unsigned int pipe) {
  static char pipe_to_str_buff[128];
  /* Single formatted write producing exactly the concatenation that the
     original built with a scratch buffer and repeated strcat calls. */
  sprintf(pipe_to_str_buff, "dir:%s type:%s dev:%d ep:%d",
          str_dir(pipe), str_type(pipe),
          usb_pipedevice(pipe), usb_pipeendpoint(pipe));
  return pipe_to_str_buff;
}
530 #define USB_DEBUG_DESC 1
/* Descriptor-dump helpers are real calls only when USB_DEBUG_DESC is set;
   otherwise they compile to no-ops.
   Fix: dump_sb_desc expanded to __dump_sb_desc(...) — a literal "..." is
   not valid in the replacement; forward the arguments with __VA_ARGS__. */
#ifdef USB_DEBUG_DESC
#define dump_in_desc(x)    __dump_in_desc(x)
#define dump_sb_desc(...)  __dump_sb_desc(__VA_ARGS__)
#define dump_ep_desc(x)    __dump_ep_desc(x)
#define dump_ept_data(x)   __dump_ept_data(x)
#else
#define dump_in_desc(...)  do {} while (0)
#define dump_sb_desc(...)  do {} while (0)
#define dump_ep_desc(...)  do {} while (0)
#define dump_ept_data(...) do {} while (0)
#endif
544 /* Uncomment this to enable massive function call trace
545 #define USB_DEBUG_TRACE */
547 #ifdef USB_DEBUG_TRACE
548 #define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
549 #define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
551 #define DBFENTER do {} while (0)
552 #define DBFEXIT do {} while (0)
555 #define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
556 {panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
558 /* Most helpful debugging aid */
559 #define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
562 /***************************************************************************/
563 /***************************************************************************/
564 /* Forward declarations */
565 /***************************************************************************/
566 /***************************************************************************/
567 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg
*reg
);
568 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg
*reg
);
569 void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg
*reg
);
570 void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg
*reg
);
572 void rh_port_status_change(__u16
[]);
573 int rh_clear_port_feature(__u8
, __u16
);
574 int rh_set_port_feature(__u8
, __u16
);
575 static void rh_disable_port(unsigned int port
);
577 static void check_finished_bulk_tx_epids(struct usb_hcd
*hcd
,
580 static int tc_setup_epid(struct usb_host_endpoint
*ep
, struct urb
*urb
,
582 static void tc_free_epid(struct usb_host_endpoint
*ep
);
583 static int tc_allocate_epid(void);
584 static void tc_finish_urb(struct usb_hcd
*hcd
, struct urb
*urb
, int status
);
585 static void tc_finish_urb_later(struct usb_hcd
*hcd
, struct urb
*urb
,
588 static int urb_priv_create(struct usb_hcd
*hcd
, struct urb
*urb
, int epid
,
590 static void urb_priv_free(struct usb_hcd
*hcd
, struct urb
*urb
);
592 static int crisv10_usb_check_bandwidth(struct usb_device
*dev
,struct urb
*urb
);
593 static void crisv10_usb_claim_bandwidth(
594 struct usb_device
*dev
, struct urb
*urb
, int bustime
, int isoc
);
595 static void crisv10_usb_release_bandwidth(
596 struct usb_hcd
*hcd
, int isoc
, int bandwidth
);
598 static inline struct urb
*urb_list_first(int epid
);
599 static inline void urb_list_add(struct urb
*urb
, int epid
,
601 static inline urb_entry_t
*urb_list_entry(struct urb
*urb
, int epid
);
602 static inline void urb_list_del(struct urb
*urb
, int epid
);
603 static inline void urb_list_move_last(struct urb
*urb
, int epid
);
604 static inline struct urb
*urb_list_next(struct urb
*urb
, int epid
);
606 int create_sb_for_urb(struct urb
*urb
, int mem_flags
);
607 int init_intr_urb(struct urb
*urb
, int mem_flags
);
609 static inline void etrax_epid_set(__u8 index
, __u32 data
);
610 static inline void etrax_epid_clear_error(__u8 index
);
611 static inline void etrax_epid_set_toggle(__u8 index
, __u8 dirout
,
613 static inline __u8
etrax_epid_get_toggle(__u8 index
, __u8 dirout
);
614 static inline __u32
etrax_epid_get(__u8 index
);
616 /* We're accessing the same register position in Etrax so
617 when we do full access the internal difference doesn't matter */
618 #define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
619 #define etrax_epid_iso_get(index) etrax_epid_get(index)
622 static void tc_dma_process_isoc_urb(struct urb
*urb
);
623 static void tc_dma_process_queue(int epid
);
624 static void tc_dma_unlink_intr_urb(struct urb
*urb
);
625 static irqreturn_t
tc_dma_tx_interrupt(int irq
, void *vhc
);
626 static irqreturn_t
tc_dma_rx_interrupt(int irq
, void *vhc
);
628 static void tc_bulk_start_timer_func(unsigned long dummy
);
629 static void tc_bulk_eot_timer_func(unsigned long dummy
);
632 /*************************************************************/
633 /*************************************************************/
/* Host Controller Driver block */
635 /*************************************************************/
636 /*************************************************************/
639 static irqreturn_t
crisv10_hcd_top_irq(int irq
, void*);
640 static int crisv10_hcd_reset(struct usb_hcd
*);
641 static int crisv10_hcd_start(struct usb_hcd
*);
642 static void crisv10_hcd_stop(struct usb_hcd
*);
644 static int crisv10_hcd_suspend(struct device
*, u32
, u32
);
645 static int crisv10_hcd_resume(struct device
*, u32
);
646 #endif /* CONFIG_PM */
647 static int crisv10_hcd_get_frame(struct usb_hcd
*);
649 static int tc_urb_enqueue(struct usb_hcd
*, struct urb
*, gfp_t mem_flags
);
650 static int tc_urb_dequeue(struct usb_hcd
*, struct urb
*, int);
651 static void tc_endpoint_disable(struct usb_hcd
*, struct usb_host_endpoint
*ep
);
653 static int rh_status_data_request(struct usb_hcd
*, char *);
654 static int rh_control_request(struct usb_hcd
*, u16
, u16
, u16
, char*, u16
);
657 static int crisv10_hcd_hub_suspend(struct usb_hcd
*);
658 static int crisv10_hcd_hub_resume(struct usb_hcd
*);
659 #endif /* CONFIG_PM */
660 #ifdef CONFIG_USB_OTG
661 static int crisv10_hcd_start_port_reset(struct usb_hcd
*, unsigned);
662 #endif /* CONFIG_USB_OTG */
664 /* host controller driver interface */
665 static const struct hc_driver crisv10_hc_driver
=
667 .description
= hc_name
,
668 .product_desc
= product_desc
,
669 .hcd_priv_size
= sizeof(struct crisv10_hcd
),
671 /* Attaching IRQ handler manualy in probe() */
672 /* .irq = crisv10_hcd_irq, */
676 /* called to init HCD and root hub */
677 .reset
= crisv10_hcd_reset
,
678 .start
= crisv10_hcd_start
,
680 /* cleanly make HCD stop writing memory and doing I/O */
681 .stop
= crisv10_hcd_stop
,
683 /* return current frame number */
684 .get_frame_number
= crisv10_hcd_get_frame
,
687 /* Manage i/o requests via the Transfer Controller */
688 .urb_enqueue
= tc_urb_enqueue
,
689 .urb_dequeue
= tc_urb_dequeue
,
691 /* hw synch, freeing endpoint resources that urb_dequeue can't */
692 .endpoint_disable
= tc_endpoint_disable
,
695 /* Root Hub support */
696 .hub_status_data
= rh_status_data_request
,
697 .hub_control
= rh_control_request
,
699 .hub_suspend
= rh_suspend_request
,
700 .hub_resume
= rh_resume_request
,
701 #endif /* CONFIG_PM */
702 #ifdef CONFIG_USB_OTG
703 .start_port_reset
= crisv10_hcd_start_port_reset
,
704 #endif /* CONFIG_USB_OTG */
709 * conversion between pointers to a hcd and the corresponding
713 static inline struct crisv10_hcd
*hcd_to_crisv10_hcd(struct usb_hcd
*hcd
)
715 return (struct crisv10_hcd
*) hcd
->hcd_priv
;
718 static inline struct usb_hcd
*crisv10_hcd_to_hcd(struct crisv10_hcd
*hcd
)
720 return container_of((void *) hcd
, struct usb_hcd
, hcd_priv
);
723 /* check if specified port is in use */
724 static inline int port_in_use(unsigned int port
)
726 return ports
& (1 << port
);
729 /* number of ports in use */
730 static inline unsigned int num_ports(void)
732 unsigned int i
, num
= 0;
733 for (i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++)
739 /* map hub port number to the port number used internally by the HC */
740 static inline unsigned int map_port(unsigned int port
)
742 unsigned int i
, num
= 0;
743 for (i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++)
750 /* size of descriptors in slab cache */
752 #define MAX(x, y) ((x) > (y) ? (x) : (y))
756 /******************************************************************/
757 /* Hardware Interrupt functions */
758 /******************************************************************/
760 /* Fast interrupt handler for HC */
761 static irqreturn_t
crisv10_hcd_top_irq(int irq
, void *vcd
)
763 struct usb_hcd
*hcd
= vcd
;
764 struct crisv10_irq_reg reg
;
773 /* Turn of other interrupts while handling these sensitive cases */
774 local_irq_save(flags
);
776 /* Read out which interrupts that are flaged */
777 irq_mask
= *R_USB_IRQ_MASK_READ
;
778 reg
.r_usb_irq_mask_read
= irq_mask
;
780 /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
781 R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
782 clears the ourun and perror fields of R_USB_STATUS. */
783 reg
.r_usb_status
= *R_USB_STATUS
;
785 /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
787 reg
.r_usb_epid_attn
= *R_USB_EPID_ATTN
;
789 /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
790 port_status interrupt. */
791 reg
.r_usb_rh_port_status_1
= *R_USB_RH_PORT_STATUS_1
;
792 reg
.r_usb_rh_port_status_2
= *R_USB_RH_PORT_STATUS_2
;
794 /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
795 /* Note: the lower 11 bits contain the actual frame number, sent with each
797 reg
.r_usb_fm_number
= *R_USB_FM_NUMBER
;
799 /* Interrupts are handled in order of priority. */
800 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, port_status
)) {
801 crisv10_hcd_port_status_irq(®
);
803 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, epid_attn
)) {
804 crisv10_hcd_epid_attn_irq(®
);
806 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, ctl_status
)) {
807 crisv10_hcd_ctl_status_irq(®
);
809 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, iso_eof
)) {
810 crisv10_hcd_isoc_eof_irq(®
);
812 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, bulk_eot
)) {
813 /* Update/restart the bulk start timer since obviously the channel is
815 mod_timer(&bulk_start_timer
, jiffies
+ BULK_START_TIMER_INTERVAL
);
816 /* Update/restart the bulk eot timer since we just received an bulk eot
818 mod_timer(&bulk_eot_timer
, jiffies
+ BULK_EOT_TIMER_INTERVAL
);
820 /* Check for finished bulk transfers on epids */
821 check_finished_bulk_tx_epids(hcd
, 0);
823 local_irq_restore(flags
);
830 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg
*reg
) {
831 struct usb_hcd
*hcd
= reg
->hcd
;
832 struct crisv10_urb_priv
*urb_priv
;
836 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
837 if (test_bit(epid
, (void *)®
->r_usb_epid_attn
)) {
842 if (epid
== DUMMY_EPID
|| epid
== INVALID_EPID
) {
843 /* We definitely don't care about these ones. Besides, they are
844 always disabled, so any possible disabling caused by the
845 epid attention interrupt is irrelevant. */
846 warn("Got epid_attn for INVALID_EPID or DUMMY_EPID (%d).", epid
);
850 if(!epid_inuse(epid
)) {
851 irq_err("Epid attention on epid:%d that isn't in use\n", epid
);
852 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
857 /* Note that although there are separate R_USB_EPT_DATA and
858 R_USB_EPT_DATA_ISO registers, they are located at the same address and
859 are of the same size. In other words, this read should be ok for isoc
861 ept_data
= etrax_epid_get(epid
);
862 error_code
= IO_EXTRACT(R_USB_EPT_DATA
, error_code
, ept_data
);
864 /* Get the active URB for this epid. We blatantly assume
865 that only this URB could have caused the epid attention. */
866 urb
= activeUrbList
[epid
];
868 irq_err("Attention on epid:%d error:%d with no active URB.\n",
870 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
875 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
878 /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
879 if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
, no_error
)) {
881 /* Isoc traffic doesn't have error_count_in/error_count_out. */
882 if ((usb_pipetype(urb
->pipe
) != PIPE_ISOCHRONOUS
) &&
883 (IO_EXTRACT(R_USB_EPT_DATA
, error_count_in
, ept_data
) == 3 ||
884 IO_EXTRACT(R_USB_EPT_DATA
, error_count_out
, ept_data
) == 3)) {
885 /* Check if URB allready is marked for late-finish, we can get
886 several 3rd error for Intr traffic when a device is unplugged */
887 if(urb_priv
->later_data
== NULL
) {
889 irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid
,
890 str_dir(urb
->pipe
), str_type(urb
->pipe
),
891 (unsigned int)urb
, urb_priv
->urb_num
);
893 tc_finish_urb_later(hcd
, urb
, -EPROTO
);
896 } else if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, perror
)) {
897 irq_warn("Perror for epid:%d\n", epid
);
898 printk("FM_NUMBER: %d\n", reg
->r_usb_fm_number
& 0x7ff);
899 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
903 if (!(ept_data
& IO_MASK(R_USB_EPT_DATA
, valid
))) {
905 panic("Perror because of invalid epid."
906 " Deconfigured too early?");
908 /* past eof1, near eof, zout transfer, setup transfer */
909 /* Dump the urb and the relevant EP descriptor. */
910 panic("Something wrong with DMA descriptor contents."
911 " Too much traffic inserted?");
913 } else if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, ourun
)) {
915 printk("FM_NUMBER: %d\n", reg
->r_usb_fm_number
& 0x7ff);
916 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
920 panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid
);
922 irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid
,
923 str_dir(urb
->pipe
), str_type(urb
->pipe
));
924 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
929 } else if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
,
931 /* Not really a protocol error, just says that the endpoint gave
932 a stall response. Note that error_code cannot be stall for isoc. */
933 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
) {
934 panic("Isoc traffic cannot stall");
937 tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid
,
938 str_dir(urb
->pipe
), str_type(urb
->pipe
), (unsigned int)urb
);
939 tc_finish_urb(hcd
, urb
, -EPIPE
);
941 } else if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
,
943 /* Two devices responded to a transaction request. Must be resolved
944 by software. FIXME: Reset ports? */
945 panic("Bus error for epid %d."
946 " Two devices responded to transaction request\n",
949 } else if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
,
951 /* DMA overrun or underrun. */
952 irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid
,
953 str_dir(urb
->pipe
), str_type(urb
->pipe
));
955 /* It seems that error_code = buffer_error in
956 R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
957 are the same error. */
958 tc_finish_urb(hcd
, urb
, -EPROTO
);
960 irq_warn("Unknown attention on epid:%d (%s %s)\n", epid
,
961 str_dir(urb
->pipe
), str_type(urb
->pipe
));
969 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg
*reg
)
971 __u16 port_reg
[USB_ROOT_HUB_PORTS
];
973 port_reg
[0] = reg
->r_usb_rh_port_status_1
;
974 port_reg
[1] = reg
->r_usb_rh_port_status_2
;
975 rh_port_status_change(port_reg
);
979 void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg
*reg
)
983 struct crisv10_urb_priv
*urb_priv
;
987 for (epid
= 0; epid
< NBR_OF_EPIDS
- 1; epid
++) {
989 /* Only check epids that are in use, is valid and has SB list */
990 if (!epid_inuse(epid
) || epid
== INVALID_EPID
||
991 TxIsocEPList
[epid
].sub
== 0 || epid
== DUMMY_EPID
) {
992 /* Nothing here to see. */
995 ASSERT(epid_isoc(epid
));
997 /* Get the active URB for this epid (if any). */
998 urb
= activeUrbList
[epid
];
1000 isoc_warn("Ignoring NULL urb for epid:%d\n", epid
);
1003 if(!epid_out_traffic(epid
)) {
1005 ASSERT(usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
);
1007 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
1010 if (urb_priv
->urb_state
== NOT_STARTED
) {
1011 /* If ASAP is not set and urb->start_frame is the current frame,
1012 start the transfer. */
1013 if (!(urb
->transfer_flags
& URB_ISO_ASAP
) &&
1014 (urb
->start_frame
== (*R_USB_FM_NUMBER
& 0x7ff))) {
1015 /* EP should not be enabled if we're waiting for start_frame */
1016 ASSERT((TxIsocEPList
[epid
].command
&
1017 IO_STATE(USB_EP_command
, enable
, yes
)) == 0);
1019 isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid
);
1020 TxIsocEPList
[epid
].command
|= IO_STATE(USB_EP_command
, enable
, yes
);
1022 /* This urb is now active. */
1023 urb_priv
->urb_state
= STARTED
;
1033 void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg
*reg
)
1035 struct crisv10_hcd
* crisv10_hcd
= hcd_to_crisv10_hcd(reg
->hcd
);
1038 ASSERT(crisv10_hcd
);
1040 /* irq_dbg("ctr_status_irq, controller status: %s\n",
1041 hcd_status_to_str(reg->r_usb_status));*/
1043 /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
1044 list for the corresponding epid? */
1045 if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, ourun
)) {
1046 panic("USB controller got ourun.");
1048 if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, perror
)) {
1050 /* Before, etrax_usb_do_intr_recover was called on this epid if it was
1051 an interrupt pipe. I don't see how re-enabling all EP descriptors
1052 will help if there was a programming error. */
1053 panic("USB controller got perror.");
1056 /* Keep track of USB Controller, if it's running or not */
1057 if(reg
->r_usb_status
& IO_STATE(R_USB_STATUS
, running
, yes
)) {
1058 crisv10_hcd
->running
= 1;
1060 crisv10_hcd
->running
= 0;
1063 if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, device_mode
)) {
1064 /* We should never operate in device mode. */
1065 panic("USB controller in device mode.");
1068 /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
1069 using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
1070 set_bit(HCD_FLAG_SAW_IRQ
, ®
->hcd
->flags
);
1076 /******************************************************************/
1077 /* Host Controller interface functions */
1078 /******************************************************************/
1080 static inline void crisv10_ready_wait(void) {
1081 volatile int timeout
= 10000;
1082 /* Check the busy bit of USB controller in Etrax */
1083 while((*R_USB_COMMAND
& IO_MASK(R_USB_COMMAND
, busy
)) &&
1086 warn("Timeout while waiting for USB controller to be idle\n");
/* reset host controller */
static int crisv10_hcd_reset(struct usb_hcd *hcd)
{
  hcd_dbg(hcd, "reset\n");

  /* Reset the USB interface.  The actual hardware reset command appears
     intentionally disabled; the register sequence is kept for reference. */
  /*
  *R_USB_COMMAND =
    IO_STATE(R_USB_COMMAND, port_sel, nop) |
    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
  */
  return 0;
}
1109 /* start host controller */
1110 static int crisv10_hcd_start(struct usb_hcd
*hcd
)
1113 hcd_dbg(hcd
, "start\n");
1115 crisv10_ready_wait();
1117 /* Start processing of USB traffic. */
1119 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
1120 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1121 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
1125 hcd
->state
= HC_STATE_RUNNING
;
/* stop host controller */
static void crisv10_hcd_stop(struct usb_hcd *hcd)
{
  hcd_dbg(hcd, "stop\n");
  /* Stopping is implemented as a controller reset. */
  crisv10_hcd_reset(hcd);
}
1140 /* return the current frame number */
1141 static int crisv10_hcd_get_frame(struct usb_hcd
*hcd
)
1145 return (*R_USB_FM_NUMBER
& 0x7ff);
1148 #ifdef CONFIG_USB_OTG
/* Start a port reset on behalf of the USB core (OTG builds only).
   Currently a stub; the core's generic reset handling applies. */
static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
{
  return 0; /* no-op for now */
}
1155 #endif /* CONFIG_USB_OTG */
1158 /******************************************************************/
1159 /* Root Hub functions */
1160 /******************************************************************/
1162 /* root hub status */
1163 static const struct usb_hub_status rh_hub_status
=
1169 /* root hub descriptor */
1170 static const u8 rh_hub_descr
[] =
1172 0x09, /* bDescLength */
1173 0x29, /* bDescriptorType */
1174 USB_ROOT_HUB_PORTS
, /* bNbrPorts */
1175 0x00, /* wHubCharacteristics */
1177 0x01, /* bPwrOn2pwrGood */
1178 0x00, /* bHubContrCurrent */
1179 0x00, /* DeviceRemovable */
1180 0xff /* PortPwrCtrlMask */
1183 /* Actual holder of root hub status*/
1184 struct crisv10_rh rh
;
1186 /* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
1189 /* Reset port status flags */
1190 for (i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++) {
1191 rh
.wPortChange
[i
] = 0;
1192 rh
.wPortStatusPrev
[i
] = 0;
1197 #define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
1198 (1<<USB_PORT_FEAT_ENABLE)|\
1199 (1<<USB_PORT_FEAT_SUSPEND)|\
1200 (1<<USB_PORT_FEAT_RESET))
1202 /* Handle port status change interrupt (called from bottom part interrupt) */
1203 void rh_port_status_change(__u16 port_reg
[]) {
1207 for(i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++) {
1208 /* Xor out changes since last read, masked for important flags */
1209 wChange
= (port_reg
[i
] & RH_FEAT_MASK
) ^ rh
.wPortStatusPrev
[i
];
1210 /* Or changes together with (if any) saved changes */
1211 rh
.wPortChange
[i
] |= wChange
;
1212 /* Save new status */
1213 rh
.wPortStatusPrev
[i
] = port_reg
[i
];
1216 rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i
+1,
1217 port_status_to_str(wChange
),
1218 port_status_to_str(port_reg
[i
]));
1223 /* Construct port status change bitmap for the root hub */
1224 static int rh_status_data_request(struct usb_hcd
*hcd
, char *buf
)
1226 struct crisv10_hcd
* crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
1231 * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
1232 * return bitmap indicating ports with status change
1235 spin_lock(&crisv10_hcd
->lock
);
1236 for (i
= 1; i
<= crisv10_hcd
->num_ports
; i
++) {
1237 if (rh
.wPortChange
[map_port(i
)]) {
1239 rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i
,
1240 port_status_to_str(rh
.wPortChange
[map_port(i
)]),
1241 port_status_to_str(rh
.wPortStatusPrev
[map_port(i
)]));
1244 spin_unlock(&crisv10_hcd
->lock
);
1246 return *buf
== 0 ? 0 : 1;
1249 /* Handle a control request for the root hub (called from hcd_driver) */
1250 static int rh_control_request(struct usb_hcd
*hcd
,
1257 struct crisv10_hcd
*crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
1263 case GetHubDescriptor
:
1264 rh_dbg("GetHubDescriptor\n");
1265 len
= min_t(unsigned int, sizeof rh_hub_descr
, wLength
);
1266 memcpy(buf
, rh_hub_descr
, len
);
1267 buf
[2] = crisv10_hcd
->num_ports
;
1270 rh_dbg("GetHubStatus\n");
1271 len
= min_t(unsigned int, sizeof rh_hub_status
, wLength
);
1272 memcpy(buf
, &rh_hub_status
, len
);
1275 if (!wIndex
|| wIndex
> crisv10_hcd
->num_ports
)
1277 rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex
,
1278 port_status_to_str(rh
.wPortChange
[map_port(wIndex
)]),
1279 port_status_to_str(rh
.wPortStatusPrev
[map_port(wIndex
)]));
1280 *(u16
*) buf
= cpu_to_le16(rh
.wPortStatusPrev
[map_port(wIndex
)]);
1281 *(u16
*) (buf
+ 2) = cpu_to_le16(rh
.wPortChange
[map_port(wIndex
)]);
1284 rh_dbg("SetHubFeature\n");
1285 case ClearHubFeature
:
1286 rh_dbg("ClearHubFeature\n");
1288 case C_HUB_OVER_CURRENT
:
1289 case C_HUB_LOCAL_POWER
:
1290 rh_warn("Not implemented hub request:%d \n", typeReq
);
1291 /* not implemented */
1297 case SetPortFeature
:
1298 if (!wIndex
|| wIndex
> crisv10_hcd
->num_ports
)
1300 if(rh_set_port_feature(map_port(wIndex
), wValue
))
1303 case ClearPortFeature
:
1304 if (!wIndex
|| wIndex
> crisv10_hcd
->num_ports
)
1306 if(rh_clear_port_feature(map_port(wIndex
), wValue
))
1310 rh_warn("Unknown hub request: %d\n", typeReq
);
1318 int rh_set_port_feature(__u8 bPort
, __u16 wFeature
) {
1319 __u8 bUsbCommand
= 0;
1322 case USB_PORT_FEAT_RESET
:
1323 rh_dbg("SetPortFeature: reset\n");
1325 if (rh
.wPortStatusPrev
[bPort
] &
1326 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
))
1328 __u8 restart_controller
= 0;
1330 if ( (rh
.wPortStatusPrev
[0] &
1331 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
)) &&
1332 (rh
.wPortStatusPrev
[1] &
1333 IO_STATE(R_USB_RH_PORT_STATUS_2
, enabled
, yes
)) )
1335 /* Both ports is enabled. The USB controller will not change state. */
1336 restart_controller
= 0;
1340 /* Only ports is enabled. The USB controller will change state and
1341 must be restarted. */
1342 restart_controller
= 1;
1345 In ETRAX 100LX it's not possible to reset an enabled root hub port.
1346 The workaround is to disable and enable the port before resetting it.
1347 Disabling the port can, if both ports are disabled at once, cause the
1348 USB controller to change state to HOST_MODE state.
1349 The USB controller state transition causes a lot of unwanted
1350 interrupts that must be avoided.
1351 Disabling the USB controller status and port status interrupts before
1352 disabling/resetting the port stops these interrupts.
1354 These actions are performed:
1355 1. Disable USB controller status and port status interrupts.
1357 3. Wait for the port to be disabled.
1359 5. Wait for the port to be enabled.
1361 7. Wait for for the reset to end.
1362 8. Wait for the USB controller entering started state.
1363 9. Order the USB controller to running state.
1364 10. Wait for the USB controller reaching running state.
1365 11. Clear all interrupts generated during the disable/enable/reset
1367 12. Enable the USB controller status and port status interrupts.
1370 /* 1. Disable USB controller status and USB port status interrupts. */
1371 *R_USB_IRQ_MASK_CLR
= IO_STATE(R_USB_IRQ_MASK_CLR
, ctl_status
, clr
);
1372 __asm__
__volatile__ (" nop");
1373 *R_USB_IRQ_MASK_CLR
= IO_STATE(R_USB_IRQ_MASK_CLR
, port_status
, clr
);
1374 __asm__
__volatile__ (" nop");
1378 /* Since an root hub port reset shall be 50 ms and the ETRAX 100LX
1379 root hub port reset is 10 ms we must perform 5 port resets to
1380 achieve a proper root hub port reset. */
1381 for (reset_cnt
= 0; reset_cnt
< 5; reset_cnt
++)
1383 rh_dbg("Disable Port %d\n", bPort
+ 1);
1385 /* 2. Disable the port*/
1388 *R_USB_PORT1_DISABLE
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, yes
);
1392 *R_USB_PORT2_DISABLE
= IO_STATE(R_USB_PORT2_DISABLE
, disable
, yes
);
1395 /* 3. Wait for the port to be disabled. */
1396 while ( (bPort
== 0) ?
1397 *R_USB_RH_PORT_STATUS_1
&
1398 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
) :
1399 *R_USB_RH_PORT_STATUS_2
&
1400 IO_STATE(R_USB_RH_PORT_STATUS_2
, enabled
, yes
) ) {}
1402 rh_dbg("Port %d is disabled. Enable it!\n", bPort
+ 1);
1404 /* 4. Enable the port. */
1407 *R_USB_PORT1_DISABLE
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, no
);
1411 *R_USB_PORT2_DISABLE
= IO_STATE(R_USB_PORT2_DISABLE
, disable
, no
);
1414 /* 5. Wait for the port to be enabled again. */
1415 while (!( (bPort
== 0) ?
1416 *R_USB_RH_PORT_STATUS_1
&
1417 IO_STATE(R_USB_RH_PORT_STATUS_1
, connected
, yes
) :
1418 *R_USB_RH_PORT_STATUS_2
&
1419 IO_STATE(R_USB_RH_PORT_STATUS_2
, connected
, yes
) ) ) {}
1421 rh_dbg("Port %d is enabled.\n", bPort
+ 1);
1423 /* 6. Reset the port */
1424 crisv10_ready_wait();
1427 IO_STATE(R_USB_COMMAND
, port_sel
, port1
):
1428 IO_STATE(R_USB_COMMAND
, port_sel
, port2
) ) |
1429 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1430 IO_STATE(R_USB_COMMAND
, busy
, no
) |
1431 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, nop
);
1432 rh_dbg("Port %d is resetting.\n", bPort
+ 1);
1434 /* 7. The USB specification says that we should wait for at least
1435 10ms for device recover */
1436 udelay(10500); /* 10,5ms blocking wait */
1438 crisv10_ready_wait();
1443 /* Check if the USB controller needs to be restarted. */
1444 if (restart_controller
)
1446 /* 8. Wait for the USB controller entering started state. */
1447 while (!(*R_USB_STATUS
& IO_STATE(R_USB_STATUS
, started
, yes
))) {}
1449 /* 9. Order the USB controller to running state. */
1450 crisv10_ready_wait();
1452 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
1453 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1454 IO_STATE(R_USB_COMMAND
, busy
, no
) |
1455 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
1457 /* 10. Wait for the USB controller reaching running state. */
1458 while (!(*R_USB_STATUS
& IO_STATE(R_USB_STATUS
, running
, yes
))) {}
1461 /* 11. Clear any controller or port satus interrupts before enabling
1466 /* Clear the port status interrupt of the reset port. */
1469 rh_dbg("Clearing port 1 interrupts\n");
1470 dummy
= *R_USB_RH_PORT_STATUS_1
;
1474 rh_dbg("Clearing port 2 interrupts\n");
1475 dummy
= *R_USB_RH_PORT_STATUS_2
;
1478 if (restart_controller
)
1480 /* The USB controller is restarted. Clear all interupts. */
1481 rh_dbg("Clearing all interrupts\n");
1482 dummy
= *R_USB_STATUS
;
1483 dummy
= *R_USB_RH_PORT_STATUS_1
;
1484 dummy
= *R_USB_RH_PORT_STATUS_2
;
1488 /* 12. Enable USB controller status and USB port status interrupts. */
1489 *R_USB_IRQ_MASK_SET
= IO_STATE(R_USB_IRQ_MASK_SET
, ctl_status
, set
);
1490 __asm__
__volatile__ (" nop");
1491 *R_USB_IRQ_MASK_SET
= IO_STATE(R_USB_IRQ_MASK_SET
, port_status
, set
);
1492 __asm__
__volatile__ (" nop");
1498 bUsbCommand
|= IO_STATE(R_USB_COMMAND
, port_cmd
, reset
);
1499 /* Select which port via the port_sel field */
1500 bUsbCommand
|= IO_FIELD(R_USB_COMMAND
, port_sel
, bPort
+1);
1502 /* Make sure the controller isn't busy. */
1503 crisv10_ready_wait();
1504 /* Send out the actual command to the USB controller */
1505 *R_USB_COMMAND
= bUsbCommand
;
1507 /* Wait a while for controller to first become started after port reset */
1508 udelay(12000); /* 12ms blocking wait */
1510 /* Make sure the controller isn't busy. */
1511 crisv10_ready_wait();
1513 /* If all enabled ports were disabled the host controller goes down into
1514 started mode, so we need to bring it back into the running state.
1515 (This is safe even if it's already in the running state.) */
1517 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
1518 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1519 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
1523 case USB_PORT_FEAT_SUSPEND
:
1524 rh_dbg("SetPortFeature: suspend\n");
1525 bUsbCommand
|= IO_STATE(R_USB_COMMAND
, port_cmd
, suspend
);
1528 case USB_PORT_FEAT_POWER
:
1529 rh_dbg("SetPortFeature: power\n");
1531 case USB_PORT_FEAT_C_CONNECTION
:
1532 rh_dbg("SetPortFeature: c_connection\n");
1534 case USB_PORT_FEAT_C_RESET
:
1535 rh_dbg("SetPortFeature: c_reset\n");
1537 case USB_PORT_FEAT_C_OVER_CURRENT
:
1538 rh_dbg("SetPortFeature: c_over_current\n");
1542 /* Select which port via the port_sel field */
1543 bUsbCommand
|= IO_FIELD(R_USB_COMMAND
, port_sel
, bPort
+1);
1545 /* Make sure the controller isn't busy. */
1546 crisv10_ready_wait();
1547 /* Send out the actual command to the USB controller */
1548 *R_USB_COMMAND
= bUsbCommand
;
1551 rh_dbg("SetPortFeature: unknown feature\n");
1557 int rh_clear_port_feature(__u8 bPort
, __u16 wFeature
) {
1559 case USB_PORT_FEAT_ENABLE
:
1560 rh_dbg("ClearPortFeature: enable\n");
1561 rh_disable_port(bPort
);
1563 case USB_PORT_FEAT_SUSPEND
:
1564 rh_dbg("ClearPortFeature: suspend\n");
1566 case USB_PORT_FEAT_POWER
:
1567 rh_dbg("ClearPortFeature: power\n");
1570 case USB_PORT_FEAT_C_ENABLE
:
1571 rh_dbg("ClearPortFeature: c_enable\n");
1573 case USB_PORT_FEAT_C_SUSPEND
:
1574 rh_dbg("ClearPortFeature: c_suspend\n");
1576 case USB_PORT_FEAT_C_CONNECTION
:
1577 rh_dbg("ClearPortFeature: c_connection\n");
1579 case USB_PORT_FEAT_C_OVER_CURRENT
:
1580 rh_dbg("ClearPortFeature: c_over_current\n");
1582 case USB_PORT_FEAT_C_RESET
:
1583 rh_dbg("ClearPortFeature: c_reset\n");
1586 rh
.wPortChange
[bPort
] &= ~(1 << (wFeature
- 16));
1589 rh_dbg("ClearPortFeature: unknown feature\n");
/* Handle a suspend request for the root hub (called from hcd_driver) */
static int rh_suspend_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
/* Handle a resume request for the root hub (called from hcd_driver) */
static int rh_resume_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
1608 #endif /* CONFIG_PM */
1612 /* Wrapper function for workaround port disable registers in USB controller */
1613 static void rh_disable_port(unsigned int port
) {
1614 volatile int timeout
= 10000;
1615 volatile char* usb_portx_disable
;
1618 usb_portx_disable
= R_USB_PORT1_DISABLE
;
1621 usb_portx_disable
= R_USB_PORT2_DISABLE
;
1624 /* Invalid port index */
1627 /* Set disable flag in special register */
1628 *usb_portx_disable
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, yes
);
1629 /* Wait until not enabled anymore */
1630 while((rh
.wPortStatusPrev
[port
] &
1631 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
)) &&
1634 warn("Timeout while waiting for port %d to become disabled\n", port
);
1636 /* clear disable flag in special register */
1637 *usb_portx_disable
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, no
);
1638 rh_info("Physical port %d disabled\n", port
+1);
1642 /******************************************************************/
1643 /* Transfer Controller (TC) functions */
1644 /******************************************************************/
1646 /* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
1648 To adjust it dynamically we would have to get an interrupt when we reach
1649 the end of the rx descriptor list, or when we get close to the end, and
1650 then allocate more descriptors. */
1651 #define NBR_OF_RX_DESC 512
1652 #define RX_DESC_BUF_SIZE 1024
1653 #define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
1656 /* Local variables for Transfer Controller */
1657 /* --------------------------------------- */
1659 /* This is a circular (double-linked) list of the active urbs for each epid.
1660 The head is never removed, and new urbs are linked onto the list as
1661 urb_entry_t elements. Don't reference urb_list directly; use the wrapper
1662 functions instead (which includes spin_locks) */
1663 static struct list_head urb_list
[NBR_OF_EPIDS
];
1665 /* Read about the need and usage of this lock in submit_ctrl_urb. */
1666 /* Lock for URB lists for each EPID */
1667 static spinlock_t urb_list_lock
;
1669 /* Lock for EPID array register (R_USB_EPT_x) in Etrax */
1670 static spinlock_t etrax_epid_lock
;
1672 /* Lock for dma8 sub0 handling */
1673 static spinlock_t etrax_dma8_sub0_lock
;
1675 /* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
1676 Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be
1678 static volatile unsigned char RxBuf
[RX_BUF_SIZE
] __attribute__ ((aligned (32)));
1679 static volatile struct USB_IN_Desc RxDescList
[NBR_OF_RX_DESC
] __attribute__ ((aligned (4)));
1681 /* Pointers into RxDescList. */
1682 static volatile struct USB_IN_Desc
*myNextRxDesc
;
1683 static volatile struct USB_IN_Desc
*myLastRxDesc
;
1685 /* A zout transfer makes a memory access at the address of its buf pointer,
1686 which means that setting this buf pointer to 0 will cause an access to the
1687 flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
1688 (depending on DMA burst size) transfer.
1689 Instead, we set it to 1, and point it to this buffer. */
1690 static int zout_buffer
[4] __attribute__ ((aligned (4)));
1692 /* Cache for allocating new EP and SB descriptors. */
1693 static struct kmem_cache
*usb_desc_cache
;
1695 /* Cache for the data allocated in the isoc descr top half. */
1696 static struct kmem_cache
*isoc_compl_cache
;
1698 /* Cache for the data allocated when delayed finishing of URBs */
1699 static struct kmem_cache
*later_data_cache
;
1702 /* Counter to keep track of how many Isoc EP we have sat up. Used to enable
1703 and disable iso_eof interrupt. We only need these interrupts when we have
1704 Isoc data endpoints (consumes CPU cycles).
1705 FIXME: This could be more fine granular, so this interrupt is only enabled
1706 when we have a In Isoc URB not URB_ISO_ASAP flaged queued. */
1707 static int isoc_epid_counter
;
1709 /* Protecting wrapper functions for R_USB_EPT_x */
1710 /* -------------------------------------------- */
1711 static inline void etrax_epid_set(__u8 index
, __u32 data
) {
1712 unsigned long flags
;
1713 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1714 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1716 *R_USB_EPT_DATA
= data
;
1717 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1720 static inline void etrax_epid_clear_error(__u8 index
) {
1721 unsigned long flags
;
1722 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1723 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1726 ~(IO_MASK(R_USB_EPT_DATA
, error_count_in
) |
1727 IO_MASK(R_USB_EPT_DATA
, error_count_out
) |
1728 IO_MASK(R_USB_EPT_DATA
, error_code
));
1729 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1732 static inline void etrax_epid_set_toggle(__u8 index
, __u8 dirout
,
1734 unsigned long flags
;
1735 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1736 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1739 *R_USB_EPT_DATA
&= ~IO_MASK(R_USB_EPT_DATA
, t_out
);
1740 *R_USB_EPT_DATA
|= IO_FIELD(R_USB_EPT_DATA
, t_out
, toggle
);
1742 *R_USB_EPT_DATA
&= ~IO_MASK(R_USB_EPT_DATA
, t_in
);
1743 *R_USB_EPT_DATA
|= IO_FIELD(R_USB_EPT_DATA
, t_in
, toggle
);
1745 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1748 static inline __u8
etrax_epid_get_toggle(__u8 index
, __u8 dirout
) {
1749 unsigned long flags
;
1751 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1752 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1755 toggle
= IO_EXTRACT(R_USB_EPT_DATA
, t_out
, *R_USB_EPT_DATA
);
1757 toggle
= IO_EXTRACT(R_USB_EPT_DATA
, t_in
, *R_USB_EPT_DATA
);
1759 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1764 static inline __u32
etrax_epid_get(__u8 index
) {
1765 unsigned long flags
;
1767 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1768 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1770 data
= *R_USB_EPT_DATA
;
1771 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1778 /* Main functions for Transfer Controller */
1779 /* -------------------------------------- */
1781 /* Init structs, memories and lists used by Transfer Controller */
1782 int tc_init(struct usb_hcd
*hcd
) {
1784 /* Clear software state info for all epids */
1785 memset(epid_state
, 0, sizeof(struct etrax_epid
) * NBR_OF_EPIDS
);
1787 /* Set Invalid and Dummy as being in use and disabled */
1788 epid_state
[INVALID_EPID
].inuse
= 1;
1789 epid_state
[DUMMY_EPID
].inuse
= 1;
1790 epid_state
[INVALID_EPID
].disabled
= 1;
1791 epid_state
[DUMMY_EPID
].disabled
= 1;
1793 /* Clear counter for how many Isoc epids we have sat up */
1794 isoc_epid_counter
= 0;
1796 /* Initialize the urb list by initiating a head for each list.
1797 Also reset list hodling active URB for each epid */
1798 for (i
= 0; i
< NBR_OF_EPIDS
; i
++) {
1799 INIT_LIST_HEAD(&urb_list
[i
]);
1800 activeUrbList
[i
] = NULL
;
1803 /* Init lock for URB lists */
1804 spin_lock_init(&urb_list_lock
);
1805 /* Init lock for Etrax R_USB_EPT register */
1806 spin_lock_init(&etrax_epid_lock
);
1807 /* Init lock for Etrax dma8 sub0 handling */
1808 spin_lock_init(&etrax_dma8_sub0_lock
);
1810 /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
1812 /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
1813 allocate SB descriptors from this cache. This is ok since
1814 sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
1815 usb_desc_cache
= kmem_cache_create("usb_desc_cache",
1816 sizeof(struct USB_EP_Desc
), 0,
1817 SLAB_HWCACHE_ALIGN
, 0);
1818 if(usb_desc_cache
== NULL
) {
1822 /* Create slab cache for speedy allocation of memory for isoc bottom-half
1823 interrupt handling */
1825 kmem_cache_create("isoc_compl_cache",
1826 sizeof(struct crisv10_isoc_complete_data
),
1827 0, SLAB_HWCACHE_ALIGN
, 0);
1828 if(isoc_compl_cache
== NULL
) {
1832 /* Create slab cache for speedy allocation of memory for later URB finish
1835 kmem_cache_create("later_data_cache",
1836 sizeof(struct urb_later_data
),
1837 0, SLAB_HWCACHE_ALIGN
, 0);
1838 if(later_data_cache
== NULL
) {
1843 /* Initiate the bulk start timer. */
1844 init_timer(&bulk_start_timer
);
1845 bulk_start_timer
.expires
= jiffies
+ BULK_START_TIMER_INTERVAL
;
1846 bulk_start_timer
.function
= tc_bulk_start_timer_func
;
1847 add_timer(&bulk_start_timer
);
1850 /* Initiate the bulk eot timer. */
1851 init_timer(&bulk_eot_timer
);
1852 bulk_eot_timer
.expires
= jiffies
+ BULK_EOT_TIMER_INTERVAL
;
1853 bulk_eot_timer
.function
= tc_bulk_eot_timer_func
;
1854 bulk_eot_timer
.data
= (unsigned long)hcd
;
1855 add_timer(&bulk_eot_timer
);
1860 /* Uninitialize all resources used by Transfer Controller */
1861 void tc_destroy(void) {
1863 /* Destroy all slab cache */
1864 kmem_cache_destroy(usb_desc_cache
);
1865 kmem_cache_destroy(isoc_compl_cache
);
1866 kmem_cache_destroy(later_data_cache
);
1869 del_timer(&bulk_start_timer
);
1870 del_timer(&bulk_eot_timer
);
1873 static void restart_dma8_sub0(void) {
1874 unsigned long flags
;
1875 spin_lock_irqsave(&etrax_dma8_sub0_lock
, flags
);
1876 /* Verify that the dma is not running */
1877 if ((*R_DMA_CH8_SUB0_CMD
& IO_MASK(R_DMA_CH8_SUB0_CMD
, cmd
)) == 0) {
1878 struct USB_EP_Desc
*ep
= (struct USB_EP_Desc
*)phys_to_virt(*R_DMA_CH8_SUB0_EP
);
1879 while (DUMMY_EPID
== IO_EXTRACT(USB_EP_command
, epid
, ep
->command
)) {
1880 ep
= (struct USB_EP_Desc
*)phys_to_virt(ep
->next
);
1882 /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID. */
1883 *R_DMA_CH8_SUB0_EP
= virt_to_phys(ep
);
1884 /* Restart the DMA */
1885 *R_DMA_CH8_SUB0_CMD
= IO_STATE(R_DMA_CH8_SUB0_CMD
, cmd
, start
);
1887 spin_unlock_irqrestore(&etrax_dma8_sub0_lock
, flags
);
1890 /* queue an URB with the transfer controller (called from hcd_driver) */
1891 static int tc_urb_enqueue(struct usb_hcd
*hcd
,
1898 unsigned long flags
;
1899 struct crisv10_urb_priv
*urb_priv
;
1900 struct crisv10_hcd
* crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
1903 if(!(crisv10_hcd
->running
)) {
1904 /* The USB Controller is not running, probably because no device is
1905 attached. No idea to enqueue URBs then */
1906 tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
1911 maxpacket
= usb_maxpacket(urb
->dev
, urb
->pipe
, usb_pipeout(urb
->pipe
));
1912 /* Special case check for In Isoc transfers. Specification states that each
1913 In Isoc transfer consists of one packet and therefore it should fit into
1914 the transfer-buffer of an URB.
1915 We do the check here to be sure (an invalid scenario can be produced with
1916 parameters to the usbtest suite) */
1917 if(usb_pipeisoc(urb
->pipe
) && usb_pipein(urb
->pipe
) &&
1918 (urb
->transfer_buffer_length
< maxpacket
)) {
1919 tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb
->transfer_buffer_length
, maxpacket
);
1923 /* Check if there is a epid for URBs destination, if not this function
1925 epid
= tc_setup_epid(urb
->ep
, urb
, mem_flags
);
1927 tc_err("Failed setup epid:%d for URB:0x%x\n", epid
, (unsigned int)urb
);
1932 if(urb
== activeUrbList
[epid
]) {
1933 tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb
);
1937 if(urb_list_entry(urb
, epid
)) {
1938 tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb
);
1942 /* If we actively have flaged endpoint as disabled then refuse submition */
1943 if(epid_state
[epid
].disabled
) {
1947 /* Allocate and init HC-private data for URB */
1948 if(urb_priv_create(hcd
, urb
, epid
, mem_flags
) != 0) {
1952 urb_priv
= urb
->hcpriv
;
1954 /* Check if there is enough bandwidth for periodic transfer */
1955 if(usb_pipeint(urb
->pipe
) || usb_pipeisoc(urb
->pipe
)) {
1956 /* only check (and later claim) if not already claimed */
1957 if (urb_priv
->bandwidth
== 0) {
1958 bustime
= crisv10_usb_check_bandwidth(urb
->dev
, urb
);
1960 tc_err("Not enough periodic bandwidth\n");
1961 urb_priv_free(hcd
, urb
);
1968 tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
1969 (unsigned int)urb
, urb_priv
->urb_num
, epid
,
1970 pipe_to_str(urb
->pipe
), urb
->transfer_buffer_length
);
1972 /* Create and link SBs required for this URB */
1973 retval
= create_sb_for_urb(urb
, mem_flags
);
1975 tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb
,
1977 urb_priv_free(hcd
, urb
);
1982 /* Init intr EP pool if this URB is a INTR transfer. This pool is later
1983 used when inserting EPs in the TxIntrEPList. We do the alloc here
1984 so we can't run out of memory later */
1985 if(usb_pipeint(urb
->pipe
)) {
1986 retval
= init_intr_urb(urb
, mem_flags
);
1988 tc_warn("Failed to init Intr URB\n");
1989 urb_priv_free(hcd
, urb
);
1995 /* Disable other access when inserting USB */
1996 local_irq_save(flags
);
1998 /* Claim bandwidth, if needed */
2000 crisv10_usb_claim_bandwidth(urb
->dev
,
2003 (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
));
2006 /* Add URB to EP queue */
2007 urb_list_add(urb
, epid
, mem_flags
);
2009 if(usb_pipeisoc(urb
->pipe
)) {
2010 /* Special processing of Isoc URBs. */
2011 tc_dma_process_isoc_urb(urb
);
2013 /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
2014 tc_dma_process_queue(epid
);
2017 local_irq_restore(flags
);
2023 /* remove an URB from the transfer controller queues (called from hcd_driver)*/
2024 static int tc_urb_dequeue(struct usb_hcd
*hcd
, struct urb
*urb
, int status
) {
2025 struct crisv10_urb_priv
*urb_priv
;
2026 unsigned long flags
;
2030 /* Disable interrupts here since a descriptor interrupt for the isoc epid
2031 will modify the sb list. This could possibly be done more granular, but
2032 urb_dequeue should not be used frequently anyway.
2034 local_irq_save(flags
);
2036 urb
->status
= status
;
2037 urb_priv
= urb
->hcpriv
;
2040 /* This happens if a device driver calls unlink on an urb that
2041 was never submitted (lazy driver) or if the urb was completed
2042 while dequeue was being called. */
2043 tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb
);
2044 local_irq_restore(flags
);
2047 epid
= urb_priv
->epid
;
2049 tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
2050 (urb
== activeUrbList
[epid
]) ? "active" : "queued",
2051 (unsigned int)urb
, urb_priv
->urb_num
, str_dir(urb
->pipe
),
2052 str_type(urb
->pipe
), epid
, urb
->status
,
2053 (urb_priv
->later_data
) ? "later-sched" : "");
2055 /* For Bulk, Ctrl and Intr are only one URB active at a time. So any URB
2056 that isn't active can be dequeued by just removing it from the queue */
2057 if(usb_pipebulk(urb
->pipe
) || usb_pipecontrol(urb
->pipe
) ||
2058 usb_pipeint(urb
->pipe
)) {
2060 /* Check if URB haven't gone further than the queue */
2061 if(urb
!= activeUrbList
[epid
]) {
2062 ASSERT(urb_priv
->later_data
== NULL
);
2063 tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
2064 " (not active)\n", (unsigned int)urb
, urb_priv
->urb_num
,
2065 str_dir(urb
->pipe
), str_type(urb
->pipe
), epid
);
2067 /* Finish the URB with error status from USB core */
2068 tc_finish_urb(hcd
, urb
, urb
->status
);
2069 local_irq_restore(flags
);
2074 /* Set URB status to Unlink for handling when interrupt comes. */
2075 urb_priv
->urb_state
= UNLINK
;
2077 /* Differentiate dequeing of Bulk and Ctrl from Isoc and Intr */
2078 switch(usb_pipetype(urb
->pipe
)) {
2080 /* Check if EP still is enabled */
2081 if (TxBulkEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2082 /* The EP was enabled, disable it. */
2083 TxBulkEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2085 /* Kicking dummy list out of the party. */
2086 TxBulkEPList
[epid
].next
= virt_to_phys(&TxBulkEPList
[(epid
+ 1) % NBR_OF_EPIDS
]);
2089 /* Check if EP still is enabled */
2090 if (TxCtrlEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2091 /* The EP was enabled, disable it. */
2092 TxCtrlEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2095 case PIPE_ISOCHRONOUS
:
2096 /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
2097 finish_isoc_urb(). Because there might the case when URB is dequeued
2098 but there are other valid URBs waiting */
2100 /* Check if In Isoc EP still is enabled */
2101 if (TxIsocEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2102 /* The EP was enabled, disable it. */
2103 TxIsocEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2106 case PIPE_INTERRUPT
:
2107 /* Special care is taken for interrupt URBs. EPs are unlinked in
2114 /* Asynchronous unlink, finish the URB later from scheduled or other
2115 event (data finished, error) */
2116 tc_finish_urb_later(hcd
, urb
, urb
->status
);
2118 local_irq_restore(flags
);
/* Synchronously shut down an epid: disable its EP descriptor(s), busy-wait
   for the DMA to leave them, then finish the active URB and every queued URB
   with -ENOENT. Runs in process context from tc_endpoint_disable(). */
static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
  struct urb *urb;
  volatile int timeout = 10000;        /* busy-wait bound for DMA to move on */
  struct crisv10_urb_priv * urb_priv;
  unsigned long flags;

  volatile struct USB_EP_Desc *first_ep;  /* First EP in the list. */
  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */
  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */

  int type = epid_state[epid].type;

  /* Setting this flag will cause enqueue() to return -ENOENT for new
     submissions on this endpoint and finish_urb() won't process the queue
     further */
  epid_state[epid].disabled = 1;

  switch(type) {
  case PIPE_BULK:
    /* Check if EP still is enabled */
    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);

      /* Do busy-wait until DMA not using this EP descriptor anymore */
      while((*R_DMA_CH8_SUB0_EP ==
             virt_to_phys(&TxBulkEPList[epid])) &&
            (timeout-- > 0));
      if(timeout == 0) {
        warn("Timeout while waiting for DMA-TX-Bulk to leave EP for"
             " epid:%d\n", epid);
      }
    }
    break;

  case PIPE_CONTROL:
    /* Check if EP still is enabled */
    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);

      /* Do busy-wait until DMA not using this EP descriptor anymore */
      while((*R_DMA_CH8_SUB1_EP ==
             virt_to_phys(&TxCtrlEPList[epid])) &&
            (timeout-- > 0));
      if(timeout == 0) {
        warn("Timeout while waiting for DMA-TX-Ctrl to leave EP for"
             " epid:%d\n", epid);
      }
    }
    break;

  case PIPE_INTERRUPT:
    local_irq_save(flags);
    /* Disable all Intr EPs belonging to epid by walking the circular
       TxIntr EP ring once */
    first_ep = &TxIntrEPList[0];
    curr_ep = first_ep;
    do {
      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
      if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
        /* Disable EP */
        next_ep->command &= ~IO_MASK(USB_EP_command, enable);
      }
      curr_ep = phys_to_virt(curr_ep->next);
    } while (curr_ep != first_ep);

    local_irq_restore(flags);
    break;

  case PIPE_ISOCHRONOUS:
    /* Check if EP still is enabled */
    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
      /* The EP was enabled, disable it. */
      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);

      while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
            (timeout-- > 0));
      if(timeout == 0) {
        warn("Timeout while waiting for DMA-TX-Isoc to leave EP for"
             " epid:%d\n", epid);
      }
    }
    break;
  }

  local_irq_save(flags);

  /* Finish if there is active URB for this endpoint */
  if(activeUrbList[epid] != NULL) {
    urb = activeUrbList[epid];
    urb_priv = urb->hcpriv;
    ASSERT(urb_priv);
    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
            (urb == activeUrbList[epid]) ? "active" : "queued",
            (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
            str_type(urb->pipe), epid, urb->status,
            (urb_priv->later_data) ? "later-sched" : "");

    tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
    ASSERT(activeUrbList[epid] == NULL);
  }

  /* Finish any queued URBs for this endpoint. There won't be any
     resubmissions because epid_disabled causes enqueue() to fail for this
     endpoint */
  while((urb = urb_list_first(epid)) != NULL) {
    urb_priv = urb->hcpriv;
    ASSERT(urb_priv);
    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
            (urb == activeUrbList[epid]) ? "active" : "queued",
            (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
            str_type(urb->pipe), epid, urb->status,
            (urb_priv->later_data) ? "later-sched" : "");

    tc_finish_urb(hcd, urb, -ENOENT);
  }

  /* Allow new submissions on this endpoint again */
  epid_state[epid].disabled = 0;
  local_irq_restore(flags);
}
/* free resources associated with an endpoint (called from hcd_driver) */
static void tc_endpoint_disable(struct usb_hcd *hcd,
                                struct usb_host_endpoint *ep) {
  /* Only free epid if it has been allocated. We get two endpoint_disable
     requests for ctrl endpoints so ignore the second one */
  if(ep->hcpriv != NULL) {
    struct crisv10_ep_priv *ep_priv = ep->hcpriv;
    int epid = ep_priv->epid;
    tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
            (unsigned int)ep, (unsigned int)ep->hcpriv,
            endpoint_to_str(&(ep->desc)), epid);

    /* Retire every URB (active and queued) for this epid first */
    tc_sync_finish_epid(hcd, epid);

    /* After the sync finish nothing may remain attached to this epid */
    ASSERT(activeUrbList[epid] == NULL);
    ASSERT(list_empty(&urb_list[epid]));

    tc_free_epid(ep);
  } else {
    tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
           (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
  }
}
/* Deferred-work handler for asynchronous URB completion. Validates that the
   scheduled urb_later_data still refers to the current incarnation of the URB
   (via urb_num) before finishing it, then frees the later-data. */
static void tc_finish_urb_later_proc(struct work_struct* work) {
  unsigned long flags;
  struct urb_later_data* uld;

  local_irq_save(flags);
  uld = container_of(work, struct urb_later_data, dws.work);
  if(uld->urb == NULL) {
    /* finish_urb() cleared the back-pointer: nothing left to do */
    late_dbg("Later finish of URB = NULL (allready finished)\n");
  } else {
    struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
    ASSERT(urb_priv);
    if(urb_priv->urb_num == uld->urb_num) {
      late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
               urb_priv->urb_num);
      if(uld->status != uld->urb->status) {
        errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
                  uld->urb->status, uld->status);
      }
      if(uld != urb_priv->later_data) {
        /* Sanity check: only one later-finish may be scheduled per URB */
        panic("Scheduled uld not same as URBs uld\n");
      }
      tc_finish_urb(uld->hcd, uld->urb, uld->status);
    } else {
      /* URB was recycled since scheduling: stale request, drop it */
      late_warn("Ignoring later finish of URB:0x%x[%d]"
                ", urb_num doesn't match current URB:0x%x[%d]",
                (unsigned int)(uld->urb), uld->urb_num,
                (unsigned int)(uld->urb), urb_priv->urb_num);
    }
  }
  local_irq_restore(flags);
  kmem_cache_free(later_data_cache, uld);
}
/* Schedule a URB to be finished later from workqueue context with the given
   status. If a later-finish is already pending for this URB only the status
   is updated. */
static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
                                int status) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  struct urb_later_data* uld;

  ASSERT(urb_priv);

  if(urb_priv->later_data != NULL) {
    /* Later-finish already scheduled for this URB, just update status to
       return when finishing later */
    errno_dbg("Later-finish schedule change URB status:%d with new"
              " status:%d\n", urb_priv->later_data->status, status);

    urb_priv->later_data->status = status;
    return;
  }

  uld = kmem_cache_alloc(later_data_cache, GFP_ATOMIC);
  ASSERT(uld);

  uld->hcd = hcd;
  uld->urb = urb;
  /* Remember the URB incarnation so a recycled URB is not finished twice */
  uld->urb_num = urb_priv->urb_num;
  uld->status = status;

  INIT_DELAYED_WORK(&uld->dws, tc_finish_urb_later_proc);
  urb_priv->later_data = uld;

  /* Schedule the finishing of the URB to happen later */
  schedule_delayed_work(&uld->dws, LATER_TIMER_DELAY);
}
2337 static void tc_finish_isoc_urb(struct usb_hcd
*hcd
, struct urb
*urb
,
/* Finish a URB: fix up actual_length and status, restore the data-toggle
   into the USB core, unlink HW descriptors for the finished transfer, give
   the URB back to its driver and restart the queue for this epid.
   Isoc URBs are handed off to tc_finish_isoc_urb(). */
static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid;
  char toggle;
  int urb_num;

  ASSERT(urb_priv != NULL);
  epid = urb_priv->epid;
  urb_num = urb_priv->urb_num;

  if(urb != activeUrbList[epid]) {
    if(urb_list_entry(urb, epid)) {
      /* Remove this URB from the list. Only happens when URB are finished
         before having been processed (dequeuing) */
      urb_list_del(urb, epid);
    } else {
      tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
              " epid:%d\n", (unsigned int)urb, urb_num, epid);
    }
  }

  /* Cancel any pending later-finish of this URB */
  if(urb_priv->later_data) {
    urb_priv->later_data->urb = NULL;
  }

  /* For an IN pipe, we always set the actual length, regardless of whether
     there was an error or not (which means the device driver can use the
     data if it wants to). */
  if(usb_pipein(urb->pipe)) {
    urb->actual_length = urb_priv->rx_offset;
  } else {
    /* Set actual_length for OUT urbs also; the USB mass storage driver seems
       to want that. */
    if (status == 0 && urb->status == -EINPROGRESS) {
      urb->actual_length = urb->transfer_buffer_length;
    } else {
      /* We wouldn't know of any partial writes if there was an error. */
      urb->actual_length = 0;
    }
  }

  /* URB status mangling */
  if(urb->status == -EINPROGRESS) {
    /* The USB core hasn't changed the status, let's set our finish status */
    urb->status = status;

    if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
        usb_pipein(urb->pipe) &&
        (urb->actual_length != urb->transfer_buffer_length)) {
      /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
         max length) is to be treated as an error. */
      errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
                " data:%d\n", (unsigned int)urb, urb_num,
                urb->actual_length);
      urb->status = -EREMOTEIO;
    }

    if(urb_priv->urb_state == UNLINK) {
      /* URB has been requested to be unlinked asynchronously */
      urb->status = -ECONNRESET;
      errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
                (unsigned int)urb, urb_num, urb->status);
    }
  } else {
    /* The USB Core wants to signal some error via the URB, pass it through */
  }

  /* use completely different finish function for Isoc URBs */
  if(usb_pipeisoc(urb->pipe)) {
    tc_finish_isoc_urb(hcd, urb, status);
    return;
  }

  /* Do special unlinking of EPs for Intr traffic */
  if(usb_pipeint(urb->pipe)) {
    tc_dma_unlink_intr_urb(urb);
  }

  /* Release allocated bandwidth for periodic transfers */
  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
    crisv10_usb_release_bandwidth(hcd,
                                  usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS,
                                  urb_priv->bandwidth);

  /* This URB is active on EP */
  if(urb == activeUrbList[epid]) {
    /* We need to fiddle with the toggle bits because the hardware doesn't do
       it for us. */
    toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
    usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                  usb_pipeout(urb->pipe), toggle);

    /* Checks for Ctrl and Bulk EPs */
    switch(usb_pipetype(urb->pipe)) {
    case PIPE_BULK:
      /* Check so Bulk EP really is disabled before finishing active URB */
      ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
             IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
         process it as a new URB */
      TxBulkEPList[epid].sub = 0;
      /* No need to wait for the DMA before changing the next pointer.
         The modulo NBR_OF_EPIDS isn't actually necessary, since we will
         never use the last one (INVALID_EPID) for actual traffic. */
      TxBulkEPList[epid].next =
        virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
      break;
    case PIPE_CONTROL:
      /* Check so Ctrl EP really is disabled before finishing active URB */
      ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
             IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
         process it as a new URB */
      TxCtrlEPList[epid].sub = 0;
      break;
    }
  }

  /* Free HC-private URB data*/
  urb_priv_free(hcd, urb);

  if(urb->status) {
    errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
              (unsigned int)urb, urb_num, str_dir(urb->pipe),
              str_type(urb->pipe), urb->actual_length, urb->status);
  } else {
    tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
           (unsigned int)urb, urb_num, str_dir(urb->pipe),
           str_type(urb->pipe), urb->actual_length, urb->status);
  }

  /* If we just finished an active URB, clear active pointer. */
  if (urb == activeUrbList[epid]) {
    /* Make URB not active on EP anymore */
    activeUrbList[epid] = NULL;

    if(urb->status == 0) {
      /* URB finished successfully, process queue to see if there are any
         more URBs waiting before we call completion function.*/
      if(crisv10_hcd->running) {
        /* Only process queue if USB controller is running */
        tc_dma_process_queue(epid);
      } else {
        tc_warn("No processing of queue for epid:%d, USB Controller not"
                " running\n", epid);
      }
    }
  }

  /* Hand the URB from HCD to its USB device driver, using its completion
     function */
  usb_hcd_giveback_urb (hcd, urb, status);

  /* Check the queue once more if the URB returned with error, because we
     didn't do it before the completion function because the specification
     states that the queue should not restart until all it's unlinked
     URBs have been fully retired, with the completion functions run */
  if(crisv10_hcd->running) {
    /* Only process queue if USB controller is running */
    tc_dma_process_queue(epid);
  } else {
    tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
            epid);
  }
}
/* Finish an Isoc URB. If the URB was active, either promote the next queued
   Isoc URB to active (its SBs are already in the DMA list) or shut the Isoc
   EP down. If it was only queued, its SBs must be unlinked from the EP's SB
   chain. Finally releases bandwidth and gives the URB back. */
static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
                               int status) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid, i;
  volatile int timeout = 10000;   /* busy-wait bound for DMA to leave the EP */
  int bandwidth = 0;

  ASSERT(urb_priv);
  epid = urb_priv->epid;

  ASSERT(usb_pipeisoc(urb->pipe));

  /* Set that all isoc packets have status and length set before
     completing the urb. */
  for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
    urb->iso_frame_desc[i].actual_length = 0;
    urb->iso_frame_desc[i].status = -EPROTO;
  }

  /* Check if the URB is currently active (done or error) */
  if(urb == activeUrbList[epid]) {
    /* Check if there are another In Isoc URB queued for this epid */
    if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
      /* Move it from queue to active and mark it started so Isoc transfers
         won't be interrupted.
         All Isoc URBs data transfers are already added to DMA lists so we
         don't have to insert anything in DMA lists here. */
      activeUrbList[epid] = urb_list_first(epid);
      ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
        STARTED;
      urb_list_del(activeUrbList[epid], epid);

      if(urb->status) {
        errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
                  " status:%d, new waiting URB:0x%x[%d]\n",
                  (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
                  str_type(urb->pipe), urb_priv->isoc_packet_counter,
                  urb->number_of_packets, urb->status,
                  (unsigned int)activeUrbList[epid],
                  ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
      }

    } else { /* No other URB queued for this epid */
      if(urb->status) {
        errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
                  " status:%d, no new URB waiting\n",
                  (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
                  str_type(urb->pipe), urb_priv->isoc_packet_counter,
                  urb->number_of_packets, urb->status);
      }

      /* Check if EP is still enabled, then shut it down. */
      if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
        isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);

        /* Should only occur for In Isoc EPs where SB isn't consumed. */
        ASSERT(usb_pipein(urb->pipe));

        /* Disable it and wait for it to stop */
        TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);

        /* Ah, the luxury of busy-wait. */
        while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
              (timeout-- > 0));
        if(timeout == 0) {
          warn("Timeout while waiting for DMA-TX-Isoc to leave EP for epid:%d\n", epid);
        }
      }

      /* Unlink SB to say that epid is finished. */
      TxIsocEPList[epid].sub = 0;
      TxIsocEPList[epid].hw_len = 0;

      /* No URB active for EP anymore */
      activeUrbList[epid] = NULL;
    }
  } else { /* Finishing of not active URB (queued up with SBs though) */
    isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
              " SB queued but not active\n",
              (unsigned int)urb, str_dir(urb->pipe),
              urb_priv->isoc_packet_counter, urb->number_of_packets,
              urb->status);
    if(usb_pipeout(urb->pipe)) {
      /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
      struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;

      iter_sb = TxIsocEPList[epid].sub ?
        phys_to_virt(TxIsocEPList[epid].sub) : 0;
      prev_sb = 0;

      /* Find the SB that is linked before this URB's first SB */
      while (iter_sb && (iter_sb != urb_priv->first_sb)) {
        prev_sb = iter_sb;
        iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      }

      if (iter_sb == 0) {
        /* Unlink of the URB currently being transmitted. */
        prev_sb = 0;
        iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
      }

      /* Advance to this URB's last SB */
      while (iter_sb && (iter_sb != urb_priv->last_sb)) {
        iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      }
      if (iter_sb) {
        next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      } else {
        /* This should only happen if the DMA has completed
           processing the SB list for this EP while interrupts
           are disabled. */
        isoc_dbg("Isoc urb not found, already sent?\n");
        next_sb = 0;
      }

      /* Bypass this URB's SB span in the chain */
      if (prev_sb) {
        prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
      } else {
        TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
      }
    }
  }

  /* Free HC-private URB data*/
  bandwidth = urb_priv->bandwidth;
  urb_priv_free(hcd, urb);

  crisv10_usb_release_bandwidth(hcd, usb_pipeisoc(urb->pipe), bandwidth);

  /* Hand the URB from HCD to its USB device driver, using its completion
     function */
  usb_hcd_giveback_urb (hcd, urb, status);
}
2646 static __u32 urb_num
= 0;
2648 /* allocate and initialize URB private data */
2649 static int urb_priv_create(struct usb_hcd
*hcd
, struct urb
*urb
, int epid
,
2651 struct crisv10_urb_priv
*urb_priv
;
2653 urb_priv
= kmalloc(sizeof *urb_priv
, mem_flags
);
2656 memset(urb_priv
, 0, sizeof *urb_priv
);
2658 urb_priv
->epid
= epid
;
2659 urb_priv
->urb_state
= NOT_STARTED
;
2661 urb
->hcpriv
= urb_priv
;
2662 /* Assign URB a sequence number, and increment counter */
2663 urb_priv
->urb_num
= urb_num
;
2665 urb_priv
->bandwidth
= 0;
/* free URB private data */
/* Releases the SB chain (first_sb..last_sb), any Intr EP pool entries, and
   the crisv10_urb_priv itself; clears urb->hcpriv. */
static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
  int i;
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  ASSERT(urb_priv != 0);

  /* Check it has any SBs linked that needs to be freed*/
  if(urb_priv->first_sb != NULL) {
    struct USB_SB_Desc *next_sb, *first_sb, *last_sb;

    first_sb = urb_priv->first_sb;
    last_sb = urb_priv->last_sb;
    ASSERT(last_sb);

    /* Walk the chain via the physical next pointers and free each SB */
    while(first_sb != last_sb) {
      next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
      kmem_cache_free(usb_desc_cache, first_sb);
      first_sb = next_sb;
    }
    kmem_cache_free(usb_desc_cache, last_sb);
  }

  /* Check if it has any EPs in its Intr pool that also needs to be freed */
  if(urb_priv->intr_ep_pool_length > 0) {
    for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
      kfree(urb_priv->intr_ep_pool[i]);
    }
    tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
           urb_priv->intr_ep_pool_length, (unsigned int)urb);
  }

  kfree(urb_priv);
  urb->hcpriv = NULL;
}
2707 static int ep_priv_create(struct usb_host_endpoint
*ep
, int mem_flags
) {
2708 struct crisv10_ep_priv
*ep_priv
;
2710 ep_priv
= kmalloc(sizeof *ep_priv
, mem_flags
);
2713 memset(ep_priv
, 0, sizeof *ep_priv
);
2715 ep
->hcpriv
= ep_priv
;
/* Free the HC-private endpoint data and clear the hcpriv pointer so a later
   endpoint_disable call sees the epid as already released. */
static void ep_priv_free(struct usb_host_endpoint *ep) {
  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  ASSERT(ep_priv);
  kfree(ep_priv);
  ep->hcpriv = NULL;
}
2727 * usb_check_bandwidth():
2729 * old_alloc is from host_controller->bandwidth_allocated in microseconds;
2730 * bustime is from calc_bus_time(), but converted to microseconds.
2732 * returns <bustime in us> if successful,
2733 * or -ENOSPC if bandwidth request fails.
2736 * This initial implementation does not use Endpoint.bInterval
2737 * in managing bandwidth allocation.
2738 * It probably needs to be expanded to use Endpoint.bInterval.
2739 * This can be done as a later enhancement (correction).
2741 * This will also probably require some kind of
2742 * frame allocation tracking...meaning, for example,
2743 * that if multiple drivers request interrupts every 10 USB frames,
2744 * they don't all have to be allocated at
2745 * frame numbers N, N+10, N+20, etc. Some of them could be at
2746 * N+11, N+21, N+31, etc., and others at
2747 * N+12, N+22, N+32, etc.
2749 * Similarly for isochronous transfers...
2751 * Individual HCDs can schedule more directly ... this logic
2752 * is not correct for high speed transfers.
/* Check whether the periodic transfer described by urb fits in the remaining
   per-frame bandwidth of the bus.
   Returns the bus time in microseconds on success, or -ENOSPC if the request
   would exceed FRAME_TIME_MAX_USECS_ALLOC. */
static int crisv10_usb_check_bandwidth(
  struct usb_device *dev,
  struct urb *urb)
{
  unsigned int pipe = urb->pipe;
  long bustime;
  int is_in = usb_pipein (pipe);
  int is_iso = usb_pipeisoc (pipe);
  int old_alloc = dev->bus->bandwidth_allocated;
  int new_alloc;

  bustime = NS_TO_US (usb_calc_bus_time (dev->speed, is_in, is_iso,
                                         usb_maxpacket (dev, pipe, !is_in)));
  if (is_iso)
    /* Isoc bus time is computed per URB; average it over the packets */
    bustime /= urb->number_of_packets;

  new_alloc = old_alloc + (int) bustime;
  if (new_alloc > FRAME_TIME_MAX_USECS_ALLOC) {
    dev_dbg (&dev->dev, "usb_check_bandwidth FAILED: %d + %ld = %d usec\n",
             old_alloc, bustime, new_alloc);
    bustime = -ENOSPC;	/* report error */
  }

  return bustime;
}
2781 * usb_claim_bandwidth - records bandwidth for a periodic transfer
2782 * @dev: source/target of request
2783 * @urb: request (urb->dev == dev)
2784 * @bustime: bandwidth consumed, in (average) microseconds per frame
2785 * @isoc: true iff the request is isochronous
2787 * HCDs are expected not to overcommit periodic bandwidth, and to record such
2788 * reservations whenever endpoints are added to the periodic schedule.
2790 * FIXME averaging per-frame is suboptimal. Better to sum over the HCD's
2791 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable
2792 * for EHCI (256/512/1024 frames, default 1024) and have the bus expose how
2793 * large its periodic schedule is.
/* Record that this periodic URB now consumes bustime microseconds per frame
   on the bus, and remember the amount in the URB's private data so it can be
   given back by crisv10_usb_release_bandwidth(). */
static void crisv10_usb_claim_bandwidth(
  struct usb_device *dev,
  struct urb *urb, int bustime, int isoc)
{
  dev->bus->bandwidth_allocated += bustime;
  if (isoc)
    dev->bus->bandwidth_isoc_reqs++;
  else
    dev->bus->bandwidth_int_reqs++;

  {
    struct crisv10_urb_priv *urb_priv;
    urb_priv = urb->hcpriv;
    /* Saved so the exact claimed amount can be released later */
    urb_priv->bandwidth = bustime;
  }
}
2810 * usb_release_bandwidth - reverses effect of usb_claim_bandwidth()
2811 * @hcd: host controller
2812 * @isoc: true iff the request is isochronous
2813 * @bandwidth: bandwidth returned
2815 * This records that previously allocated bandwidth has been released.
2816 * Bandwidth is released when endpoints are removed from the host controller's
2817 * periodic schedule.
/* Reverse of crisv10_usb_claim_bandwidth(): return previously claimed
   per-frame bandwidth to the bus accounting. */
static void crisv10_usb_release_bandwidth(
  struct usb_hcd *hcd,
  int isoc,
  int bandwidth)
{
  hcd_to_bus(hcd)->bandwidth_allocated -= bandwidth;
  if (isoc)
    hcd_to_bus(hcd)->bandwidth_isoc_reqs--;
  else
    hcd_to_bus(hcd)->bandwidth_int_reqs--;
}
2832 /* EPID handling functions, managing EP-list in Etrax through wrappers */
2833 /* ------------------------------------------------------------------- */
2835 /* Sets up a new EPID for an endpoint or returns existing if found */
/* Sets up a new EPID for an endpoint or returns existing if found */
/* Allocates an epid, creates the endpoint's private data and programs the
   hardware EP table entry (R_USB_EPT_DATA / R_USB_EPT_DATA_ISO).
   Returns the epid on success, a negative value on failure. */
static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
                         int mem_flags) {
  int epid;
  char devnum, endpoint, out_traffic, slow;
  int maxlen;
  __u32 epid_data;
  struct crisv10_ep_priv *ep_priv = ep->hcpriv;

  /* Check if a valid epid already is setup for this endpoint */
  if(ep_priv != NULL) {
    return ep_priv->epid;
  }

  /* We must find and initiate a new epid for this urb. */
  epid = tc_allocate_epid();
  if (epid < 0) {
    /* Failed to allocate a new epid. */
    return epid;
  }

  /* We now have a new epid to use. Claim it. */
  epid_state[epid].inuse = 1;

  /* Init private data for new endpoint */
  if(ep_priv_create(ep, mem_flags) != 0) {
    return -ENOMEM;
  }
  ep_priv = ep->hcpriv;
  ep_priv->epid = epid;

  devnum = usb_pipedevice(urb->pipe);
  endpoint = usb_pipeendpoint(urb->pipe);
  slow = (urb->dev->speed == USB_SPEED_LOW);
  maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));

  if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
    /* We want both IN and OUT control traffic to be put on the same
       EP/SB list. */
    out_traffic = 1;
  } else {
    out_traffic = usb_pipeout(urb->pipe);
  }

  if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
    epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
      /* FIXME: Change any to the actual port? */
      IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
      IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
      IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
      IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
    etrax_epid_iso_set(epid, epid_data);
  } else {
    epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
      IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
      /* FIXME: Change any to the actual port? */
      IO_STATE(R_USB_EPT_DATA, port, any) |
      IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
      IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
      IO_FIELD(R_USB_EPT_DATA, dev, devnum);
    etrax_epid_set(epid, epid_data);
  }

  /* Mirror the HW setup in the software epid state */
  epid_state[epid].out_traffic = out_traffic;
  epid_state[epid].type = usb_pipetype(urb->pipe);

  tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
          (unsigned int)ep, epid, devnum, endpoint, maxlen,
          str_type(urb->pipe), out_traffic ? "out" : "in",
          slow ? "low" : "full");

  /* Enable Isoc eof interrupt if we set up the first Isoc epid */
  if(usb_pipeisoc(urb->pipe)) {
    isoc_epid_counter++;
    if(isoc_epid_counter == 1) {
      isoc_warn("Enabled Isoc eof interrupt\n");
      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
    }
  }

  return epid;
}
/* Release an epid: wait for the HW EP table entry to drop its hold bit,
   invalidate the entry, mark the epid unused and free the endpoint's private
   data. Safe to call with an endpoint that never got an epid. */
static void tc_free_epid(struct usb_host_endpoint *ep) {
  unsigned long flags;
  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  int epid;
  volatile int timeout = 10000;   /* busy-wait bound for hold bit to clear */

  if (ep_priv == NULL) {
    tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
    return;
  }

  epid = ep_priv->epid;

  /* Disable Isoc eof interrupt if we free the last Isoc epid */
  if(epid_isoc(epid)) {
    ASSERT(isoc_epid_counter > 0);
    isoc_epid_counter--;
    if(isoc_epid_counter == 0) {
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, iso_eof, clr);
      isoc_warn("Disabled Isoc eof interrupt\n");
    }
  }

  /* Take lock manually instead of in epid_x_x wrappers,
     because we need to be polling here */
  spin_lock_irqsave(&etrax_epid_lock, flags);

  /* Select the EP table entry, then poll until the controller releases it */
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
        (timeout-- > 0));
  if(timeout == 0) {
    warn("Timeout while waiting for epid:%d to drop hold\n", epid);
  }
  /* This will, among other things, set the valid field to 0. */
  *R_USB_EPT_DATA = 0;
  spin_unlock_irqrestore(&etrax_epid_lock, flags);

  /* Free resource in software state info list */
  epid_state[epid].inuse = 0;

  /* Free private endpoint data */
  ep_priv_free(ep);
}
/* Find and return the first epid not currently in use, or -1 if all
   NBR_OF_EPIDS are taken. The caller marks the epid as in use. */
static int tc_allocate_epid(void) {
  int i;

  for (i = 0; i < NBR_OF_EPIDS; i++) {
    if (!epid_inuse(i)) {
      /* Found a free epid */
      return i;
    }
  }

  tc_warn("Found no free epids\n");
  return -1;
}
2989 /* Wrappers around the list functions (include/linux/list.h). */
2990 /* ---------------------------------------------------------- */
2991 static inline int __urb_list_empty(int epid
) {
2993 retval
= list_empty(&urb_list
[epid
]);
/* Returns first urb for this epid, or NULL if list is empty. */
static inline struct urb *urb_list_first(int epid) {
  unsigned long flags;
  struct urb *first_urb = 0;
  spin_lock_irqsave(&urb_list_lock, flags);
  if (!__urb_list_empty(epid)) {
    /* Get the first urb (i.e. head->next). */
    urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
    first_urb = urb_entry->urb;
  }
  spin_unlock_irqrestore(&urb_list_lock, flags);
  return first_urb;
}
3011 /* Adds an urb_entry last in the list for this epid. */
3012 static inline void urb_list_add(struct urb
*urb
, int epid
, int mem_flags
) {
3013 unsigned long flags
;
3014 urb_entry_t
*urb_entry
= (urb_entry_t
*)kmalloc(sizeof(urb_entry_t
), mem_flags
);
3017 urb_entry
->urb
= urb
;
3018 spin_lock_irqsave(&urb_list_lock
, flags
);
3019 list_add_tail(&urb_entry
->list
, &urb_list
[epid
]);
3020 spin_unlock_irqrestore(&urb_list_lock
, flags
);
/* Search through the list for an element that contains this urb. (The list
   is expected to be short and the one we are about to delete will often be
   the first in the list.)
   Should be protected by spin_locks in calling function.
   Returns the matching urb_entry_t, or 0 if the urb is not queued. */
static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
  struct list_head *entry;
  struct list_head *tmp;
  urb_entry_t *urb_entry;

  list_for_each_safe(entry, tmp, &urb_list[epid]) {
    urb_entry = list_entry(entry, urb_entry_t, list);
    ASSERT(urb_entry);
    ASSERT(urb_entry->urb);

    if (urb_entry->urb == urb) {
      return urb_entry;
    }
  }
  return 0;
}
/* Same function as above but for global use. Protects list by spinlock */
static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
  unsigned long flags;
  urb_entry_t *urb_entry;
  spin_lock_irqsave(&urb_list_lock, flags);
  urb_entry = __urb_list_entry(urb, epid);
  spin_unlock_irqrestore(&urb_list_lock, flags);
  return urb_entry;
}
/* Delete an urb from the list. The urb must be present; the entry is
   unlinked under the lock and freed afterwards. */
static inline void urb_list_del(struct urb *urb, int epid) {
  unsigned long flags;
  urb_entry_t *urb_entry;

  /* Delete entry and free. */
  spin_lock_irqsave(&urb_list_lock, flags);
  urb_entry = __urb_list_entry(urb, epid);
  ASSERT(urb_entry);

  list_del(&urb_entry->list);
  spin_unlock_irqrestore(&urb_list_lock, flags);
  kfree(urb_entry);
}
/* Move an urb to the end of the list. The urb must already be queued for
   this epid. */
static inline void urb_list_move_last(struct urb *urb, int epid) {
  unsigned long flags;
  urb_entry_t *urb_entry;

  spin_lock_irqsave(&urb_list_lock, flags);
  urb_entry = __urb_list_entry(urb, epid);
  ASSERT(urb_entry);

  /* Re-queue at the tail: delete then add-tail under the same lock hold */
  list_del(&urb_entry->list);
  list_add_tail(&urb_entry->list, &urb_list[epid]);
  spin_unlock_irqrestore(&urb_list_lock, flags);
}
/* Get the next urb in the list. Returns NULL when urb is the last entry for
   this epid. The urb itself must be present in the list. */
static inline struct urb *urb_list_next(struct urb *urb, int epid) {
  unsigned long flags;
  urb_entry_t *urb_entry;

  spin_lock_irqsave(&urb_list_lock, flags);
  urb_entry = __urb_list_entry(urb, epid);
  ASSERT(urb_entry);

  if (urb_entry->list.next != &urb_list[epid]) {
    /* Not at the tail: step to the following entry */
    struct list_head *elem = urb_entry->list.next;
    urb_entry = list_entry(elem, urb_entry_t, list);
    spin_unlock_irqrestore(&urb_list_lock, flags);
    return urb_entry->urb;
  } else {
    spin_unlock_irqrestore(&urb_list_lock, flags);
    return NULL;
  }
}
/* Allocate and initialise an enabled EP descriptor for the given epid,
   optionally pointing at a first SB. Returns NULL on allocation failure. */
struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
                              int mem_flags) {
  struct USB_EP_Desc *ep_desc;
  ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
  if(ep_desc == NULL)
    return NULL;
  memset(ep_desc, 0, sizeof(struct USB_EP_Desc));

  ep_desc->hw_len = 0;
  ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
                      IO_STATE(USB_EP_command, enable, yes));
  /* sub holds the physical address of the first SB, or 0 for none */
  if(sb_desc == NULL) {
    ep_desc->sub = 0;
  } else {
    ep_desc->sub = virt_to_phys(sb_desc);
  }
  return ep_desc;
}
3127 #define CMD_EOL IO_STATE(USB_SB_command, eol, yes)
3128 #define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
3129 #define CMD_FULL IO_STATE(USB_SB_command, full, yes)
/* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
   SBs. Also used by create_sb_in() to avoid same allocation procedure at two
   places. Returns NULL on allocation failure. */
struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data,
                              int datalen, int mem_flags) {
  struct USB_SB_Desc *sb_desc;
  sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
  if(sb_desc == NULL)
    return NULL;
  memset(sb_desc, 0, sizeof(struct USB_SB_Desc));

  /* tt selects the token type (SETUP/IN/OUT/ZOUT); eot marks end of
     transfer */
  sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) |
                     IO_STATE(USB_SB_command, eot, yes);

  sb_desc->sw_len = datalen;
  /* buf is a physical address for the DMA, or 0 when there is no data */
  if(data != NULL) {
    sb_desc->buf = virt_to_phys(data);
  } else {
    sb_desc->buf = 0;
  }
  /* Chain this SB after the previous one, if any */
  if(sb_prev != NULL) {
    sb_prev->next = virt_to_phys(sb_desc);
  }
  return sb_desc;
}
/* Creates a copy of an existing SB by allocating space for it and copying
   the contents. Returns NULL on allocation failure. */
struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) {
  struct USB_SB_Desc *sb_desc;
  sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
  if(sb_desc == NULL)
    return NULL;

  memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc));
  return sb_desc;
}
3169 /* A specific create_sb function for creation of in SBs. This is due to
3170 that datalen in In SBs shows how many packets we are expecting. It also
3171 sets up the rem field to show if how many bytes we expect in last packet
3172 if it's not a full one */
3173 struct USB_SB_Desc
* create_sb_in(struct USB_SB_Desc
* sb_prev
, int datalen
,
3174 int maxlen
, int mem_flags
) {
3175 struct USB_SB_Desc
*sb_desc
;
3176 sb_desc
= create_sb(sb_prev
, TT_IN
, NULL
,
3177 datalen
? (datalen
- 1) / maxlen
+ 1 : 0, mem_flags
);
3180 sb_desc
->command
|= IO_FIELD(USB_SB_command
, rem
, datalen
% maxlen
);
3184 void set_sb_cmds(struct USB_SB_Desc
*sb_desc
, __u16 flags
) {
3185 sb_desc
->command
|= flags
;
/* Build the SB (Sub Buffer) descriptor chain for an URB according to its
   pipe type and direction, and attach the chain to the URB's private data
   (first_sb/last_sb).  Returns 0 on success, negative errno otherwise.

   NOTE(review): this copy of the file lost scattered source lines.  The
   switch/case labels, allocation-failure checks, break statements and the
   final return were restored from context and are marked "restored"
   below — diff against version control before trusting those details. */
int create_sb_for_urb(struct urb *urb, int mem_flags) {
  int is_out = !usb_pipein(urb->pipe);
  int type = usb_pipetype(urb->pipe);
  int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out);
  int buf_len = urb->transfer_buffer_length;
  void *buf = buf_len > 0 ? urb->transfer_buffer : NULL;
  struct USB_SB_Desc *sb_desc = NULL;
  int i;                                         /* restored declaration */

  struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  ASSERT(urb_priv != NULL);

  switch (type) {                                /* restored */
  case PIPE_CONTROL:                             /* restored label */
    /* Setup stage: the 8-byte SETUP packet, always sent as a full packet. */
    sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags);
    if (sb_desc == NULL)                         /* restored alloc check */
      return -ENOMEM;
    set_sb_cmds(sb_desc, CMD_FULL);

    /* Attach first SB to URB */
    urb_priv->first_sb = sb_desc;

    if (is_out) { /* Out Control URB */
      /* If this Control OUT transfer has an optional data stage we add
         an OUT token before the mandatory IN (status) token */
      if ((buf_len > 0) && buf) {
        sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags);
        if (sb_desc == NULL)                     /* restored alloc check */
          return -ENOMEM;
        set_sb_cmds(sb_desc, CMD_FULL);
      }

      /* Status stage (IN). */
      /* The data length has to be exactly 1. This is due to a requirement
         of the USB specification that a host must be prepared to receive
         data in the status phase */
      sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
      if (sb_desc == NULL)                       /* restored alloc check */
        return -ENOMEM;
    } else { /* In control URB */
      sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags);
      if (sb_desc == NULL)                       /* restored alloc check */
        return -ENOMEM;

      /* Status stage (zero-length OUT). */
      /* Read comment at zout_buffer declaration for an explanation to this. */
      sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
      if (sb_desc == NULL)                       /* restored alloc check */
        return -ENOMEM;

      /* Set descriptor interrupt flag for in URBs so we can finish URB after
         zout-packet has been sent */
      set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL);
    }
    /* Set end-of-list flag in last SB */
    set_sb_cmds(sb_desc, CMD_EOL);
    /* Attach last SB to URB */
    urb_priv->last_sb = sb_desc;
    break;                                       /* restored */

  case PIPE_BULK:                                /* restored label */
    if (is_out) { /* Out Bulk URB */
      sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
      if (sb_desc == NULL)                       /* restored alloc check */
        return -ENOMEM;

      /* The full field is set to yes, even if we don't actually check that
         this is a full-length transfer (i.e., that transfer_buffer_length %
         maxlen == 0).
         Setting full prevents the USB controller from sending an empty packet
         in that case. However, if URB_ZERO_PACKET was set we want that. */
      if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
        set_sb_cmds(sb_desc, CMD_FULL);
      }
    } else { /* In Bulk URB */
      sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
      if (sb_desc == NULL)                       /* restored alloc check */
        return -ENOMEM;
    }
    /* Set end-of-list flag for last SB */
    set_sb_cmds(sb_desc, CMD_EOL);

    /* Attach SB to URB */
    urb_priv->first_sb = sb_desc;
    urb_priv->last_sb = sb_desc;
    break;                                       /* restored */

  case PIPE_INTERRUPT:
    if (is_out) { /* Out Intr URB */
      sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
      if (sb_desc == NULL)                       /* restored alloc check */
        return -ENOMEM;

      /* The full field is set to yes, even if we don't actually check that
         this is a full-length transfer (i.e., that transfer_buffer_length %
         maxlen == 0).
         Setting full prevents the USB controller from sending an empty packet
         in that case. However, if URB_ZERO_PACKET was set we want that. */
      if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
        set_sb_cmds(sb_desc, CMD_FULL);
      }
      /* Only generate TX interrupt if it's a Out URB*/
      set_sb_cmds(sb_desc, CMD_INTR);
    } else { /* In Intr URB */
      sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
      if (sb_desc == NULL)                       /* restored alloc check */
        return -ENOMEM;
    }
    /* Set end-of-list flag for last SB */
    set_sb_cmds(sb_desc, CMD_EOL);

    /* Attach SB to URB */
    urb_priv->first_sb = sb_desc;
    urb_priv->last_sb = sb_desc;
    break;                                       /* restored */

  case PIPE_ISOCHRONOUS:
    if (is_out) { /* Out Isoc URB */
      if (urb->number_of_packets == 0) {
        tc_err("Can't create SBs for Isoc URB with zero packets\n");
        return -EPIPE;                           /* restored — verify errno */
      }
      /* Create one SB descriptor for each packet and link them together. */
      for (i = 0; i < urb->number_of_packets; i++) {
        if (urb->iso_frame_desc[i].length > 0) {
          sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer +
                              urb->iso_frame_desc[i].offset,
                              urb->iso_frame_desc[i].length, mem_flags);
          if (sb_desc == NULL)                   /* restored alloc check */
            return -ENOMEM;

          /* Check if it's a full length packet */
          if (urb->iso_frame_desc[i].length ==
              usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
            set_sb_cmds(sb_desc, CMD_FULL);
          }
        } else { /* zero length packet */
          sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
          if (sb_desc == NULL)                   /* restored alloc check */
            return -ENOMEM;
          set_sb_cmds(sb_desc, CMD_FULL);
        }
        /* Attach first SB descriptor to URB */
        if (i == 0) {                            /* restored guard — verify */
          urb_priv->first_sb = sb_desc;
        }
      }
      /* Set interrupt and end-of-list flags in last SB */
      set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL);
      /* Attach last SB descriptor to URB */
      urb_priv->last_sb = sb_desc;
      tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
             urb->number_of_packets, (unsigned int)urb);
    } else { /* In Isoc URB */
      /* Actual number of packets is not relevant for periodic in traffic as
         long as it is more than zero. Set to 1 always. */
      sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
      if (sb_desc == NULL)                       /* restored alloc check */
        return -ENOMEM;
      /* Set end-of-list flags for SB */
      set_sb_cmds(sb_desc, CMD_EOL);

      /* Attach SB to URB */
      urb_priv->first_sb = sb_desc;
      urb_priv->last_sb = sb_desc;
    }
    break;                                       /* restored */

  default:                                       /* restored */
    tc_err("Unknown pipe-type\n");
    return -EPIPE;                               /* restored — verify errno */
  }
  return 0;                                      /* restored */
}
/* Allocate and set up the pool of interrupt EP descriptors for an Intr URB:
   rounds the requested interval down to a power of two, forces
   MAX_INTR_INTERVAL for OUT pipes, then creates one EP per slot
   (MAX_INTR_INTERVAL / interval), each pointing at the URB's first SB.
   Returns 0 on success, negative errno on failure.

   NOTE(review): this copy of the file lost scattered source lines;
   declarations, error returns, the cleanup loop and closing braces were
   restored from context and marked "restored" — verify against version
   control. */
int init_intr_urb(struct urb *urb, int mem_flags) {
  struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  struct USB_EP_Desc * ep_desc;
  int interval;                                  /* restored declaration */
  int i;                                         /* restored declaration */
  int ep_count;                                  /* restored declaration */

  ASSERT(urb_priv != NULL);
  ASSERT(usb_pipeint(urb->pipe));
  /* We can't support interval longer than amount of eof descriptors in
     the Intr EP descriptor list. */
  if (urb->interval > MAX_INTR_INTERVAL) {
    tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval,
           MAX_INTR_INTERVAL);                   /* restored argument */
    return -EINVAL;                              /* restored error return */
  }

  /* We assume that the SB descriptors already have been setup */
  ASSERT(urb_priv->first_sb != NULL);

  /* Round of the interval to 2^n, it is obvious that this code favours
     smaller numbers, but that is actually a good thing */
  /* FIXME: The "rounding error" for larger intervals will be quite
     large. For in traffic this shouldn't be a problem since it will only
     mean that we "poll" more often. */
  interval = urb->interval;
  for (i = 0; interval; i++) {
    interval = interval >> 1;
  }
  /* After the loop i == floor(log2(urb->interval)) + 1, so this is the
     largest power of two <= the requested interval. */
  urb_priv->interval = 1 << (i - 1);

  /* We can only have max interval for Out Interrupt due to that we can only
     handle one linked in EP for a certain epid in the Intr descr array at the
     time. The USB Controller in the Etrax 100LX continues to process Intr EPs
     so we have no way of knowing which one that caused the actual transfer if
     we have several linked in. */
  if (usb_pipeout(urb->pipe)) {
    urb_priv->interval = MAX_INTR_INTERVAL;
  }

  /* Calculate amount of EPs needed */
  ep_count = MAX_INTR_INTERVAL / urb_priv->interval;

  for (i = 0; i < ep_count; i++) {
    ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags);
    if (ep_desc == NULL) {
      /* Free any descriptors that we may have allocated before failure */
      while (i > 0) {                            /* restored cleanup loop */
        i--;
        kfree(urb_priv->intr_ep_pool[i]);
      }
      return -ENOMEM;                            /* restored error return */
    }
    urb_priv->intr_ep_pool[i] = ep_desc;
  }
  urb_priv->intr_ep_pool_length = ep_count;
  return 0;                                      /* restored */
}
3425 /* DMA RX/TX functions */
3426 /* ----------------------- */
3428 static void tc_dma_init_rx_list(void) {
3431 /* Setup descriptor list except last one */
3432 for (i
= 0; i
< (NBR_OF_RX_DESC
- 1); i
++) {
3433 RxDescList
[i
].sw_len
= RX_DESC_BUF_SIZE
;
3434 RxDescList
[i
].command
= 0;
3435 RxDescList
[i
].next
= virt_to_phys(&RxDescList
[i
+ 1]);
3436 RxDescList
[i
].buf
= virt_to_phys(RxBuf
+ (i
* RX_DESC_BUF_SIZE
));
3437 RxDescList
[i
].hw_len
= 0;
3438 RxDescList
[i
].status
= 0;
3440 /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
3441 USB_IN_Desc for the relevant fields.) */
3442 prepare_rx_descriptor((struct etrax_dma_descr
*)&RxDescList
[i
]);
3445 /* Special handling of last descriptor */
3446 RxDescList
[i
].sw_len
= RX_DESC_BUF_SIZE
;
3447 RxDescList
[i
].command
= IO_STATE(USB_IN_command
, eol
, yes
);
3448 RxDescList
[i
].next
= virt_to_phys(&RxDescList
[0]);
3449 RxDescList
[i
].buf
= virt_to_phys(RxBuf
+ (i
* RX_DESC_BUF_SIZE
));
3450 RxDescList
[i
].hw_len
= 0;
3451 RxDescList
[i
].status
= 0;
3453 /* Setup list pointers that show progress in list */
3454 myNextRxDesc
= &RxDescList
[0];
3455 myLastRxDesc
= &RxDescList
[NBR_OF_RX_DESC
- 1];
3457 flush_etrax_cache();
3458 /* Point DMA to first descriptor in list and start it */
3459 *R_DMA_CH9_FIRST
= virt_to_phys(myNextRxDesc
);
3460 *R_DMA_CH9_CMD
= IO_STATE(R_DMA_CH9_CMD
, cmd
, start
);
/* Initialise the bulk TX EP descriptor list (one EP per epid, circular)
   plus, for each epid, a two-entry "dummy" EP list used to detect when the
   DMA channel is about to become disabled.

   NOTE(review): this copy of the file lost a few source lines; the loop
   counter declaration and closing braces were restored from context and
   marked "restored" — verify against version control. */
static void tc_dma_init_tx_bulk_list(void) {
  int i;                                         /* restored declaration */
  volatile struct USB_EP_Desc *epDescr;

  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
    epDescr = &(TxBulkEPList[i]);
    CHECK_ALIGN(epDescr);
    epDescr->hw_len = 0;
    /* Tag each slot with its own epid; not enabled yet. */
    epDescr->command = IO_FIELD(USB_EP_command, epid, i);
    /* NOTE(review): a dropped line here presumably cleared epDescr->sub;
       confirm against the original source. */
    epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]);

    /* Initiate two EPs, disabled and with the eol flag set. No need for any
       further setup until a transfer is queued. */

    /* The first one has the intr flag set so we get an interrupt when the DMA
       channel is about to become disabled. */
    CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
    TxBulkDummyEPList[i][0].hw_len = 0;
    TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
                                       IO_STATE(USB_EP_command, eol, yes) |
                                       IO_STATE(USB_EP_command, intr, yes));
    TxBulkDummyEPList[i][0].sub = 0;
    TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);

    /* The second one. */
    CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
    TxBulkDummyEPList[i][1].hw_len = 0;
    TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
                                       IO_STATE(USB_EP_command, eol, yes));
    TxBulkDummyEPList[i][1].sub = 0;
    /* The last dummy's next pointer is the same as the current EP's next pointer. */
    TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
  }

  /* Special handling of last descr in list, make list circular */
  epDescr = &TxBulkEPList[i];
  CHECK_ALIGN(epDescr);
  epDescr->hw_len = 0;
  epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
                     IO_FIELD(USB_EP_command, epid, i);
  /* NOTE(review): a dropped line here presumably cleared epDescr->sub. */
  epDescr->next = virt_to_phys(&TxBulkEPList[0]);

  /* Init DMA sub-channel pointers to last item in each list */
  *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
  /* No point in starting the bulk channel yet.
     *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
}
/* Initialise the control TX EP descriptor list: one disabled EP per epid,
   tagged with its epid, chained into a circular list; DMA 8 sub-channel 1
   is pointed at the last element but not started yet.

   NOTE(review): this copy lost a few source lines; the loop counter
   declaration and closing braces were restored from context. */
static void tc_dma_init_tx_ctrl_list(void) {
  int i;                                         /* restored declaration */
  volatile struct USB_EP_Desc *epDescr;

  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
    epDescr = &(TxCtrlEPList[i]);
    CHECK_ALIGN(epDescr);
    epDescr->hw_len = 0;
    epDescr->command = IO_FIELD(USB_EP_command, epid, i);
    /* NOTE(review): a dropped line here presumably cleared epDescr->sub. */
    epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]);
  }

  /* Special handling of last descr in list, make list circular */
  epDescr = &TxCtrlEPList[i];
  CHECK_ALIGN(epDescr);
  epDescr->hw_len = 0;
  epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
                     IO_FIELD(USB_EP_command, epid, i);
  /* NOTE(review): a dropped line here presumably cleared epDescr->sub. */
  epDescr->next = virt_to_phys(&TxCtrlEPList[0]);

  /* Init DMA sub-channel pointers to last item in each list */
  *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]);
  /* No point in starting the ctrl channel yet.
     NOTE(review): the commented-out start below names SUB0 inside the SUB1
     command — looks like a copy/paste typo in the original comment.
     *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
}
/* Initialise the periodic interrupt TX EP list: one EP per millisecond
   slot (MAX_INTR_INTERVAL entries, circular), each enabled, carrying the
   eof flag (frame boundary marker), tagged INVALID_EPID, and pointing at
   a shared zero-length-OUT SB so idle slots still produce valid traffic.

   NOTE(review): this copy lost a few source lines; the loop counter
   declaration and closing braces were restored from context. */
static void tc_dma_init_tx_intr_list(void) {
  int i;                                         /* restored declaration */

  /* Shared zero-length-OUT SB used by every idle slot; see the comment at
     the zout_buffer declaration. */
  TxIntrSB_zout.sw_len = 1;
  TxIntrSB_zout.next = 0;
  TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
  TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
                           IO_STATE(USB_SB_command, tt, zout) |
                           IO_STATE(USB_SB_command, full, yes) |
                           IO_STATE(USB_SB_command, eot, yes) |
                           IO_STATE(USB_SB_command, eol, yes));

  for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
    CHECK_ALIGN(&TxIntrEPList[i]);
    TxIntrEPList[i].hw_len = 0;
    /* eof marks a new frame; enabled so the controller walks the list. */
    TxIntrEPList[i].command =
      (IO_STATE(USB_EP_command, eof, yes) |
       IO_STATE(USB_EP_command, enable, yes) |
       IO_FIELD(USB_EP_command, epid, INVALID_EPID));
    TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
    TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
  }

  /* Special handling of last descr in list, make list circular */
  CHECK_ALIGN(&TxIntrEPList[i]);
  TxIntrEPList[i].hw_len = 0;
  TxIntrEPList[i].command =
    (IO_STATE(USB_EP_command, eof, yes) |
     IO_STATE(USB_EP_command, eol, yes) |
     IO_STATE(USB_EP_command, enable, yes) |
     IO_FIELD(USB_EP_command, epid, INVALID_EPID));
  TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
  TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);

  intr_dbg("Initiated Intr EP descriptor list\n");

  /* Connect DMA 8 sub-channel 2 to first in list */
  *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
}
/* Initialise the isochronous TX EP descriptor list: one disabled EP per
   epid (circular), plus an always-enabled dummy tail EP pointing at a
   shared zero-length-OUT SB so the eof interrupt keeps firing.  DMA 8
   sub-channel 3 is started immediately.

   NOTE(review): this copy lost a few source lines; the loop counter
   declaration and closing braces were restored from context. */
static void tc_dma_init_tx_isoc_list(void) {
  int i;                                         /* restored declaration */

  /* Read comment at zout_buffer declaration for an explanation to this. */
  TxIsocSB_zout.sw_len = 1;
  TxIsocSB_zout.next = 0;
  TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
  TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
                           IO_STATE(USB_SB_command, tt, zout) |
                           IO_STATE(USB_SB_command, full, yes) |
                           IO_STATE(USB_SB_command, eot, yes) |
                           IO_STATE(USB_SB_command, eol, yes));

  /* The last isochronous EP descriptor is a dummy. */
  for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
    CHECK_ALIGN(&TxIsocEPList[i]);
    TxIsocEPList[i].hw_len = 0;
    TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
    TxIsocEPList[i].sub = 0;
    TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
  }

  /* Special handling of the last (dummy) descriptor: make list circular. */
  CHECK_ALIGN(&TxIsocEPList[i]);
  TxIsocEPList[i].hw_len = 0;

  /* Must enable the last EP descr to get eof interrupt. */
  TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
                             IO_STATE(USB_EP_command, eof, yes) |
                             IO_STATE(USB_EP_command, eol, yes) |
                             IO_FIELD(USB_EP_command, epid, INVALID_EPID));
  TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
  TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);

  *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
}
/* One-time DMA setup for the controller: build all descriptor lists,
   claim the TX/RX DMA channels, unmask the descriptor/eop interrupts and
   register the RX/TX interrupt handlers (dev_id cookie is `hcd`).
   Returns 0 on success, negative errno otherwise.

   NOTE(review): this copy lost scattered source lines; the final
   cris_request_dma argument, the error returns and the register-write
   left-hand sides were restored from context and marked "restored". */
static int tc_dma_init(struct usb_hcd *hcd) {
  tc_dma_init_rx_list();
  tc_dma_init_tx_bulk_list();
  tc_dma_init_tx_ctrl_list();
  tc_dma_init_tx_intr_list();
  tc_dma_init_tx_isoc_list();

  if (cris_request_dma(USB_TX_DMA_NBR,
                       "ETRAX 100LX built-in USB (Tx)",
                       DMA_VERBOSE_ON_ERROR,
                       dma_usb)) {               /* restored argument */
    err("Could not allocate DMA ch 8 for USB");
    return -EBUSY;                               /* restored error return */
  }

  if (cris_request_dma(USB_RX_DMA_NBR,
                       "ETRAX 100LX built-in USB (Rx)",
                       DMA_VERBOSE_ON_ERROR,
                       dma_usb)) {               /* restored argument */
    err("Could not allocate DMA ch 9 for USB");
    return -EBUSY;                               /* restored error return */
  }

  *R_IRQ_MASK2_SET =                             /* restored LHS */
    /* Note that these interrupts are not used. */
    IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
    /* Sub channel 1 (ctrl) descr. interrupts are used. */
    IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
    IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
    /* Sub channel 3 (isoc) descr. interrupts are used. */
    IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);

  /* Note that the dma9_descr interrupt is not used. */
  *R_IRQ_MASK2_SET =                             /* restored LHS */
    IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
    IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);

  if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0,
                  "ETRAX 100LX built-in USB (Rx)", hcd)) {
    err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
    return -EBUSY;                               /* restored error return */
  }

  if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0,
                  "ETRAX 100LX built-in USB (Tx)", hcd)) {
    err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
    return -EBUSY;                               /* restored error return */
  }

  return 0;                                      /* restored */
}
/* Release the IRQs and DMA channels acquired by tc_dma_init(). */
static void tc_dma_destroy(void) {
  /* NOTE(review): tc_dma_init() registered these handlers with `hcd` as
     the dev_id cookie, but free_irq() is called with NULL here.
     free_irq() matches handlers by dev_id, so this likely fails to
     release them — confirm and pass the same cookie used at request
     time (requires plumbing the hcd pointer into this function or a
     file-scope variable; the void signature prevents a local fix). */
  free_irq(ETRAX_USB_RX_IRQ, NULL);
  free_irq(ETRAX_USB_TX_IRQ, NULL);

  cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)");
  cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)");
}
3683 static void tc_dma_link_intr_urb(struct urb
*urb
);
/* Handle processing of Bulk, Ctrl and Intr queues */
/* Promote the first queued URB for `epid` to active (if the endpoint is
   enabled and idle), preset the data toggles, and kick the matching DMA
   sub-channel.  Isochronous URBs are handled elsewhere.

   NOTE(review): this copy lost scattered source lines; declarations,
   early returns and the switch/case labels were restored from context and
   marked "restored" — verify against version control. */
static void tc_dma_process_queue(int epid) {
  struct urb *urb;                               /* restored declaration */
  struct crisv10_urb_priv *urb_priv;
  unsigned long flags;
  int toggle;                                    /* restored declaration */

  if (epid_state[epid].disabled) {
    /* Don't process any URBs on a disabled endpoint */
    return;                                      /* restored */
  }

  /* Do not disturb us while fiddling with EPs and epids */
  local_irq_save(flags);

  /* For bulk, Ctrl and Intr can we only have one URB active at a time for
     a given endpoint. */
  if (activeUrbList[epid] != NULL) {
    /* An URB is already active on EP, skip checking queue */
    local_irq_restore(flags);
    return;                                      /* restored */
  }

  urb = urb_list_first(epid);
  if (urb == NULL) {                             /* restored guard */
    /* No URB waiting in EP queue. Nothing do to */
    local_irq_restore(flags);
    return;                                      /* restored */
  }

  urb_priv = urb->hcpriv;
  ASSERT(urb_priv != NULL);
  ASSERT(urb_priv->urb_state == NOT_STARTED);
  ASSERT(!usb_pipeisoc(urb->pipe));

  /* Remove this URB from the queue and move it to active */
  activeUrbList[epid] = urb;
  urb_list_del(urb, epid);

  urb_priv->urb_state = STARTED;

  /* Reset error counters (regardless of which direction this traffic is). */
  etrax_epid_clear_error(epid);

  /* Special handling of Intr EP lists */
  if (usb_pipeint(urb->pipe)) {
    tc_dma_link_intr_urb(urb);
    local_irq_restore(flags);
    return;                                      /* restored */
  }

  /* Software must preset the toggle bits for Bulk and Ctrl */
  if (usb_pipecontrol(urb->pipe)) {
    /* Toggle bits are initialized only during setup transaction in a
       control transfer, so start both directions at 0. */
    etrax_epid_set_toggle(epid, 0, 0);
    etrax_epid_set_toggle(epid, 1, 0);
  } else {                                       /* restored */
    toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                           usb_pipeout(urb->pipe));
    etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle);
  }

  tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
         (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid,
         sblist_to_str(urb_priv->first_sb));

  /* We start the DMA sub channel without checking if it's running or not,
     because:
     1) If it's already running, issuing the start command is a nop.
     2) We avoid a test-and-set race condition. */
  switch (usb_pipetype(urb->pipe)) {
  case PIPE_BULK:                                /* restored label */
    /* Assert that the EP descriptor is disabled. */
    ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));

    /* Set up and enable the EP descriptor. */
    TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
    TxBulkEPList[epid].hw_len = 0;
    TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);

    /* Check if the dummy list is already with us (if several urbs were queued). */
    if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) {
      tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d",
             (unsigned long)urb, epid);

      /* We don't need to check if the DMA is at this EP or not before changing the
         next pointer, since we will do it in one 32-bit write (EP descriptors are
         32-bit aligned). */
      TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
    }

    restart_dma8_sub0();

    /* Update/restart the bulk start timer since we just started the channel.*/
    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
    /* Update/restart the bulk eot timer since we just inserted traffic. */
    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
    break;                                       /* restored */

  case PIPE_CONTROL:                             /* restored label */
    /* Assert that the EP descriptor is disabled. */
    ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));

    /* Set up and enable the EP descriptor. */
    TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
    TxCtrlEPList[epid].hw_len = 0;
    TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);

    *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
    break;                                       /* restored */
  }
  local_irq_restore(flags);
}
/* Link the pre-allocated interrupt EP descriptors of an Intr URB into the
   periodic TxIntrEPList at the URB's interval, then (re)start DMA 8
   sub-channel 2.

   NOTE(review): this copy lost scattered source lines; counters, the
   do/while skeleton and the in/out branch split were restored from
   context and marked "restored" — verify against version control. */
static void tc_dma_link_intr_urb(struct urb *urb) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  volatile struct USB_EP_Desc *tmp_ep;
  struct USB_EP_Desc *ep_desc;
  int i = 0, epid;                               /* restored declarations */
  int pool_idx = 0;                              /* restored declaration */

  ASSERT(urb_priv != NULL);
  epid = urb_priv->epid;
  ASSERT(urb_priv->interval > 0);
  ASSERT(urb_priv->intr_ep_pool_length > 0);

  tmp_ep = &TxIntrEPList[0];

  /* Only insert one EP descriptor in list for Out Intr URBs.
     We can only handle Out Intr with interval of 128ms because
     it's not possible to insert several Out Intr EPs because they
     are not consumed by the DMA. */
  if (usb_pipeout(urb->pipe)) {
    ep_desc = urb_priv->intr_ep_pool[0];
    /* Splice the single EP in right after the list head. */
    ep_desc->next = tmp_ep->next;
    tmp_ep->next = virt_to_phys(ep_desc);
  } else {                                       /* restored */
    /* Loop through Intr EP descriptor list and insert EP for URB at
       specified interval */
    do {                                         /* restored */
      /* Each EP descriptor with eof flag sat signals a new frame */
      if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
        /* Insert a EP from URBs EP pool at correct interval */
        if ((i % urb_priv->interval) == 0) {
          ep_desc = urb_priv->intr_ep_pool[pool_idx];
          /* Splice in after the frame-boundary EP. */
          ep_desc->next = tmp_ep->next;
          tmp_ep->next = virt_to_phys(ep_desc);
          pool_idx++;                            /* restored */
          ASSERT(pool_idx <= urb_priv->intr_ep_pool_length);
        }
        i++;                                     /* restored */
      }
      tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next);
    } while (tmp_ep != &TxIntrEPList[0]);
  }

  intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid,
           sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx);

  /* We start the DMA sub channel without checking if it's running or not,
     because:
     1) If it's already running, issuing the start command is a nop.
     2) We avoid a test-and-set race condition. */
  *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
}
/* Start or append an isochronous URB on its endpoint: if the EP is idle
   the URB becomes active and its SB list is attached (IN EPs get a single
   copied SB, OUT EPs the URB's pre-built chain); if an URB is already
   active, OUT traffic is spliced onto the tail of the running SB list and
   the EP re-enabled if the controller stopped before reaching it.  The
   isoc DMA sub-channel is (re)started at the end.

   NOTE(review): this copy lost scattered source lines; declarations,
   braces and some debug-format continuations were restored from context
   and marked "restored" — verify against version control. */
static void tc_dma_process_isoc_urb(struct urb *urb) {
  unsigned long flags;
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid;                                      /* restored declaration */

  /* Do not disturb us while fiddling with EPs and epids */
  local_irq_save(flags);

  ASSERT(urb_priv);
  ASSERT(urb_priv->first_sb);
  epid = urb_priv->epid;

  if (activeUrbList[epid] == NULL) {
    /* EP is idle, so make this URB active */
    activeUrbList[epid] = urb;
    urb_list_del(urb, epid);
    ASSERT(TxIsocEPList[epid].sub == 0);
    ASSERT(!(TxIsocEPList[epid].command &
             IO_STATE(USB_EP_command, enable, yes)));

    /* Differentiate between In and Out Isoc. Because In SBs are not consumed*/
    if (usb_pipein(urb->pipe)) {
      /* Each EP for In Isoc will have only one SB descriptor, setup when
         submitting the first active urb. We do it here by copying from URBs
         pre-allocated SB. */
      memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb,
             sizeof(TxIsocSBList[epid]));
      TxIsocEPList[epid].hw_len = 0;
      TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid]));
    } else {                                     /* restored */
      /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
      TxIsocEPList[epid].hw_len = 0;
      TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
    }

    isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
             " last_sb:0x%x\n",                  /* restored continuation */
             (unsigned int)urb, urb_priv->urb_num, epid,
             (unsigned int)(urb_priv->first_sb),
             (unsigned int)(urb_priv->last_sb));

    if (urb->transfer_flags & URB_ISO_ASAP) {
      /* The isoc transfer should be started as soon as possible. The
         start_frame field is a return value if URB_ISO_ASAP was set. Comparing
         R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
         token is sent 2 frames later. I'm not sure how this affects usage of
         the start_frame field by the device driver, or how it affects things
         when USB_ISO_ASAP is not set, so therefore there's no compensation for
         the 2 frame "lag" here. */
      urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
      TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
      urb_priv->urb_state = STARTED;
      isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
               urb->start_frame);                /* restored argument */
    } else {                                     /* restored */
      /* Not started yet. */
      urb_priv->urb_state = NOT_STARTED;
      isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
                (unsigned int)urb);              /* restored argument */
    }

  } else {                                       /* restored */
    /* An URB is already active on the EP. Leave URB in queue and let
       finish_isoc_urb process it after current active URB */
    ASSERT(TxIsocEPList[epid].sub != 0);

    if (usb_pipein(urb->pipe)) {
      /* Because there already is a active In URB on this epid we do nothing
         and the finish_isoc_urb() function will handle switching to next URB*/

    } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */
      struct USB_SB_Desc *temp_sb_desc;

      /* Set state STARTED to all Out Isoc URBs added to SB list because we
         don't know how many of them that are finished before descr interrupt*/
      urb_priv->urb_state = STARTED;

      /* Find end of current SB list by looking for SB with eol flag sat */
      temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
      while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
             IO_STATE(USB_SB_command, eol, yes)) {
        ASSERT(temp_sb_desc->next);
        temp_sb_desc = phys_to_virt(temp_sb_desc->next);
      }

      isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
               " sub:0x%x eol:0x%x\n",
               (unsigned int)urb, urb_priv->urb_num,
               (unsigned int)(urb_priv->first_sb),
               (unsigned int)(urb_priv->last_sb), epid,
               (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
               (unsigned int)temp_sb_desc);

      /* Next pointer must be set before eol is removed. */
      temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
      /* Clear the previous end of list flag since there is a new in the
         added SB descriptor list. */
      temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);

      if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
        int epid_data;                           /* restored declaration */
        /* 8.8.5 in Designer's Reference says we should check for and correct
           any errors in the EP here. That should not be necessary if
           epid_attn is handled correctly, so we assume all is ok. */
        epid_data = etrax_epid_iso_get(epid);
        if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) !=
            IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
          isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
                   " URB:0x%x[%d]\n",            /* restored continuation */
                   IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid,
                   (unsigned int)urb, urb_priv->urb_num);
        }

        /* The SB list was exhausted. */
        if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
          /* The new sublist did not get processed before the EP was
             disabled. Setup the EP again. */
          if (virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) {
            isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted"
                     ", restarting from this URBs SB:0x%x\n",
                     epid, (unsigned int)temp_sb_desc,
                     (unsigned int)(urb_priv->first_sb));
            TxIsocEPList[epid].hw_len = 0;
            TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
            urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
            /* Enable the EP again so data gets processed this time */
            TxIsocEPList[epid].command |=
              IO_STATE(USB_EP_command, enable, yes);
          } else {                               /* restored */
            /* The EP has been disabled but not at end this URB (god knows
               where). This should generate an epid_attn so we should not be
               here. */
            isoc_warn("EP was disabled on sb:0x%x before SB list for"
                      " URB:0x%x[%d] got processed\n",
                      (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
                      (unsigned int)urb, urb_priv->urb_num);
          }
        } else {                                 /* restored */
          /* This might happend if we are slow on this function and isn't
             an error. */
          isoc_dbg("EP was disabled and finished with SBs from appended"
                   " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num);
        }
      }
    }
  }

  /* Start the DMA sub channel */
  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);

  local_irq_restore(flags);
}
/* Remove all interrupt EP descriptors belonging to this URB's epid from
   the periodic TxIntrEPList.  Two passes: first disable every matching EP,
   then unlink each one, busy-waiting until the DMA pointer has moved off
   the descriptor being removed (see Designer's Reference 8.8.4).

   NOTE(review): this copy lost scattered source lines; the counter
   declaration, do/while skeletons and timeout check were restored from
   context and marked "restored" — verify against version control. */
static void tc_dma_unlink_intr_urb(struct urb *urb) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  volatile struct USB_EP_Desc *first_ep;  /* First EP in the list. */
  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */
  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */
  volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from
                                             the list. */
  int epid;                                      /* restored declaration */
  int count = 0;                                 /* restored declaration */
  volatile int timeout = 10000;

  /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
     List". */
  ASSERT(urb_priv->intr_ep_pool_length > 0);
  epid = urb_priv->epid;

  /* First disable all Intr EPs belonging to epid for this URB */
  first_ep = &TxIntrEPList[0];
  curr_ep = first_ep;                            /* restored */
  do {                                           /* restored */
    next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
    if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
      /* Disable the EP so the controller stops processing it. */
      next_ep->command &= ~IO_MASK(USB_EP_command, enable);
    }
    curr_ep = phys_to_virt(curr_ep->next);
  } while (curr_ep != first_ep);

  /* Now unlink all EPs belonging to this epid from Descr list */
  first_ep = &TxIntrEPList[0];
  curr_ep = first_ep;                            /* restored */
  do {                                           /* restored */
    next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
    if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
      /* This is the one we should unlink. */
      unlink_ep = next_ep;

      /* Actually unlink the EP from the DMA list. */
      curr_ep->next = unlink_ep->next;

      /* Wait until the DMA is no longer at this descriptor. */
      while ((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) &&
             (timeout-- > 0));                   /* restored loop tail */
      if (timeout == 0) {                        /* restored check */
        warn("Timeout while waiting for DMA-TX-Intr to leave unlink EP\n");
      }
      count++;                                   /* restored */
    }
    curr_ep = phys_to_virt(curr_ep->next);
  } while (curr_ep != first_ep);

  if (count != urb_priv->intr_ep_pool_length) {
    intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count,
              urb_priv->intr_ep_pool_length, (unsigned int)urb,
              urb_priv->urb_num);                /* restored argument */
  } else {                                       /* restored */
    intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count,
             urb_priv->intr_ep_pool_length, (unsigned int)urb);
  }
}
/* Scan all epids for bulk TX EP descriptors that the controller has
   finished (disabled with a valid sub pointer) and complete their active
   OUT URBs; IN traffic is left to the RX interrupt handler.

   NOTE(review): this copy lost scattered source lines; the second
   parameter, local declarations, continue statements and branch skeleton
   were restored from context and marked "restored" — in particular the
   branch placement of the final "Ignoring In Bulk" debug print could not
   be recovered exactly; verify against version control. */
static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
                                         int timer) {  /* restored param */
  unsigned long flags;
  int epid;                                      /* restored declaration */
  struct urb *urb;                               /* restored declaration */
  struct crisv10_urb_priv * urb_priv;
  int epid_data;                                 /* restored declaration */

  /* Protect TxEPList */
  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
    /* A finished EP descriptor is disabled and has a valid sub pointer */
    if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
        (TxBulkEPList[epid].sub != 0)) {

      /* Get the active URB for this epid */
      urb = activeUrbList[epid];
      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;

      /* Only handle finished out Bulk EPs here,
         and let RX interrupt take care of the rest */
      if (!epid_out_traffic(epid)) {
        tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid);
        continue;                                /* restored */
      }

      if (timer) {                               /* restored branch */
        tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
                epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
                urb_priv->urb_num);              /* restored argument */
      } else {                                   /* restored */
        tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
               epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
               urb_priv->urb_num);               /* restored argument */
      }

      if (urb_priv->urb_state == UNLINK) {
        /* This Bulk URB is requested to be unlinked, that means that the EP
           has been disabled and we might not have sent all data */
        tc_finish_urb(hcd, urb, urb->status);
        continue;                                /* restored */
      }

      ASSERT(urb_priv->urb_state == STARTED);
      if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
        tc_err("Endpoint got disabled before reaching last sb\n");
      }

      epid_data = etrax_epid_get(epid);
      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
          IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
        /* This means that the endpoint has no error, is disabled
           and had inserted traffic, i.e. transfer successfully completed. */
        tc_finish_urb(hcd, urb, 0);
      } else {                                   /* restored */
        /* Shouldn't happen. We expect errors to be caught by epid
           attention interrupts instead. */
        tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
               epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
      }
    }
  }
  local_irq_restore(flags);
}
/* check_finished_ctrl_tx_epids() -- scan every epid (skipping DUMMY_EPID)
   for a Control EP descriptor the DMA has finished (disabled, non-zero sub).
   For an error-free In Ctrl transfer the URB is completed here only if the
   RX interrupt already ran (urb_priv->ctrl_rx_done); otherwise
   ctrl_zout_done is set and the RX interrupt finishes the URB later.
   Runs with interrupts disabled to protect TxCtrlEPList.
   NOTE(review): damaged extraction -- original lines are missing between the
   numbered fragments below (e.g. NULL-URB guard after 4163, braces); verify
   against the pristine file before editing. */
4144 static void check_finished_ctrl_tx_epids(struct usb_hcd
*hcd
) {
4145 unsigned long flags
;
4148 struct crisv10_urb_priv
* urb_priv
;
4151 /* Protect TxEPList */
4152 local_irq_save(flags
);
4154 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
4155 if(epid
== DUMMY_EPID
)
4158 /* A finished EP descriptor is disabled and has a valid sub pointer */
4159 if (!(TxCtrlEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) &&
4160 (TxCtrlEPList
[epid
].sub
!= 0)) {
4162 /* Get the active URB for this epid */
4163 urb
= activeUrbList
[epid
];
4166 tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid
);
4171 ASSERT(usb_pipein(urb
->pipe
));
4172 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4174 if (phys_to_virt(TxCtrlEPList
[epid
].sub
) != urb_priv
->last_sb
) {
4175 tc_err("Endpoint got disabled before reaching last sb\n");
4178 epid_data
= etrax_epid_get(epid
);
4179 if (IO_EXTRACT(R_USB_EPT_DATA
, error_code
, epid_data
) ==
4180 IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
, no_error
)) {
4181 /* This means that the endpoint has no error, is disabled
4182 and had inserted traffic, i.e. transfer successfully completed. */
4184 /* Check if RX-interrupt for In Ctrl has been processed before
4185 finishing the URB */
4186 if(urb_priv
->ctrl_rx_done
) {
4187 tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
4188 (unsigned int)urb
, urb_priv
->urb_num
);
4189 tc_finish_urb(hcd
, urb
, 0);
4191 /* If we get zout descriptor interrupt before RX was done for a
4192 In Ctrl transfer, then we flag that and it will be finished
4193 in the RX-Interrupt */
4194 urb_priv
->ctrl_zout_done
= 1;
4195 tc_dbg("Got zout descr interrupt before RX interrupt\n");
4198 /* Shouldn't happen. We expect errors to be caught by epid
4200 tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid
, (unsigned int)urb
, urb_priv
->urb_num
, IO_EXTRACT(R_USB_EPT_DATA
, error_code
, epid_data
));
4201 __dump_ep_desc(&(TxCtrlEPList
[epid
]));
4202 __dump_ept_data(epid
);
4206 local_irq_restore(flags
);
/* check_finished_isoc_tx_epids() -- fast interrupt-path pass over all Out
   Isoc epids: walk each epid's SB list to the first descriptor with the
   intr bit set (the last SB of the currently active URB), then walk the
   URB queue marking every already-transmitted URB with isoc_out_done.
   Completion is deliberately deferred to complete_isoc_bottom_half() so
   the interrupt routine returns quickly.  Runs with IRQs disabled to
   protect TxIsocEPList.
   NOTE(review): damaged extraction -- original lines (braces, `continue`,
   loop bookkeeping around 4230-4231, 4241-4243, 4282-4285) are missing
   between the numbered fragments; verify against the pristine file. */
4209 /* This function goes through all epids that are setup for Out Isoc transfers
4210 and marks (isoc_out_done) all queued URBs that the DMA has finished
4212 No URB completetion is done here to make interrupt routine return quickly.
4213 URBs are completed later with help of complete_isoc_bottom_half() that
4214 becomes schedules when this functions is finished. */
4215 static void check_finished_isoc_tx_epids(void) {
4216 unsigned long flags
;
4219 struct crisv10_urb_priv
* urb_priv
;
4220 struct USB_SB_Desc
* sb_desc
;
4223 /* Protect TxIsocEPList */
4224 local_irq_save(flags
);
4226 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
4227 if (TxIsocEPList
[epid
].sub
== 0 || epid
== INVALID_EPID
||
4228 !epid_out_traffic(epid
)) {
4229 /* Nothing here to see. */
4232 ASSERT(epid_inuse(epid
));
4233 ASSERT(epid_isoc(epid
));
4235 sb_desc
= phys_to_virt(TxIsocEPList
[epid
].sub
);
4236 /* Find the last descriptor of the currently active URB for this ep.
4237 This is the first descriptor in the sub list marked for a descriptor
4239 while (sb_desc
&& !IO_EXTRACT(USB_SB_command
, intr
, sb_desc
->command
)) {
4240 sb_desc
= sb_desc
->next
? phys_to_virt(sb_desc
->next
) : 0;
4244 isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
4245 epid
, (unsigned int)phys_to_virt(TxIsocEPList
[epid
].sub
),
4246 (unsigned int)sb_desc
);
4248 urb
= activeUrbList
[epid
];
4250 isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid
);
4255 while(urb
&& !epid_done
) {
4257 ASSERT(usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
);
4258 ASSERT(usb_pipeout(urb
->pipe
));
4260 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4262 ASSERT(urb_priv
->urb_state
== STARTED
||
4263 urb_priv
->urb_state
== UNLINK
);
4265 if (sb_desc
!= urb_priv
->last_sb
) {
4266 /* This urb has been sent. */
4267 urb_priv
->isoc_out_done
= 1;
4269 } else { /* Found URB that has last_sb as the interrupt reason */
4271 /* Check if EP has been disabled, meaning that all transfers are done*/
4272 if(!(TxIsocEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
))) {
4273 ASSERT((sb_desc
->command
& IO_MASK(USB_SB_command
, eol
)) ==
4274 IO_STATE(USB_SB_command
, eol
, yes
));
4275 ASSERT(sb_desc
->next
== 0);
4276 urb_priv
->isoc_out_done
= 1;
4278 isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
4279 (unsigned int)urb
, urb_priv
->urb_num
);
4281 /* Stop looking any further in queue */
/* Advance: from the active URB to the head of the queued list, then
   through the queue via urb_list_next(). */
4286 if(urb
== activeUrbList
[epid
]) {
4287 urb
= urb_list_first(epid
);
4289 urb
= urb_list_next(urb
, epid
);
4292 } /* END: while(urb && !epid_done) */
4295 local_irq_restore(flags
);
/* complete_isoc_bottom_half() -- workqueue bottom half scheduled from
   tc_dma_tx_interrupt().  For each valid Out Isoc epid it walks the URB
   queue, completing (tc_finish_urb with status 0) every URB that
   check_finished_isoc_tx_epids() marked isoc_out_done, and stops at the
   first unmarked URB.  Each completed packet's actual_length is set to its
   requested length.  Frees the crisv10_isoc_complete_data that carried the
   work item (allocated from isoc_compl_cache by the top half).
   Runs with IRQs disabled around the list traversal.
   NOTE(review): damaged extraction -- guards/braces and loop control
   (e.g. around 4309-4313, 4336-4342, 4354-4357) are missing between the
   numbered fragments; verify against the pristine file before editing.
   NOTE(review): the epid loop bound here is NBR_OF_EPIDS - 1, unlike the
   other scanners which use NBR_OF_EPIDS -- confirm intent in pristine
   source. */
4299 /* This is where the Out Isoc URBs are realy completed. This function is
4300 scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
4301 are done. This functions completes all URBs earlier marked with
4302 isoc_out_done by fast interrupt routine check_finished_isoc_tx_epids() */
4304 static void complete_isoc_bottom_half(struct work_struct
* work
) {
4305 struct crisv10_isoc_complete_data
*comp_data
;
4306 struct usb_iso_packet_descriptor
*packet
;
4307 struct crisv10_urb_priv
* urb_priv
;
4308 unsigned long flags
;
4314 comp_data
= container_of(work
, struct crisv10_isoc_complete_data
, usb_bh
);
4316 local_irq_save(flags
);
4318 for (epid
= 0; epid
< NBR_OF_EPIDS
- 1; epid
++) {
4319 if(!epid_inuse(epid
) || !epid_isoc(epid
) || !epid_out_traffic(epid
) || epid
== DUMMY_EPID
) {
4320 /* Only check valid Out Isoc epids */
4324 isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid
,
4325 (unsigned int)phys_to_virt(TxIsocEPList
[epid
].sub
));
4327 /* The descriptor interrupt handler has marked all transmitted Out Isoc
4328 URBs with isoc_out_done. Now we traverse all epids and for all that
4329 have out Isoc traffic we traverse its URB list and complete the
4330 transmitted URBs. */
4332 while (!epid_done
) {
4334 /* Get the active urb (if any) */
4335 urb
= activeUrbList
[epid
];
4337 isoc_dbg("No active URB on epid:%d anymore\n", epid
);
4343 ASSERT(usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
);
4344 ASSERT(usb_pipeout(urb
->pipe
));
4346 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4349 if (!(urb_priv
->isoc_out_done
)) {
4350 /* We have reached URB that isn't flaged done yet, stop traversing. */
4351 isoc_dbg("Stoped traversing Out Isoc URBs on epid:%d"
4352 " before not yet flaged URB:0x%x[%d]\n",
4353 epid
, (unsigned int)urb
, urb_priv
->urb_num
);
4358 /* This urb has been sent. */
4359 isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n",
4360 (unsigned int)urb
, urb_priv
->urb_num
);
4362 /* Set ok on transfered packets for this URB and finish it */
4363 for (i
= 0; i
< urb
->number_of_packets
; i
++) {
4364 packet
= &urb
->iso_frame_desc
[i
];
4366 packet
->actual_length
= packet
->length
;
4368 urb_priv
->isoc_packet_counter
= urb
->number_of_packets
;
4369 tc_finish_urb(comp_data
->hcd
, urb
, 0);
4371 } /* END: while(!epid_done) */
4372 } /* END: for(epid...) */
4374 local_irq_restore(flags
);
4375 kmem_cache_free(isoc_compl_cache
, comp_data
);
/* check_finished_intr_tx_epids() -- for every in-use Out Interrupt epid,
   walk the EP ring between TxIntrEPList[0] and TxIntrEPList[1] looking for
   the first EP of that epid's intr_ep_pool; when found, disable that EP so
   it is not processed again and complete the active URB with status 0.
   Runs with IRQs disabled to protect TxIntrEPList.
   NOTE(review): damaged extraction -- the `do {` opening the ring walk, the
   NULL-URB guard after 4396, and closing braces are among the original
   lines missing between the numbered fragments (cf. `} while` at 4424);
   verify against the pristine file before editing. */
4379 static void check_finished_intr_tx_epids(struct usb_hcd
*hcd
) {
4380 unsigned long flags
;
4383 struct crisv10_urb_priv
* urb_priv
;
4384 volatile struct USB_EP_Desc
*curr_ep
; /* Current EP, the iterator. */
4385 volatile struct USB_EP_Desc
*next_ep
; /* The EP after current. */
4387 /* Protect TxintrEPList */
4388 local_irq_save(flags
);
4390 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
4391 if(!epid_inuse(epid
) || !epid_intr(epid
) || !epid_out_traffic(epid
)) {
4392 /* Nothing to see on this epid. Only check valid Out Intr epids */
4396 urb
= activeUrbList
[epid
];
4398 intr_warn("Found Out Intr epid:%d with no active URB\n", epid
);
4403 ASSERT(usb_pipetype(urb
->pipe
) == PIPE_INTERRUPT
);
4404 ASSERT(usb_pipeout(urb
->pipe
));
4406 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4409 /* Go through EPs between first and second sof-EP. It's here Out Intr EPs
4411 curr_ep
= &TxIntrEPList
[0];
4413 next_ep
= (struct USB_EP_Desc
*)phys_to_virt(curr_ep
->next
);
4414 if(next_ep
== urb_priv
->intr_ep_pool
[0]) {
4415 /* We found the Out Intr EP for this epid */
4417 /* Disable it so it doesn't get processed again */
4418 next_ep
->command
&= ~IO_MASK(USB_EP_command
, enable
);
4420 /* Finish the active Out Intr URB with status OK */
4421 tc_finish_urb(hcd
, urb
, 0);
4423 curr_ep
= phys_to_virt(curr_ep
->next
);
4424 } while (curr_ep
!= &TxIntrEPList
[1]);
4427 local_irq_restore(flags
);
/* tc_dma_tx_interrupt() -- hard-IRQ handler for DMA8/IRQ24.  Demultiplexes
   the four TX sub-channel descriptor interrupts via R_IRQ_READ2:
     sub0 = Bulk  -> ack and restart the bulk channel,
     sub1 = Ctrl  -> ack and check_finished_ctrl_tx_epids(),
     sub2 = Intr  -> ack and check_finished_intr_tx_epids(),
     sub3 = Isoc  -> mark done URBs (check_finished_isoc_tx_epids), ack,
                     then schedule complete_isoc_bottom_half() as a work
                     item carrying the hcd in a cache-allocated
                     crisv10_isoc_complete_data.
   NOTE(review): damaged extraction -- closing braces and the final
   `return IRQ_HANDLED;` style line are among original lines missing here;
   verify against the pristine file.
   NOTE(review): the GFP_ATOMIC kmem_cache_alloc() result is only checked
   with ASSERT(); if ASSERT compiles out, a failed atomic allocation would
   be dereferenced -- confirm handling in pristine source. */
4430 /* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */
4431 static irqreturn_t
tc_dma_tx_interrupt(int irq
, void *vhc
) {
4432 struct usb_hcd
*hcd
= (struct usb_hcd
*)vhc
;
4435 if (*R_IRQ_READ2
& IO_MASK(R_IRQ_READ2
, dma8_sub0_descr
)) {
4436 /* Clear this interrupt */
4437 *R_DMA_CH8_SUB0_CLR_INTR
= IO_STATE(R_DMA_CH8_SUB0_CLR_INTR
, clr_descr
, do);
4438 restart_dma8_sub0();
4441 if (*R_IRQ_READ2
& IO_MASK(R_IRQ_READ2
, dma8_sub1_descr
)) {
4442 /* Clear this interrupt */
4443 *R_DMA_CH8_SUB1_CLR_INTR
= IO_STATE(R_DMA_CH8_SUB1_CLR_INTR
, clr_descr
, do);
4444 check_finished_ctrl_tx_epids(hcd
);
4447 if (*R_IRQ_READ2
& IO_MASK(R_IRQ_READ2
, dma8_sub2_descr
)) {
4448 /* Clear this interrupt */
4449 *R_DMA_CH8_SUB2_CLR_INTR
= IO_STATE(R_DMA_CH8_SUB2_CLR_INTR
, clr_descr
, do);
4450 check_finished_intr_tx_epids(hcd
);
4453 if (*R_IRQ_READ2
& IO_MASK(R_IRQ_READ2
, dma8_sub3_descr
)) {
4454 struct crisv10_isoc_complete_data
* comp_data
;
4456 /* Flag done Out Isoc for later completion */
4457 check_finished_isoc_tx_epids();
4459 /* Clear this interrupt */
4460 *R_DMA_CH8_SUB3_CLR_INTR
= IO_STATE(R_DMA_CH8_SUB3_CLR_INTR
, clr_descr
, do);
4461 /* Schedule bottom half of Out Isoc completion function. This function
4462 finishes the URBs marked with isoc_out_done */
4463 comp_data
= (struct crisv10_isoc_complete_data
*)
4464 kmem_cache_alloc(isoc_compl_cache
, GFP_ATOMIC
);
4465 ASSERT(comp_data
!= NULL
);
4466 comp_data
->hcd
= hcd
;
4468 INIT_WORK(&comp_data
->usb_bh
, complete_isoc_bottom_half
);
4469 schedule_work(&comp_data
->usb_bh
);
/* tc_dma_rx_interrupt() -- hard-IRQ handler for DMA9/IRQ25 (USB RX).
   Acks the eop interrupt, then with IRQs disabled consumes every RX
   descriptor with the eop bit set (each packet is assumed to fit in one
   descriptor):
     - Bulk/Ctrl/Intr: copy descriptor data into the URB buffer at
       urb_priv->rx_offset (unless the URB is being unlinked), flag
       -EOVERFLOW via tc_finish_urb_later() if it would not fit, and on
       eot either complete the URB or -- for In Ctrl before the zout
       descriptor interrupt -- set ctrl_rx_done and defer completion.
     - Isoc: fill the current iso_frame_desc packet (0 bytes on nodata),
       advance isoc_packet_counter and complete the URB when all packets
       have arrived.
   Isoc nodata with a valid, error-free R_USB_EPT_DATA_ISO is treated as a
   missed frame, not a real error.  Each consumed descriptor is recycled:
   cleared, made the new eol, and the channel restarted via R_DMA_CH9_CMD.
   NOTE(review): damaged extraction -- guards (NULL-URB after 4498,
   real_error declaration/branch around 4507/4522-4524), braces and blank
   lines are missing between the numbered fragments; verify against the
   pristine file before editing. */
4475 /* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */
4476 static irqreturn_t
tc_dma_rx_interrupt(int irq
, void *vhc
) {
4477 unsigned long flags
;
4479 struct usb_hcd
*hcd
= (struct usb_hcd
*)vhc
;
4480 struct crisv10_urb_priv
*urb_priv
;
4486 /* Clear this interrupt. */
4487 *R_DMA_CH9_CLR_INTR
= IO_STATE(R_DMA_CH9_CLR_INTR
, clr_eop
, do);
4489 /* Custom clear interrupt for this interrupt */
4490 /* The reason we cli here is that we call the driver's callback functions. */
4491 local_irq_save(flags
);
4493 /* Note that this while loop assumes that all packets span only
4494 one rx descriptor. */
4495 while(myNextRxDesc
->status
& IO_MASK(USB_IN_status
, eop
)) {
4496 epid
= IO_EXTRACT(USB_IN_status
, epid
, myNextRxDesc
->status
);
4497 /* Get the active URB for this epid */
4498 urb
= activeUrbList
[epid
];
4500 ASSERT(epid_inuse(epid
));
4502 dma_err("No urb for epid %d in rx interrupt\n", epid
);
4506 /* Check if any errors on epid */
4508 if (myNextRxDesc
->status
& IO_MASK(USB_IN_status
, error
)) {
4509 __u32 r_usb_ept_data
;
4511 if (usb_pipeisoc(urb
->pipe
)) {
4512 r_usb_ept_data
= etrax_epid_iso_get(epid
);
4513 if((r_usb_ept_data
& IO_MASK(R_USB_EPT_DATA_ISO
, valid
)) &&
4514 (IO_EXTRACT(R_USB_EPT_DATA_ISO
, error_code
, r_usb_ept_data
) == 0) &&
4515 (myNextRxDesc
->status
& IO_MASK(USB_IN_status
, nodata
))) {
4516 /* Not an error, just a failure to receive an expected iso
4517 in packet in this frame. This is not documented
4518 in the designers reference. Continue processing.
4520 } else real_error
= 1;
4521 } else real_error
= 1;
4525 dma_err("Error in RX descr on epid:%d for URB 0x%x",
4526 epid
, (unsigned int)urb
);
4527 dump_ept_data(epid
);
4528 dump_in_desc(myNextRxDesc
);
4532 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4534 ASSERT(urb_priv
->urb_state
== STARTED
||
4535 urb_priv
->urb_state
== UNLINK
);
4537 if ((usb_pipetype(urb
->pipe
) == PIPE_BULK
) ||
4538 (usb_pipetype(urb
->pipe
) == PIPE_CONTROL
) ||
4539 (usb_pipetype(urb
->pipe
) == PIPE_INTERRUPT
)) {
4541 /* We get nodata for empty data transactions, and the rx descriptor's
4542 hw_len field is not valid in that case. No data to copy in other
4544 if (myNextRxDesc
->status
& IO_MASK(USB_IN_status
, nodata
)) {
4545 /* No data to copy */
4548 dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
4549 (unsigned int)urb, epid, myNextRxDesc->hw_len,
4550 urb_priv->rx_offset);
4552 /* Only copy data if URB isn't flaged to be unlinked*/
4553 if(urb_priv
->urb_state
!= UNLINK
) {
4554 /* Make sure the data fits in the buffer. */
4555 if(urb_priv
->rx_offset
+ myNextRxDesc
->hw_len
4556 <= urb
->transfer_buffer_length
) {
4558 /* Copy the data to URBs buffer */
4559 memcpy(urb
->transfer_buffer
+ urb_priv
->rx_offset
,
4560 phys_to_virt(myNextRxDesc
->buf
), myNextRxDesc
->hw_len
);
4561 urb_priv
->rx_offset
+= myNextRxDesc
->hw_len
;
4563 /* Signal overflow when returning URB */
4564 urb
->status
= -EOVERFLOW
;
4565 tc_finish_urb_later(hcd
, urb
, urb
->status
);
4570 /* Check if it was the last packet in the transfer */
4571 if (myNextRxDesc
->status
& IO_MASK(USB_IN_status
, eot
)) {
4572 /* Special handling for In Ctrl URBs. */
4573 if(usb_pipecontrol(urb
->pipe
) && usb_pipein(urb
->pipe
) &&
4574 !(urb_priv
->ctrl_zout_done
)) {
4575 /* Flag that RX part of Ctrl transfer is done. Because zout descr
4576 interrupt hasn't happend yet will the URB be finished in the
4578 urb_priv
->ctrl_rx_done
= 1;
4579 tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
4580 " for zout\n", (unsigned int)urb
);
4582 tc_finish_urb(hcd
, urb
, 0);
4585 } else { /* ISOC RX */
4587 isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
4588 epid, (unsigned int)urb);
4591 struct usb_iso_packet_descriptor
*packet
;
4593 if (urb_priv
->urb_state
== UNLINK
) {
4594 isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
4596 } else if (urb_priv
->urb_state
== NOT_STARTED
) {
4597 isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
4601 packet
= &urb
->iso_frame_desc
[urb_priv
->isoc_packet_counter
];
4605 if (myNextRxDesc
->status
& IO_MASK(USB_IN_status
, nodata
)) {
4606 /* We get nodata for empty data transactions, and the rx descriptor's
4607 hw_len field is not valid in that case. We copy 0 bytes however to
4609 packet
->actual_length
= 0;
4611 packet
->actual_length
= myNextRxDesc
->hw_len
;
4612 /* Make sure the data fits in the buffer. */
4613 ASSERT(packet
->actual_length
<= packet
->length
);
4614 memcpy(urb
->transfer_buffer
+ packet
->offset
,
4615 phys_to_virt(myNextRxDesc
->buf
), packet
->actual_length
);
4616 if(packet
->actual_length
> 0)
4617 isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
4618 packet
->actual_length
, urb_priv
->isoc_packet_counter
,
4619 (unsigned int)urb
, urb_priv
->urb_num
);
4622 /* Increment the packet counter. */
4623 urb_priv
->isoc_packet_counter
++;
4625 /* Note that we don't care about the eot field in the rx descriptor's
4626 status. It will always be set for isoc traffic. */
4627 if (urb
->number_of_packets
== urb_priv
->isoc_packet_counter
) {
4628 /* Complete the urb with status OK. */
4629 tc_finish_urb(hcd
, urb
, 0);
/* Recycle the consumed descriptor: clear it, make it the new ring eol,
   advance myNextRxDesc, flush the cache and restart DMA channel 9. */
4634 myNextRxDesc
->status
= 0;
4635 myNextRxDesc
->command
|= IO_MASK(USB_IN_command
, eol
);
4636 myLastRxDesc
->command
&= ~IO_MASK(USB_IN_command
, eol
);
4637 myLastRxDesc
= myNextRxDesc
;
4638 myNextRxDesc
= phys_to_virt(myNextRxDesc
->next
);
4639 flush_etrax_cache();
4640 *R_DMA_CH9_CMD
= IO_STATE(R_DMA_CH9_CMD
, cmd
, restart
);
4643 local_irq_restore(flags
);
/* tc_bulk_start_timer_func() -- periodic timer callback that papers over a
   race: an EP descriptor may be enabled behind the DMA position just as
   the bulk channel decides to stop.  If the bulk DMA channel (CH8 sub0)
   is idle but some TxBulkEPList entry is still enabled, restart the
   channel and re-arm the bulk eot timer; stop scanning after the first
   hit.  The `dummy` argument is the unused timer cookie.
   NOTE(review): damaged extraction -- braces, `break`, and the epid
   argument of the timer_warn() call (line after 4663) are missing between
   the numbered fragments; verify against the pristine file. */
4648 static void tc_bulk_start_timer_func(unsigned long dummy
) {
4649 /* We might enable an EP descriptor behind the current DMA position when
4650 it's about to decide that there are no more bulk traffic and it should
4651 stop the bulk channel.
4652 Therefore we periodically check if the bulk channel is stopped and there
4653 is an enabled bulk EP descriptor, in which case we start the bulk
4656 if (!(*R_DMA_CH8_SUB0_CMD
& IO_MASK(R_DMA_CH8_SUB0_CMD
, cmd
))) {
4659 timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");
4661 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
4662 if (TxBulkEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
4663 timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
4665 restart_dma8_sub0();
4667 /* Restart the bulk eot timer since we just started the bulk channel.*/
4668 mod_timer(&bulk_eot_timer
, jiffies
+ BULK_EOT_TIMER_INTERVAL
);
4670 /* No need to search any further. */
4675 timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
/* tc_bulk_eot_timer_func() -- timer callback that simulates a missed bulk
   eot interrupt (a known top-half race) by invoking
   check_finished_bulk_tx_epids() with its timeout flag set.  The timer
   cookie `dummy` carries the struct usb_hcd pointer. */
4679 static void tc_bulk_eot_timer_func(unsigned long dummy
) {
4680 struct usb_hcd
*hcd
= (struct usb_hcd
*)dummy
;
4682 /* Because of a race condition in the top half, we might miss a bulk eot.
4683 This timer "simulates" a bulk eot if we don't get one for a while,
4684 hopefully correcting the situation. */
4685 timer_dbg("bulk_eot_timer timed out.\n");
4686 check_finished_bulk_tx_epids(hcd
, 1);
/* Device-driver glue: forward declarations, the platform device handle and
   the struct device_driver that binds probe/remove (and, under CONFIG_PM,
   suspend/resume) to the platform bus.
   NOTE(review): the forward declarations below (lines 4700-4701) take
   struct device * / u32 arguments, but the CONFIG_PM definitions later in
   this file (lines 4924/4929) take struct usb_hcd * -- a type mismatch in
   the .suspend/.resume assignments; verify against the pristine source.
   NOTE(review): damaged extraction -- the matching #ifdef CONFIG_PM lines
   for the two visible #endif markers are missing from this chunk. */
4690 /*************************************************************/
4691 /*************************************************************/
4692 /* Device driver block */
4693 /*************************************************************/
4694 /*************************************************************/
4696 /* Forward declarations for device driver functions */
4697 static int devdrv_hcd_probe(struct device
*);
4698 static int devdrv_hcd_remove(struct device
*);
4700 static int devdrv_hcd_suspend(struct device
*, u32
, u32
);
4701 static int devdrv_hcd_resume(struct device
*, u32
);
4702 #endif /* CONFIG_PM */
4705 static struct platform_device
*devdrv_hc_platform_device
;
4707 /* device driver interface */
4708 static struct device_driver devdrv_hc_device_driver
= {
4709 .name
= (char *) hc_name
,
4710 .bus
= &platform_bus_type
,
4712 .probe
= devdrv_hcd_probe
,
4713 .remove
= devdrv_hcd_remove
,
4716 .suspend
= devdrv_hcd_suspend
,
4717 .resume
= devdrv_hcd_resume
,
4718 #endif /* CONFIG_PM */
/* devdrv_hcd_probe() -- bring up the ETRAX 100LX host controller:
     1. verify the bus DMA burst length is 32 (hard requirement),
     2. usb_create_hcd() + init crisv10_hcd state, stash it as drvdata,
     3. log chip revision and timer intervals,
     4. claim the io-interface mux for each port enabled by port_in_use()
        (freeing port 1's interface if port 2's claim fails),
     5. tc_init() and tc_dma_init() for transfer-controller and DMA setup,
     6. request the top-level HC IRQ and unmask bulk_eot/epid_attn/
        port_status/ctl_status interrupts (iso_eof stays masked until isoc
        traffic runs),
     7. reset the controller, program R_USB_FM_PSTART to 11960 (see the
        in-line rationale), configure host mode, enable/disable the
        physical ports, start host_run -- each step gated on
        crisv10_ready_wait(),
     8. usb_add_hcd() to register with USB core; on failure fall through
        to devdrv_hcd_remove() for cleanup.
   NOTE(review): damaged extraction -- error-path returns/gotos, braces and
   several statements (e.g. the *R_USB_COMMAND assignments' left-hand lines
   at 4820/4835/4858, root-hub init call near 4765, retval declaration) are
   missing between the numbered fragments; verify against the pristine
   file before editing. */
4721 /* initialize the host controller and driver */
4722 static int __init_or_module
devdrv_hcd_probe(struct device
*dev
)
4724 struct usb_hcd
*hcd
;
4725 struct crisv10_hcd
*crisv10_hcd
;
4728 /* Check DMA burst length */
4729 if(IO_EXTRACT(R_BUS_CONFIG
, dma_burst
, *R_BUS_CONFIG
) !=
4730 IO_STATE(R_BUS_CONFIG
, dma_burst
, burst32
)) {
4731 devdrv_err("Invalid DMA burst length in Etrax 100LX,"
4732 " needs to be 32\n");
4736 hcd
= usb_create_hcd(&crisv10_hc_driver
, dev
, dev
->bus_id
);
4740 crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
4741 spin_lock_init(&crisv10_hcd
->lock
);
4742 crisv10_hcd
->num_ports
= num_ports();
4743 crisv10_hcd
->running
= 0;
4745 dev_set_drvdata(dev
, crisv10_hcd
);
4747 devdrv_dbg("ETRAX USB IRQs HC:%d RX:%d TX:%d\n", ETRAX_USB_HC_IRQ
,
4748 ETRAX_USB_RX_IRQ
, ETRAX_USB_TX_IRQ
);
4750 /* Print out chip version read from registers */
4751 int rev_maj
= *R_USB_REVISION
& IO_MASK(R_USB_REVISION
, major
);
4752 int rev_min
= *R_USB_REVISION
& IO_MASK(R_USB_REVISION
, minor
);
4754 devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj
);
4756 devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj
, rev_min
);
4759 devdrv_info("Bulk timer interval, start:%d eot:%d\n",
4760 BULK_START_TIMER_INTERVAL
,
4761 BULK_EOT_TIMER_INTERVAL
);
4764 /* Init root hub data structures */
4766 devdrv_err("Failed init data for Root Hub\n");
4770 if(port_in_use(0)) {
4771 if (cris_request_io_interface(if_usb_1
, "ETRAX100LX USB-HCD")) {
4772 printk(KERN_CRIT
"usb-host: request IO interface usb1 failed");
4776 devdrv_info("Claimed interface for USB physical port 1\n");
4778 if(port_in_use(1)) {
4779 if (cris_request_io_interface(if_usb_2
, "ETRAX100LX USB-HCD")) {
4780 /* Free first interface if second failed to be claimed */
4781 if(port_in_use(0)) {
4782 cris_free_io_interface(if_usb_1
);
4784 printk(KERN_CRIT
"usb-host: request IO interface usb2 failed");
4788 devdrv_info("Claimed interface for USB physical port 2\n");
4791 /* Init transfer controller structs and locks */
4792 if((retval
= tc_init(hcd
)) != 0) {
4796 /* Attach interrupt functions for DMA and init DMA controller */
4797 if((retval
= tc_dma_init(hcd
)) != 0) {
4801 /* Attach the top IRQ handler for USB controller interrupts */
4802 if (request_irq(ETRAX_USB_HC_IRQ
, crisv10_hcd_top_irq
, 0,
4803 "ETRAX 100LX built-in USB (HC)", hcd
)) {
4804 err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ
);
4809 /* iso_eof is only enabled when isoc traffic is running. */
4810 *R_USB_IRQ_MASK_SET
=
4811 /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
4812 IO_STATE(R_USB_IRQ_MASK_SET
, bulk_eot
, set
) |
4813 IO_STATE(R_USB_IRQ_MASK_SET
, epid_attn
, set
) |
4814 IO_STATE(R_USB_IRQ_MASK_SET
, port_status
, set
) |
4815 IO_STATE(R_USB_IRQ_MASK_SET
, ctl_status
, set
);
4818 crisv10_ready_wait();
4819 /* Reset the USB interface. */
4821 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
4822 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
4823 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, reset
);
4825 /* Designer's Reference, p. 8 - 10 says we should Initate R_USB_FM_PSTART to
4826 0x2A30 (10800), to guarantee that control traffic gets 10% of the
4827 bandwidth, and periodic transfer may allocate the rest (90%).
4828 This doesn't work though.
4829 The value 11960 is chosen to be just after the SOF token, with a couple
4830 of bit times extra for possible bit stuffing. */
4831 *R_USB_FM_PSTART
= IO_FIELD(R_USB_FM_PSTART
, value
, 11960);
4833 crisv10_ready_wait();
4834 /* Configure the USB interface as a host controller. */
4836 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
4837 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
4838 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_config
);
4841 /* Check so controller not busy before enabling ports */
4842 crisv10_ready_wait();
4844 /* Enable selected USB ports */
4845 if(port_in_use(0)) {
4846 *R_USB_PORT1_DISABLE
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, no
);
4848 *R_USB_PORT1_DISABLE
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, yes
);
4850 if(port_in_use(1)) {
4851 *R_USB_PORT2_DISABLE
= IO_STATE(R_USB_PORT2_DISABLE
, disable
, no
);
4853 *R_USB_PORT2_DISABLE
= IO_STATE(R_USB_PORT2_DISABLE
, disable
, yes
);
4856 crisv10_ready_wait();
4857 /* Start processing of USB traffic. */
4859 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
4860 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
4861 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
4863 /* Do not continue probing initialization before USB interface is done */
4864 crisv10_ready_wait();
4866 /* Register our Host Controller to USB Core
4867 * Finish the remaining parts of generic HCD initialization: allocate the
4868 * buffers of consistent memory, register the bus
4869 * and call the driver's reset() and start() routines. */
4870 retval
= usb_add_hcd(hcd
, ETRAX_USB_HC_IRQ
, IRQF_DISABLED
);
4872 devdrv_err("Failed registering HCD driver\n");
4879 devdrv_hcd_remove(dev
);
/* devdrv_hcd_remove() -- tear down the controller: stop the hardware via
   crisv10_hcd_reset(), unregister from USB core (usb_remove_hcd), free the
   top-level HC IRQ, and release the claimed io-interface mux for each
   port that was in use.
   NOTE(review): free_irq() below is called with a NULL dev_id although
   request_irq() in probe registered with `hcd` as dev_id -- for the
   free to match the registration these must agree; verify against the
   pristine source.
   NOTE(review): damaged extraction -- the drvdata NULL check, hcd-put /
   return statements and braces are missing between the numbered
   fragments. */
4884 /* cleanup after the host controller and driver */
4885 static int __init_or_module
devdrv_hcd_remove(struct device
*dev
)
4887 struct crisv10_hcd
*crisv10_hcd
= dev_get_drvdata(dev
);
4888 struct usb_hcd
*hcd
;
4892 hcd
= crisv10_hcd_to_hcd(crisv10_hcd
);
4895 /* Stop USB Controller in Etrax 100LX */
4896 crisv10_hcd_reset(hcd
);
4898 usb_remove_hcd(hcd
);
4899 devdrv_dbg("Removed HCD from USB Core\n");
4901 /* Free USB Controller IRQ */
4902 free_irq(ETRAX_USB_HC_IRQ
, NULL
);
4904 /* Free resources */
4909 if(port_in_use(0)) {
4910 cris_free_io_interface(if_usb_1
);
4912 if(port_in_use(1)) {
4913 cris_free_io_interface(if_usb_2
);
4916 devdrv_dbg("Freed all claimed resources\n");
/* CONFIG_PM stubs: suspend and resume are currently no-ops returning 0.
   NOTE(review): these definitions take struct usb_hcd * but the forward
   declarations earlier in this file (lines 4700-4701) and the
   struct device_driver .suspend/.resume slots use struct device * --
   a prototype mismatch; verify against the pristine source. */
4924 static int devdrv_hcd_suspend(struct usb_hcd
*hcd
, u32 state
, u32 level
)
4926 return 0; /* no-op for now */
4929 static int devdrv_hcd_resume(struct usb_hcd
*hcd
, u32 level
)
4931 return 0; /* no-op for now */
4934 #endif /* CONFIG_PM */
/* Module entry/exit: module_hcd_init() prints the banner, registers the
   simple platform device under hc_name (propagating IS_ERR as PTR_ERR on
   failure) and then registers devdrv_hc_device_driver with the platform
   bus; module_hcd_exit() unregisters the driver.  No DMA mask is set on
   the device -- see the retained in-line note about falling back to the
   PIO/HCD buffer allocation path.
   NOTE(review): damaged extraction -- the port-selection defines inside
   the #ifndef blocks, braces and the platform_device_unregister() call in
   exit are missing between the numbered fragments; verify against the
   pristine file. */
4937 /*************************************************************/
4938 /*************************************************************/
4940 /*************************************************************/
4941 /*************************************************************/
4943 /* register driver */
4944 static int __init
module_hcd_init(void)
4950 /* Here we select enabled ports by following defines created from
4952 #ifndef CONFIG_ETRAX_USB_HOST_PORT1
4955 #ifndef CONFIG_ETRAX_USB_HOST_PORT2
4959 printk(KERN_INFO
"%s version "VERSION
" "COPYRIGHT
"\n", product_desc
);
4961 devdrv_hc_platform_device
=
4962 platform_device_register_simple((char *) hc_name
, 0, NULL
, 0);
4964 if (IS_ERR(devdrv_hc_platform_device
))
4965 return PTR_ERR(devdrv_hc_platform_device
);
4966 return driver_register(&devdrv_hc_device_driver
);
4968 * Note that we do not set the DMA mask for the device,
4969 * i.e. we pretend that we will use PIO, since no specific
4970 * allocation routines are needed for DMA buffers. This will
4971 * cause the HCD buffer allocation routines to fall back to
4976 /* unregister driver */
4977 static void __exit
module_hcd_exit(void)
4979 driver_unregister(&devdrv_hc_device_driver
);
4984 module_init(module_hcd_init
);
4985 module_exit(module_hcd_exit
);