/*
 * ETRAX 100LX USB Host Controller Driver
 *
 * Copyright (C) 2005 - 2008 Axis Communications AB
 *
 * Author: Konrad Eriksson <konrad.eriksson@axis.se>
 */
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/moduleparam.h>
15 #include <linux/spinlock.h>
16 #include <linux/usb.h>
17 #include <linux/platform_device.h>
21 #include <asm/arch/dma.h>
22 #include <asm/arch/io_interface_mux.h>
24 #include "../core/hcd.h"
25 #include "../core/hub.h"
26 #include "hc-crisv10.h"
27 #include "hc-cris-dbg.h"
30 /***************************************************************************/
31 /***************************************************************************/
32 /* Host Controller settings */
33 /***************************************************************************/
34 /***************************************************************************/
/* Driver identification strings. */
#define VERSION "1.00-openwrt_diff-v1"
#define COPYRIGHT "(c) 2005, 2006 Axis Communications AB"
#define DESCRIPTION "ETRAX 100LX USB Host Controller"

/* IRQ lines used by the controller and its two DMA channels
   (numbers come from the platform headers). */
#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
#define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR

/* Number of physical ports in Etrax 100LX */
#define USB_ROOT_HUB_PORTS 2

/* Names reported through the hc_driver structure (see crisv10_hc_driver). */
const char hc_name[] = "hc-crisv10";
const char product_desc[] = DESCRIPTION;
50 /* The number of epids is, among other things, used for pre-allocating
51 ctrl, bulk and isoc EP descriptors (one for each epid).
52 Assumed to be > 1 when initiating the DMA lists. */
53 #define NBR_OF_EPIDS 32
55 /* Support interrupt traffic intervals up to 128 ms. */
56 #define MAX_INTR_INTERVAL 128
58 /* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
59 table must be "invalid". By this we mean that we shouldn't care about epid
60 attentions for this epid, or at least handle them differently from epid
61 attentions for "valid" epids. This define determines which one to use
63 #define INVALID_EPID 31
64 /* A special epid for the bulk dummys. */
69 MODULE_DESCRIPTION(DESCRIPTION
);
70 MODULE_LICENSE("GPL");
71 MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
74 /* Module parameters */
76 /* 0 = No ports enabled
77 1 = Only port 1 enabled (on board ethernet on devboard)
78 2 = Only port 2 enabled (external connector on devboard)
79 3 = Both ports enabled
81 static unsigned int ports
= 3;
82 module_param(ports
, uint
, S_IRUGO
);
83 MODULE_PARM_DESC(ports
, "Bitmask indicating USB ports to use");
86 /***************************************************************************/
87 /***************************************************************************/
88 /* Shared global variables for this module */
89 /***************************************************************************/
90 /***************************************************************************/
92 /* EP descriptor lists for non period transfers. Must be 32-bit aligned. */
93 static volatile struct USB_EP_Desc TxBulkEPList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
95 static volatile struct USB_EP_Desc TxCtrlEPList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
97 /* EP descriptor lists for period transfers. Must be 32-bit aligned. */
98 static volatile struct USB_EP_Desc TxIntrEPList
[MAX_INTR_INTERVAL
] __attribute__ ((aligned (4)));
99 static volatile struct USB_SB_Desc TxIntrSB_zout
__attribute__ ((aligned (4)));
101 static volatile struct USB_EP_Desc TxIsocEPList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
102 static volatile struct USB_SB_Desc TxIsocSB_zout
__attribute__ ((aligned (4)));
104 static volatile struct USB_SB_Desc TxIsocSBList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
106 /* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
107 causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
108 gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
109 EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
111 static volatile struct USB_EP_Desc TxBulkDummyEPList
[NBR_OF_EPIDS
][2] __attribute__ ((aligned (4)));
113 /* List of URB pointers, where each points to the active URB for a epid.
114 For Bulk, Ctrl and Intr this means which URB that currently is added to
115 DMA lists (Isoc URBs are all directly added to DMA lists). As soon as
116 URB has completed is the queue examined and the first URB in queue is
117 removed and moved to the activeUrbList while its state change to STARTED and
118 its transfer(s) gets added to DMA list (exception Isoc where URBs enter
119 state STARTED directly and added transfers added to DMA lists). */
120 static struct urb
*activeUrbList
[NBR_OF_EPIDS
];
122 /* Additional software state info for each epid */
123 static struct etrax_epid epid_state
[NBR_OF_EPIDS
];
125 /* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
126 even if there is new data waiting to be processed */
127 static struct timer_list bulk_start_timer
= TIMER_INITIALIZER(NULL
, 0, 0);
128 static struct timer_list bulk_eot_timer
= TIMER_INITIALIZER(NULL
, 0, 0);
/* We want the start timer to expire before the eot timer, because the former
   might start traffic, thus making it unnecessary for the latter to time
   out.  (NOTE(review): comment tail reconstructed -- confirm against original.) */
#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */

/* Delay before a URB completion happen when it's scheduled to be delayed */
#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
139 /* Simplifying macros for checking software state info of a epid */
140 /* ----------------------------------------------------------------------- */
141 #define epid_inuse(epid) epid_state[epid].inuse
142 #define epid_out_traffic(epid) epid_state[epid].out_traffic
143 #define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
144 #define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
147 /***************************************************************************/
148 /***************************************************************************/
149 /* DEBUG FUNCTIONS */
150 /***************************************************************************/
151 /***************************************************************************/
152 /* Note that these functions are always available in their "__" variants,
153 for use in error situations. The "__" missing variants are controlled by
154 the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
155 static void __dump_urb(struct urb
* purb
)
157 struct crisv10_urb_priv
*urb_priv
= purb
->hcpriv
;
160 urb_num
= urb_priv
->urb_num
;
162 printk("\nURB:0x%x[%d]\n", (unsigned int)purb
, urb_num
);
163 printk("dev :0x%08lx\n", (unsigned long)purb
->dev
);
164 printk("pipe :0x%08x\n", purb
->pipe
);
165 printk("status :%d\n", purb
->status
);
166 printk("transfer_flags :0x%08x\n", purb
->transfer_flags
);
167 printk("transfer_buffer :0x%08lx\n", (unsigned long)purb
->transfer_buffer
);
168 printk("transfer_buffer_length:%d\n", purb
->transfer_buffer_length
);
169 printk("actual_length :%d\n", purb
->actual_length
);
170 printk("setup_packet :0x%08lx\n", (unsigned long)purb
->setup_packet
);
171 printk("start_frame :%d\n", purb
->start_frame
);
172 printk("number_of_packets :%d\n", purb
->number_of_packets
);
173 printk("interval :%d\n", purb
->interval
);
174 printk("error_count :%d\n", purb
->error_count
);
175 printk("context :0x%08lx\n", (unsigned long)purb
->context
);
176 printk("complete :0x%08lx\n\n", (unsigned long)purb
->complete
);
179 static void __dump_in_desc(volatile struct USB_IN_Desc
*in
)
181 printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in
);
182 printk(" sw_len : 0x%04x (%d)\n", in
->sw_len
, in
->sw_len
);
183 printk(" command : 0x%04x\n", in
->command
);
184 printk(" next : 0x%08lx\n", in
->next
);
185 printk(" buf : 0x%08lx\n", in
->buf
);
186 printk(" hw_len : 0x%04x (%d)\n", in
->hw_len
, in
->hw_len
);
187 printk(" status : 0x%04x\n\n", in
->status
);
190 static void __dump_sb_desc(volatile struct USB_SB_Desc
*sb
)
192 char tt
= (sb
->command
& 0x30) >> 4;
209 tt_string
= "unknown (weird)";
212 printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb
);
213 printk(" command:0x%04x (", sb
->command
);
214 printk("rem:%d ", (sb
->command
& 0x3f00) >> 8);
215 printk("full:%d ", (sb
->command
& 0x40) >> 6);
216 printk("tt:%d(%s) ", tt
, tt_string
);
217 printk("intr:%d ", (sb
->command
& 0x8) >> 3);
218 printk("eot:%d ", (sb
->command
& 0x2) >> 1);
219 printk("eol:%d)", sb
->command
& 0x1);
220 printk(" sw_len:0x%04x(%d)", sb
->sw_len
, sb
->sw_len
);
221 printk(" next:0x%08lx", sb
->next
);
222 printk(" buf:0x%08lx\n", sb
->buf
);
226 static void __dump_ep_desc(volatile struct USB_EP_Desc
*ep
)
228 printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep
);
229 printk(" command:0x%04x (", ep
->command
);
230 printk("ep_id:%d ", (ep
->command
& 0x1f00) >> 8);
231 printk("enable:%d ", (ep
->command
& 0x10) >> 4);
232 printk("intr:%d ", (ep
->command
& 0x8) >> 3);
233 printk("eof:%d ", (ep
->command
& 0x2) >> 1);
234 printk("eol:%d)", ep
->command
& 0x1);
235 printk(" hw_len:0x%04x(%d)", ep
->hw_len
, ep
->hw_len
);
236 printk(" next:0x%08lx", ep
->next
);
237 printk(" sub:0x%08lx\n", ep
->sub
);
240 static inline void __dump_ep_list(int pipe_type
)
242 volatile struct USB_EP_Desc
*ep
;
243 volatile struct USB_EP_Desc
*first_ep
;
244 volatile struct USB_SB_Desc
*sb
;
249 first_ep
= &TxBulkEPList
[0];
252 first_ep
= &TxCtrlEPList
[0];
255 first_ep
= &TxIntrEPList
[0];
257 case PIPE_ISOCHRONOUS
:
258 first_ep
= &TxIsocEPList
[0];
265 printk("\n\nDumping EP list...\n\n");
269 /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
270 sb
= ep
->sub
? phys_to_virt(ep
->sub
) : 0;
273 sb
= sb
->next
? phys_to_virt(sb
->next
) : 0;
275 ep
= (volatile struct USB_EP_Desc
*)(phys_to_virt(ep
->next
));
277 } while (ep
!= first_ep
);
280 static inline void __dump_ept_data(int epid
)
283 __u32 r_usb_ept_data
;
285 if (epid
< 0 || epid
> 31) {
286 printk("Cannot dump ept data for invalid epid %d\n", epid
);
290 local_irq_save(flags
);
291 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, epid
);
293 r_usb_ept_data
= *R_USB_EPT_DATA
;
294 local_irq_restore(flags
);
296 printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data
, epid
);
297 if (r_usb_ept_data
== 0) {
298 /* No need for more detailed printing. */
301 printk(" valid : %d\n", (r_usb_ept_data
& 0x80000000) >> 31);
302 printk(" hold : %d\n", (r_usb_ept_data
& 0x40000000) >> 30);
303 printk(" error_count_in : %d\n", (r_usb_ept_data
& 0x30000000) >> 28);
304 printk(" t_in : %d\n", (r_usb_ept_data
& 0x08000000) >> 27);
305 printk(" low_speed : %d\n", (r_usb_ept_data
& 0x04000000) >> 26);
306 printk(" port : %d\n", (r_usb_ept_data
& 0x03000000) >> 24);
307 printk(" error_code : %d\n", (r_usb_ept_data
& 0x00c00000) >> 22);
308 printk(" t_out : %d\n", (r_usb_ept_data
& 0x00200000) >> 21);
309 printk(" error_count_out : %d\n", (r_usb_ept_data
& 0x00180000) >> 19);
310 printk(" max_len : %d\n", (r_usb_ept_data
& 0x0003f800) >> 11);
311 printk(" ep : %d\n", (r_usb_ept_data
& 0x00000780) >> 7);
312 printk(" dev : %d\n", (r_usb_ept_data
& 0x0000003f));
315 static inline void __dump_ept_data_iso(int epid
)
320 if (epid
< 0 || epid
> 31) {
321 printk("Cannot dump ept data for invalid epid %d\n", epid
);
325 local_irq_save(flags
);
326 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, epid
);
328 ept_data
= *R_USB_EPT_DATA_ISO
;
329 local_irq_restore(flags
);
331 printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data
, epid
);
333 /* No need for more detailed printing. */
336 printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, valid
,
338 printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, port
,
340 printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, error_code
,
342 printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, max_len
,
344 printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, ep
,
346 printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, dev
,
350 static inline void __dump_ept_data_list(void)
354 printk("Dumping the whole R_USB_EPT_DATA list\n");
356 for (i
= 0; i
< 32; i
++) {
361 static void debug_epid(int epid
) {
364 if(epid_isoc(epid
)) {
365 __dump_ept_data_iso(epid
);
367 __dump_ept_data(epid
);
371 for(i
= 0; i
< 32; i
++) {
372 if(IO_EXTRACT(USB_EP_command
, epid
, TxBulkEPList
[i
].command
) ==
374 printk("%d: ", i
); __dump_ep_desc(&(TxBulkEPList
[i
]));
379 for(i
= 0; i
< 32; i
++) {
380 if(IO_EXTRACT(USB_EP_command
, epid
, TxCtrlEPList
[i
].command
) ==
382 printk("%d: ", i
); __dump_ep_desc(&(TxCtrlEPList
[i
]));
387 for(i
= 0; i
< MAX_INTR_INTERVAL
; i
++) {
388 if(IO_EXTRACT(USB_EP_command
, epid
, TxIntrEPList
[i
].command
) ==
390 printk("%d: ", i
); __dump_ep_desc(&(TxIntrEPList
[i
]));
395 for(i
= 0; i
< 32; i
++) {
396 if(IO_EXTRACT(USB_EP_command
, epid
, TxIsocEPList
[i
].command
) ==
398 printk("%d: ", i
); __dump_ep_desc(&(TxIsocEPList
[i
]));
402 __dump_ept_data_list();
403 __dump_ep_list(PIPE_INTERRUPT
);
409 char* hcd_status_to_str(__u8 bUsbStatus
) {
410 static char hcd_status_str
[128];
411 hcd_status_str
[0] = '\0';
412 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, ourun
, yes
)) {
413 strcat(hcd_status_str
, "ourun ");
415 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, perror
, yes
)) {
416 strcat(hcd_status_str
, "perror ");
418 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, device_mode
, yes
)) {
419 strcat(hcd_status_str
, "device_mode ");
421 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, host_mode
, yes
)) {
422 strcat(hcd_status_str
, "host_mode ");
424 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, started
, yes
)) {
425 strcat(hcd_status_str
, "started ");
427 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, running
, yes
)) {
428 strcat(hcd_status_str
, "running ");
430 return hcd_status_str
;
434 char* sblist_to_str(struct USB_SB_Desc
* sb_desc
) {
435 static char sblist_to_str_buff
[128];
436 char tmp
[32], tmp2
[32];
437 sblist_to_str_buff
[0] = '\0';
438 while(sb_desc
!= NULL
) {
439 switch(IO_EXTRACT(USB_SB_command
, tt
, sb_desc
->command
)) {
440 case 0: sprintf(tmp
, "zout"); break;
441 case 1: sprintf(tmp
, "in"); break;
442 case 2: sprintf(tmp
, "out"); break;
443 case 3: sprintf(tmp
, "setup"); break;
445 sprintf(tmp2
, "(%s %d)", tmp
, sb_desc
->sw_len
);
446 strcat(sblist_to_str_buff
, tmp2
);
447 if(sb_desc
->next
!= 0) {
448 sb_desc
= phys_to_virt(sb_desc
->next
);
453 return sblist_to_str_buff
;
456 char* port_status_to_str(__u16 wPortStatus
) {
457 static char port_status_str
[128];
458 port_status_str
[0] = '\0';
459 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, connected
, yes
)) {
460 strcat(port_status_str
, "connected ");
462 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
)) {
463 strcat(port_status_str
, "enabled ");
465 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, suspended
, yes
)) {
466 strcat(port_status_str
, "suspended ");
468 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, reset
, yes
)) {
469 strcat(port_status_str
, "reset ");
471 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, speed
, full
)) {
472 strcat(port_status_str
, "full-speed ");
474 strcat(port_status_str
, "low-speed ");
476 return port_status_str
;
480 char* endpoint_to_str(struct usb_endpoint_descriptor
*ed
) {
481 static char endpoint_to_str_buff
[128];
483 int epnum
= ed
->bEndpointAddress
& 0x0F;
484 int dir
= ed
->bEndpointAddress
& 0x80;
485 int type
= ed
->bmAttributes
& 0x03;
486 endpoint_to_str_buff
[0] = '\0';
487 sprintf(endpoint_to_str_buff
, "ep:%d ", epnum
);
490 sprintf(tmp
, " ctrl");
493 sprintf(tmp
, " isoc");
496 sprintf(tmp
, " bulk");
499 sprintf(tmp
, " intr");
502 strcat(endpoint_to_str_buff
, tmp
);
506 sprintf(tmp
, " out");
508 strcat(endpoint_to_str_buff
, tmp
);
510 return endpoint_to_str_buff
;
/* Debug helper functions for Transfer Controller */
/* Compose a "dir:... type:... dev:N ep:N" description of a pipe handle.
   Returns a static buffer -- not reentrant.
   NOTE(review): the scratch-buffer declaration was missing in the garbled
   source and was reconstructed -- confirm size against the original. */
char* pipe_to_str(unsigned int pipe) {
  static char pipe_to_str_buff[128];
  char tmp[32];
  sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe));
  sprintf(tmp, " type:%s", str_type(pipe));
  strcat(pipe_to_str_buff, tmp);
  sprintf(tmp, " dev:%d", usb_pipedevice(pipe));
  strcat(pipe_to_str_buff, tmp);
  sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe));
  strcat(pipe_to_str_buff, tmp);
  return pipe_to_str_buff;
}
#define USB_DEBUG_DESC 1

/* Descriptor-dump helpers: forward to the __-prefixed implementations when
   descriptor debugging is enabled, otherwise compile to nothing.
   BUG FIX: dump_sb_desc previously expanded to __dump_sb_desc(...) with a
   literal "..." -- forwarding a variadic macro requires __VA_ARGS__ (C99);
   since each dump helper takes exactly one pointer we use (x) throughout. */
#ifdef USB_DEBUG_DESC
#define dump_in_desc(x) __dump_in_desc(x)
#define dump_sb_desc(x) __dump_sb_desc(x)
#define dump_ep_desc(x) __dump_ep_desc(x)
#define dump_ept_data(x) __dump_ept_data(x)
#else
#define dump_in_desc(...) do {} while (0)
#define dump_sb_desc(...) do {} while (0)
#define dump_ep_desc(...) do {} while (0)
#define dump_ept_data(...) do {} while (0)
#endif
/* Uncomment this to enable massive function call trace
   #define USB_DEBUG_TRACE */

/* Function entry/exit trace hooks; no-ops unless USB_DEBUG_TRACE is set. */
#ifdef USB_DEBUG_TRACE
#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
#else
#define DBFENTER do {} while (0)
#define DBFEXIT do {} while (0)
#endif
/* Panic if x is not DWORD (32-bit) aligned.
   BUG FIX: the macro was a bare "if (...) {...}" -- unsafe as a single
   statement (an "if (c) CHECK_ALIGN(x); else ..." would bind the else to
   the macro's if).  Wrapped in do { } while (0) per standard practice. */
#define CHECK_ALIGN(x) do { \
  if (((__u32)(x)) & 0x00000003) \
    {panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);} \
} while (0)

/* Most helpful debugging aid */
#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
561 /***************************************************************************/
562 /***************************************************************************/
563 /* Forward declarations */
564 /***************************************************************************/
565 /***************************************************************************/
566 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg
*reg
);
567 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg
*reg
);
568 void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg
*reg
);
569 void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg
*reg
);
571 void rh_port_status_change(__u16
[]);
572 int rh_clear_port_feature(__u8
, __u16
);
573 int rh_set_port_feature(__u8
, __u16
);
574 static void rh_disable_port(unsigned int port
);
576 static void check_finished_bulk_tx_epids(struct usb_hcd
*hcd
,
579 static int tc_setup_epid(struct usb_host_endpoint
*ep
, struct urb
*urb
,
581 static void tc_free_epid(struct usb_host_endpoint
*ep
);
582 static int tc_allocate_epid(void);
583 static void tc_finish_urb(struct usb_hcd
*hcd
, struct urb
*urb
, int status
);
584 static void tc_finish_urb_later(struct usb_hcd
*hcd
, struct urb
*urb
,
587 static int urb_priv_create(struct usb_hcd
*hcd
, struct urb
*urb
, int epid
,
589 static void urb_priv_free(struct usb_hcd
*hcd
, struct urb
*urb
);
591 static int crisv10_usb_check_bandwidth(struct usb_device
*dev
,struct urb
*urb
);
592 static void crisv10_usb_claim_bandwidth(
593 struct usb_device
*dev
, struct urb
*urb
, int bustime
, int isoc
);
594 static void crisv10_usb_release_bandwidth(
595 struct usb_hcd
*hcd
, int isoc
, int bandwidth
);
597 static inline struct urb
*urb_list_first(int epid
);
598 static inline void urb_list_add(struct urb
*urb
, int epid
,
600 static inline urb_entry_t
*urb_list_entry(struct urb
*urb
, int epid
);
601 static inline void urb_list_del(struct urb
*urb
, int epid
);
602 static inline void urb_list_move_last(struct urb
*urb
, int epid
);
603 static inline struct urb
*urb_list_next(struct urb
*urb
, int epid
);
605 int create_sb_for_urb(struct urb
*urb
, int mem_flags
);
606 int init_intr_urb(struct urb
*urb
, int mem_flags
);
608 static inline void etrax_epid_set(__u8 index
, __u32 data
);
609 static inline void etrax_epid_clear_error(__u8 index
);
610 static inline void etrax_epid_set_toggle(__u8 index
, __u8 dirout
,
612 static inline __u8
etrax_epid_get_toggle(__u8 index
, __u8 dirout
);
613 static inline __u32
etrax_epid_get(__u8 index
);
615 /* We're accessing the same register position in Etrax so
616 when we do full access the internal difference doesn't matter */
617 #define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
618 #define etrax_epid_iso_get(index) etrax_epid_get(index)
621 static void tc_dma_process_isoc_urb(struct urb
*urb
);
622 static void tc_dma_process_queue(int epid
);
623 static void tc_dma_unlink_intr_urb(struct urb
*urb
);
624 static irqreturn_t
tc_dma_tx_interrupt(int irq
, void *vhc
);
625 static irqreturn_t
tc_dma_rx_interrupt(int irq
, void *vhc
);
627 static void tc_bulk_start_timer_func(unsigned long dummy
);
628 static void tc_bulk_eot_timer_func(unsigned long dummy
);
631 /*************************************************************/
632 /*************************************************************/
633 /* Host Controler Driver block */
634 /*************************************************************/
635 /*************************************************************/
638 static irqreturn_t
crisv10_hcd_top_irq(int irq
, void*);
639 static int crisv10_hcd_reset(struct usb_hcd
*);
640 static int crisv10_hcd_start(struct usb_hcd
*);
641 static void crisv10_hcd_stop(struct usb_hcd
*);
643 static int crisv10_hcd_suspend(struct device
*, u32
, u32
);
644 static int crisv10_hcd_resume(struct device
*, u32
);
645 #endif /* CONFIG_PM */
646 static int crisv10_hcd_get_frame(struct usb_hcd
*);
648 static int tc_urb_enqueue(struct usb_hcd
*, struct urb
*, gfp_t mem_flags
);
649 static int tc_urb_dequeue(struct usb_hcd
*, struct urb
*, int);
650 static void tc_endpoint_disable(struct usb_hcd
*, struct usb_host_endpoint
*ep
);
652 static int rh_status_data_request(struct usb_hcd
*, char *);
653 static int rh_control_request(struct usb_hcd
*, u16
, u16
, u16
, char*, u16
);
656 static int crisv10_hcd_hub_suspend(struct usb_hcd
*);
657 static int crisv10_hcd_hub_resume(struct usb_hcd
*);
658 #endif /* CONFIG_PM */
659 #ifdef CONFIG_USB_OTG
660 static int crisv10_hcd_start_port_reset(struct usb_hcd
*, unsigned);
661 #endif /* CONFIG_USB_OTG */
663 /* host controller driver interface */
664 static const struct hc_driver crisv10_hc_driver
=
666 .description
= hc_name
,
667 .product_desc
= product_desc
,
668 .hcd_priv_size
= sizeof(struct crisv10_hcd
),
670 /* Attaching IRQ handler manualy in probe() */
671 /* .irq = crisv10_hcd_irq, */
675 /* called to init HCD and root hub */
676 .reset
= crisv10_hcd_reset
,
677 .start
= crisv10_hcd_start
,
679 /* cleanly make HCD stop writing memory and doing I/O */
680 .stop
= crisv10_hcd_stop
,
682 /* return current frame number */
683 .get_frame_number
= crisv10_hcd_get_frame
,
686 /* Manage i/o requests via the Transfer Controller */
687 .urb_enqueue
= tc_urb_enqueue
,
688 .urb_dequeue
= tc_urb_dequeue
,
690 /* hw synch, freeing endpoint resources that urb_dequeue can't */
691 .endpoint_disable
= tc_endpoint_disable
,
694 /* Root Hub support */
695 .hub_status_data
= rh_status_data_request
,
696 .hub_control
= rh_control_request
,
698 .hub_suspend
= rh_suspend_request
,
699 .hub_resume
= rh_resume_request
,
700 #endif /* CONFIG_PM */
701 #ifdef CONFIG_USB_OTG
702 .start_port_reset
= crisv10_hcd_start_port_reset
,
703 #endif /* CONFIG_USB_OTG */
708 * conversion between pointers to a hcd and the corresponding
712 static inline struct crisv10_hcd
*hcd_to_crisv10_hcd(struct usb_hcd
*hcd
)
714 return (struct crisv10_hcd
*) hcd
->hcd_priv
;
717 static inline struct usb_hcd
*crisv10_hcd_to_hcd(struct crisv10_hcd
*hcd
)
719 return container_of((void *) hcd
, struct usb_hcd
, hcd_priv
);
722 /* check if specified port is in use */
723 static inline int port_in_use(unsigned int port
)
725 return ports
& (1 << port
);
728 /* number of ports in use */
729 static inline unsigned int num_ports(void)
731 unsigned int i
, num
= 0;
732 for (i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++)
738 /* map hub port number to the port number used internally by the HC */
739 static inline unsigned int map_port(unsigned int port
)
741 unsigned int i
, num
= 0;
742 for (i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++)
749 /* size of descriptors in slab cache */
751 #define MAX(x, y) ((x) > (y) ? (x) : (y))
755 /******************************************************************/
756 /* Hardware Interrupt functions */
757 /******************************************************************/
759 /* Fast interrupt handler for HC */
760 static irqreturn_t
crisv10_hcd_top_irq(int irq
, void *vcd
)
762 struct usb_hcd
*hcd
= vcd
;
763 struct crisv10_irq_reg reg
;
772 /* Turn of other interrupts while handling these sensitive cases */
773 local_irq_save(flags
);
775 /* Read out which interrupts that are flaged */
776 irq_mask
= *R_USB_IRQ_MASK_READ
;
777 reg
.r_usb_irq_mask_read
= irq_mask
;
779 /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
780 R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
781 clears the ourun and perror fields of R_USB_STATUS. */
782 reg
.r_usb_status
= *R_USB_STATUS
;
784 /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
786 reg
.r_usb_epid_attn
= *R_USB_EPID_ATTN
;
788 /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
789 port_status interrupt. */
790 reg
.r_usb_rh_port_status_1
= *R_USB_RH_PORT_STATUS_1
;
791 reg
.r_usb_rh_port_status_2
= *R_USB_RH_PORT_STATUS_2
;
793 /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
794 /* Note: the lower 11 bits contain the actual frame number, sent with each
796 reg
.r_usb_fm_number
= *R_USB_FM_NUMBER
;
798 /* Interrupts are handled in order of priority. */
799 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, port_status
)) {
800 crisv10_hcd_port_status_irq(®
);
802 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, epid_attn
)) {
803 crisv10_hcd_epid_attn_irq(®
);
805 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, ctl_status
)) {
806 crisv10_hcd_ctl_status_irq(®
);
808 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, iso_eof
)) {
809 crisv10_hcd_isoc_eof_irq(®
);
811 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, bulk_eot
)) {
812 /* Update/restart the bulk start timer since obviously the channel is
814 mod_timer(&bulk_start_timer
, jiffies
+ BULK_START_TIMER_INTERVAL
);
815 /* Update/restart the bulk eot timer since we just received an bulk eot
817 mod_timer(&bulk_eot_timer
, jiffies
+ BULK_EOT_TIMER_INTERVAL
);
819 /* Check for finished bulk transfers on epids */
820 check_finished_bulk_tx_epids(hcd
, 0);
822 local_irq_restore(flags
);
829 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg
*reg
) {
830 struct usb_hcd
*hcd
= reg
->hcd
;
831 struct crisv10_urb_priv
*urb_priv
;
835 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
836 if (test_bit(epid
, (void *)®
->r_usb_epid_attn
)) {
841 if (epid
== DUMMY_EPID
|| epid
== INVALID_EPID
) {
842 /* We definitely don't care about these ones. Besides, they are
843 always disabled, so any possible disabling caused by the
844 epid attention interrupt is irrelevant. */
848 if(!epid_inuse(epid
)) {
849 irq_err("Epid attention on epid:%d that isn't in use\n", epid
);
850 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
855 /* Note that although there are separate R_USB_EPT_DATA and
856 R_USB_EPT_DATA_ISO registers, they are located at the same address and
857 are of the same size. In other words, this read should be ok for isoc
859 ept_data
= etrax_epid_get(epid
);
860 error_code
= IO_EXTRACT(R_USB_EPT_DATA
, error_code
, ept_data
);
862 /* Get the active URB for this epid. We blatantly assume
863 that only this URB could have caused the epid attention. */
864 urb
= activeUrbList
[epid
];
866 irq_err("Attention on epid:%d error:%d with no active URB.\n",
868 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
873 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
876 /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
877 if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
, no_error
)) {
879 /* Isoc traffic doesn't have error_count_in/error_count_out. */
880 if ((usb_pipetype(urb
->pipe
) != PIPE_ISOCHRONOUS
) &&
881 (IO_EXTRACT(R_USB_EPT_DATA
, error_count_in
, ept_data
) == 3 ||
882 IO_EXTRACT(R_USB_EPT_DATA
, error_count_out
, ept_data
) == 3)) {
883 /* Check if URB allready is marked for late-finish, we can get
884 several 3rd error for Intr traffic when a device is unplugged */
885 if(urb_priv
->later_data
== NULL
) {
887 irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid
,
888 str_dir(urb
->pipe
), str_type(urb
->pipe
),
889 (unsigned int)urb
, urb_priv
->urb_num
);
891 tc_finish_urb_later(hcd
, urb
, -EPROTO
);
894 } else if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, perror
)) {
895 irq_warn("Perror for epid:%d\n", epid
);
896 printk("FM_NUMBER: %d\n", reg
->r_usb_fm_number
& 0x7ff);
897 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
901 if (!(ept_data
& IO_MASK(R_USB_EPT_DATA
, valid
))) {
903 panic("Perror because of invalid epid."
904 " Deconfigured too early?");
906 /* past eof1, near eof, zout transfer, setup transfer */
907 /* Dump the urb and the relevant EP descriptor. */
908 panic("Something wrong with DMA descriptor contents."
909 " Too much traffic inserted?");
911 } else if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, ourun
)) {
913 printk("FM_NUMBER: %d\n", reg
->r_usb_fm_number
& 0x7ff);
914 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
918 panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid
);
920 irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid
,
921 str_dir(urb
->pipe
), str_type(urb
->pipe
));
922 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
927 } else if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
,
929 /* Not really a protocol error, just says that the endpoint gave
930 a stall response. Note that error_code cannot be stall for isoc. */
931 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
) {
932 panic("Isoc traffic cannot stall");
935 tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid
,
936 str_dir(urb
->pipe
), str_type(urb
->pipe
), (unsigned int)urb
);
937 tc_finish_urb(hcd
, urb
, -EPIPE
);
939 } else if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
,
941 /* Two devices responded to a transaction request. Must be resolved
942 by software. FIXME: Reset ports? */
943 panic("Bus error for epid %d."
944 " Two devices responded to transaction request\n",
947 } else if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
,
949 /* DMA overrun or underrun. */
950 irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid
,
951 str_dir(urb
->pipe
), str_type(urb
->pipe
));
953 /* It seems that error_code = buffer_error in
954 R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
955 are the same error. */
956 tc_finish_urb(hcd
, urb
, -EPROTO
);
958 irq_warn("Unknown attention on epid:%d (%s %s)\n", epid
,
959 str_dir(urb
->pipe
), str_type(urb
->pipe
));
967 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg
*reg
)
969 __u16 port_reg
[USB_ROOT_HUB_PORTS
];
971 port_reg
[0] = reg
->r_usb_rh_port_status_1
;
972 port_reg
[1] = reg
->r_usb_rh_port_status_2
;
973 rh_port_status_change(port_reg
);
977 void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg
*reg
)
981 struct crisv10_urb_priv
*urb_priv
;
985 for (epid
= 0; epid
< NBR_OF_EPIDS
- 1; epid
++) {
987 /* Only check epids that are in use, is valid and has SB list */
988 if (!epid_inuse(epid
) || epid
== INVALID_EPID
||
989 TxIsocEPList
[epid
].sub
== 0 || epid
== DUMMY_EPID
) {
990 /* Nothing here to see. */
993 ASSERT(epid_isoc(epid
));
995 /* Get the active URB for this epid (if any). */
996 urb
= activeUrbList
[epid
];
998 isoc_warn("Ignoring NULL urb for epid:%d\n", epid
);
1001 if(!epid_out_traffic(epid
)) {
1003 ASSERT(usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
);
1005 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
1008 if (urb_priv
->urb_state
== NOT_STARTED
) {
1009 /* If ASAP is not set and urb->start_frame is the current frame,
1010 start the transfer. */
1011 if (!(urb
->transfer_flags
& URB_ISO_ASAP
) &&
1012 (urb
->start_frame
== (*R_USB_FM_NUMBER
& 0x7ff))) {
1013 /* EP should not be enabled if we're waiting for start_frame */
1014 ASSERT((TxIsocEPList
[epid
].command
&
1015 IO_STATE(USB_EP_command
, enable
, yes
)) == 0);
1017 isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid
);
1018 TxIsocEPList
[epid
].command
|= IO_STATE(USB_EP_command
, enable
, yes
);
1020 /* This urb is now active. */
1021 urb_priv
->urb_state
= STARTED
;
1031 void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg
*reg
)
1033 struct crisv10_hcd
* crisv10_hcd
= hcd_to_crisv10_hcd(reg
->hcd
);
1036 ASSERT(crisv10_hcd
);
1038 /* irq_dbg("ctr_status_irq, controller status: %s\n",
1039 hcd_status_to_str(reg->r_usb_status));*/
1041 /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
1042 list for the corresponding epid? */
1043 if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, ourun
)) {
1044 panic("USB controller got ourun.");
1046 if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, perror
)) {
1048 /* Before, etrax_usb_do_intr_recover was called on this epid if it was
1049 an interrupt pipe. I don't see how re-enabling all EP descriptors
1050 will help if there was a programming error. */
1051 panic("USB controller got perror.");
1054 /* Keep track of USB Controller, if it's running or not */
1055 if(reg
->r_usb_status
& IO_STATE(R_USB_STATUS
, running
, yes
)) {
1056 crisv10_hcd
->running
= 1;
1058 crisv10_hcd
->running
= 0;
1061 if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, device_mode
)) {
1062 /* We should never operate in device mode. */
1063 panic("USB controller in device mode.");
1066 /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
1067 using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
1068 set_bit(HCD_FLAG_SAW_IRQ
, ®
->hcd
->flags
);
1074 /******************************************************************/
1075 /* Host Controller interface functions */
1076 /******************************************************************/
1078 static inline void crisv10_ready_wait(void) {
1079 volatile int timeout
= 10000;
1080 /* Check the busy bit of USB controller in Etrax */
1081 while((*R_USB_COMMAND
& IO_MASK(R_USB_COMMAND
, busy
)) &&
1085 /* reset host controller */
1086 static int crisv10_hcd_reset(struct usb_hcd
*hcd
)
1089 hcd_dbg(hcd
, "reset\n");
1092 /* Reset the USB interface. */
1095 IO_STATE(R_USB_COMMAND, port_sel, nop) |
1096 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
1097 IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
1104 /* start host controller */
1105 static int crisv10_hcd_start(struct usb_hcd
*hcd
)
1108 hcd_dbg(hcd
, "start\n");
1110 crisv10_ready_wait();
1112 /* Start processing of USB traffic. */
1114 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
1115 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1116 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
1120 hcd
->state
= HC_STATE_RUNNING
;
1126 /* stop host controller */
1127 static void crisv10_hcd_stop(struct usb_hcd
*hcd
)
1130 hcd_dbg(hcd
, "stop\n");
1131 crisv10_hcd_reset(hcd
);
1135 /* return the current frame number */
1136 static int crisv10_hcd_get_frame(struct usb_hcd
*hcd
)
1140 return (*R_USB_FM_NUMBER
& 0x7ff);
1143 #ifdef CONFIG_USB_OTG
1145 static int crisv10_hcd_start_port_reset(struct usb_hcd
*hcd
, unsigned port
)
1147 return 0; /* no-op for now */
1150 #endif /* CONFIG_USB_OTG */
1153 /******************************************************************/
1154 /* Root Hub functions */
1155 /******************************************************************/
1157 /* root hub status */
1158 static const struct usb_hub_status rh_hub_status
=
1164 /* root hub descriptor */
1165 static const u8 rh_hub_descr
[] =
1167 0x09, /* bDescLength */
1168 0x29, /* bDescriptorType */
1169 USB_ROOT_HUB_PORTS
, /* bNbrPorts */
1170 0x00, /* wHubCharacteristics */
1172 0x01, /* bPwrOn2pwrGood */
1173 0x00, /* bHubContrCurrent */
1174 0x00, /* DeviceRemovable */
1175 0xff /* PortPwrCtrlMask */
1178 /* Actual holder of root hub status*/
1179 struct crisv10_rh rh
;
1181 /* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
1184 /* Reset port status flags */
1185 for (i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++) {
1186 rh
.wPortChange
[i
] = 0;
1187 rh
.wPortStatusPrev
[i
] = 0;
1192 #define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
1193 (1<<USB_PORT_FEAT_ENABLE)|\
1194 (1<<USB_PORT_FEAT_SUSPEND)|\
1195 (1<<USB_PORT_FEAT_RESET))
1197 /* Handle port status change interrupt (called from bottom part interrupt) */
1198 void rh_port_status_change(__u16 port_reg
[]) {
1202 for(i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++) {
1203 /* Xor out changes since last read, masked for important flags */
1204 wChange
= (port_reg
[i
] & RH_FEAT_MASK
) ^ rh
.wPortStatusPrev
[i
];
1205 /* Or changes together with (if any) saved changes */
1206 rh
.wPortChange
[i
] |= wChange
;
1207 /* Save new status */
1208 rh
.wPortStatusPrev
[i
] = port_reg
[i
];
1211 rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i
+1,
1212 port_status_to_str(wChange
),
1213 port_status_to_str(port_reg
[i
]));
1218 /* Construct port status change bitmap for the root hub */
1219 static int rh_status_data_request(struct usb_hcd
*hcd
, char *buf
)
1221 struct crisv10_hcd
* crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
1226 * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
1227 * return bitmap indicating ports with status change
1230 spin_lock(&crisv10_hcd
->lock
);
1231 for (i
= 1; i
<= crisv10_hcd
->num_ports
; i
++) {
1232 if (rh
.wPortChange
[map_port(i
)]) {
1234 rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i
,
1235 port_status_to_str(rh
.wPortChange
[map_port(i
)]),
1236 port_status_to_str(rh
.wPortStatusPrev
[map_port(i
)]));
1239 spin_unlock(&crisv10_hcd
->lock
);
1241 return *buf
== 0 ? 0 : 1;
1244 /* Handle a control request for the root hub (called from hcd_driver) */
1245 static int rh_control_request(struct usb_hcd
*hcd
,
1252 struct crisv10_hcd
*crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
1258 case GetHubDescriptor
:
1259 rh_dbg("GetHubDescriptor\n");
1260 len
= min_t(unsigned int, sizeof rh_hub_descr
, wLength
);
1261 memcpy(buf
, rh_hub_descr
, len
);
1262 buf
[2] = crisv10_hcd
->num_ports
;
1265 rh_dbg("GetHubStatus\n");
1266 len
= min_t(unsigned int, sizeof rh_hub_status
, wLength
);
1267 memcpy(buf
, &rh_hub_status
, len
);
1270 if (!wIndex
|| wIndex
> crisv10_hcd
->num_ports
)
1272 rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex
,
1273 port_status_to_str(rh
.wPortChange
[map_port(wIndex
)]),
1274 port_status_to_str(rh
.wPortStatusPrev
[map_port(wIndex
)]));
1275 *(u16
*) buf
= cpu_to_le16(rh
.wPortStatusPrev
[map_port(wIndex
)]);
1276 *(u16
*) (buf
+ 2) = cpu_to_le16(rh
.wPortChange
[map_port(wIndex
)]);
1279 rh_dbg("SetHubFeature\n");
1280 case ClearHubFeature
:
1281 rh_dbg("ClearHubFeature\n");
1283 case C_HUB_OVER_CURRENT
:
1284 case C_HUB_LOCAL_POWER
:
1285 rh_warn("Not implemented hub request:%d \n", typeReq
);
1286 /* not implemented */
1292 case SetPortFeature
:
1293 if (!wIndex
|| wIndex
> crisv10_hcd
->num_ports
)
1295 if(rh_set_port_feature(map_port(wIndex
), wValue
))
1298 case ClearPortFeature
:
1299 if (!wIndex
|| wIndex
> crisv10_hcd
->num_ports
)
1301 if(rh_clear_port_feature(map_port(wIndex
), wValue
))
1305 rh_warn("Unknown hub request: %d\n", typeReq
);
1313 int rh_set_port_feature(__u8 bPort
, __u16 wFeature
) {
1314 __u8 bUsbCommand
= 0;
1317 case USB_PORT_FEAT_RESET
:
1318 rh_dbg("SetPortFeature: reset\n");
1320 if (rh
.wPortStatusPrev
[bPort
] &
1321 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
))
1323 __u8 restart_controller
= 0;
1325 if ( (rh
.wPortStatusPrev
[0] &
1326 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
)) &&
1327 (rh
.wPortStatusPrev
[1] &
1328 IO_STATE(R_USB_RH_PORT_STATUS_2
, enabled
, yes
)) )
1330 /* Both ports is enabled. The USB controller will not change state. */
1331 restart_controller
= 0;
1335 /* Only ports is enabled. The USB controller will change state and
1336 must be restarted. */
1337 restart_controller
= 1;
1340 In ETRAX 100LX it's not possible to reset an enabled root hub port.
1341 The workaround is to disable and enable the port before resetting it.
1342 Disabling the port can, if both ports are disabled at once, cause the
1343 USB controller to change state to HOST_MODE state.
1344 The USB controller state transition causes a lot of unwanted
1345 interrupts that must be avoided.
1346 Disabling the USB controller status and port status interrupts before
1347 disabling/resetting the port stops these interrupts.
1349 These actions are performed:
1350 1. Disable USB controller status and port status interrupts.
1352 3. Wait for the port to be disabled.
1354 5. Wait for the port to be enabled.
1356 7. Wait for for the reset to end.
1357 8. Wait for the USB controller entering started state.
1358 9. Order the USB controller to running state.
1359 10. Wait for the USB controller reaching running state.
1360 11. Clear all interrupts generated during the disable/enable/reset
1362 12. Enable the USB controller status and port status interrupts.
1365 /* 1. Disable USB controller status and USB port status interrupts. */
1366 *R_USB_IRQ_MASK_CLR
= IO_STATE(R_USB_IRQ_MASK_CLR
, ctl_status
, clr
);
1367 __asm__
__volatile__ (" nop");
1368 *R_USB_IRQ_MASK_CLR
= IO_STATE(R_USB_IRQ_MASK_CLR
, port_status
, clr
);
1369 __asm__
__volatile__ (" nop");
1373 /* Since an root hub port reset shall be 50 ms and the ETRAX 100LX
1374 root hub port reset is 10 ms we must perform 5 port resets to
1375 achieve a proper root hub port reset. */
1376 for (reset_cnt
= 0; reset_cnt
< 5; reset_cnt
++)
1378 rh_dbg("Disable Port %d\n", bPort
+ 1);
1380 /* 2. Disable the port*/
1383 *R_USB_PORT1_DISABLE
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, yes
);
1387 *R_USB_PORT2_DISABLE
= IO_STATE(R_USB_PORT2_DISABLE
, disable
, yes
);
1390 /* 3. Wait for the port to be disabled. */
1391 while ( (bPort
== 0) ?
1392 *R_USB_RH_PORT_STATUS_1
&
1393 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
) :
1394 *R_USB_RH_PORT_STATUS_2
&
1395 IO_STATE(R_USB_RH_PORT_STATUS_2
, enabled
, yes
) ) {}
1397 rh_dbg("Port %d is disabled. Enable it!\n", bPort
+ 1);
1399 /* 4. Enable the port. */
1402 *R_USB_PORT1_DISABLE
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, no
);
1406 *R_USB_PORT2_DISABLE
= IO_STATE(R_USB_PORT2_DISABLE
, disable
, no
);
1409 /* 5. Wait for the port to be enabled again. */
1410 while (!( (bPort
== 0) ?
1411 *R_USB_RH_PORT_STATUS_1
&
1412 IO_STATE(R_USB_RH_PORT_STATUS_1
, connected
, yes
) :
1413 *R_USB_RH_PORT_STATUS_2
&
1414 IO_STATE(R_USB_RH_PORT_STATUS_2
, connected
, yes
) ) ) {}
1416 rh_dbg("Port %d is enabled.\n", bPort
+ 1);
1418 /* 6. Reset the port */
1419 crisv10_ready_wait();
1422 IO_STATE(R_USB_COMMAND
, port_sel
, port1
):
1423 IO_STATE(R_USB_COMMAND
, port_sel
, port2
) ) |
1424 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1425 IO_STATE(R_USB_COMMAND
, busy
, no
) |
1426 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, nop
);
1427 rh_dbg("Port %d is resetting.\n", bPort
+ 1);
1429 /* 7. The USB specification says that we should wait for at least
1430 10ms for device recover */
1431 udelay(10500); /* 10,5ms blocking wait */
1433 crisv10_ready_wait();
1438 /* Check if the USB controller needs to be restarted. */
1439 if (restart_controller
)
1441 /* 8. Wait for the USB controller entering started state. */
1442 while (!(*R_USB_STATUS
& IO_STATE(R_USB_STATUS
, started
, yes
))) {}
1444 /* 9. Order the USB controller to running state. */
1445 crisv10_ready_wait();
1447 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
1448 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1449 IO_STATE(R_USB_COMMAND
, busy
, no
) |
1450 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
1452 /* 10. Wait for the USB controller reaching running state. */
1453 while (!(*R_USB_STATUS
& IO_STATE(R_USB_STATUS
, running
, yes
))) {}
1456 /* 11. Clear any controller or port satus interrupts before enabling
1461 /* Clear the port status interrupt of the reset port. */
1464 rh_dbg("Clearing port 1 interrupts\n");
1465 dummy
= *R_USB_RH_PORT_STATUS_1
;
1469 rh_dbg("Clearing port 2 interrupts\n");
1470 dummy
= *R_USB_RH_PORT_STATUS_2
;
1473 if (restart_controller
)
1475 /* The USB controller is restarted. Clear all interupts. */
1476 rh_dbg("Clearing all interrupts\n");
1477 dummy
= *R_USB_STATUS
;
1478 dummy
= *R_USB_RH_PORT_STATUS_1
;
1479 dummy
= *R_USB_RH_PORT_STATUS_2
;
1483 /* 12. Enable USB controller status and USB port status interrupts. */
1484 *R_USB_IRQ_MASK_SET
= IO_STATE(R_USB_IRQ_MASK_SET
, ctl_status
, set
);
1485 __asm__
__volatile__ (" nop");
1486 *R_USB_IRQ_MASK_SET
= IO_STATE(R_USB_IRQ_MASK_SET
, port_status
, set
);
1487 __asm__
__volatile__ (" nop");
1493 bUsbCommand
|= IO_STATE(R_USB_COMMAND
, port_cmd
, reset
);
1494 /* Select which port via the port_sel field */
1495 bUsbCommand
|= IO_FIELD(R_USB_COMMAND
, port_sel
, bPort
+1);
1497 /* Make sure the controller isn't busy. */
1498 crisv10_ready_wait();
1499 /* Send out the actual command to the USB controller */
1500 *R_USB_COMMAND
= bUsbCommand
;
1502 /* Wait a while for controller to first become started after port reset */
1503 udelay(12000); /* 12ms blocking wait */
1505 /* Make sure the controller isn't busy. */
1506 crisv10_ready_wait();
1508 /* If all enabled ports were disabled the host controller goes down into
1509 started mode, so we need to bring it back into the running state.
1510 (This is safe even if it's already in the running state.) */
1512 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
1513 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1514 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
1518 case USB_PORT_FEAT_SUSPEND
:
1519 rh_dbg("SetPortFeature: suspend\n");
1520 bUsbCommand
|= IO_STATE(R_USB_COMMAND
, port_cmd
, suspend
);
1523 case USB_PORT_FEAT_POWER
:
1524 rh_dbg("SetPortFeature: power\n");
1526 case USB_PORT_FEAT_C_CONNECTION
:
1527 rh_dbg("SetPortFeature: c_connection\n");
1529 case USB_PORT_FEAT_C_RESET
:
1530 rh_dbg("SetPortFeature: c_reset\n");
1532 case USB_PORT_FEAT_C_OVER_CURRENT
:
1533 rh_dbg("SetPortFeature: c_over_current\n");
1537 /* Select which port via the port_sel field */
1538 bUsbCommand
|= IO_FIELD(R_USB_COMMAND
, port_sel
, bPort
+1);
1540 /* Make sure the controller isn't busy. */
1541 crisv10_ready_wait();
1542 /* Send out the actual command to the USB controller */
1543 *R_USB_COMMAND
= bUsbCommand
;
1546 rh_dbg("SetPortFeature: unknown feature\n");
1552 int rh_clear_port_feature(__u8 bPort
, __u16 wFeature
) {
1554 case USB_PORT_FEAT_ENABLE
:
1555 rh_dbg("ClearPortFeature: enable\n");
1556 rh_disable_port(bPort
);
1558 case USB_PORT_FEAT_SUSPEND
:
1559 rh_dbg("ClearPortFeature: suspend\n");
1561 case USB_PORT_FEAT_POWER
:
1562 rh_dbg("ClearPortFeature: power\n");
1565 case USB_PORT_FEAT_C_ENABLE
:
1566 rh_dbg("ClearPortFeature: c_enable\n");
1568 case USB_PORT_FEAT_C_SUSPEND
:
1569 rh_dbg("ClearPortFeature: c_suspend\n");
1571 case USB_PORT_FEAT_C_CONNECTION
:
1572 rh_dbg("ClearPortFeature: c_connection\n");
1574 case USB_PORT_FEAT_C_OVER_CURRENT
:
1575 rh_dbg("ClearPortFeature: c_over_current\n");
1577 case USB_PORT_FEAT_C_RESET
:
1578 rh_dbg("ClearPortFeature: c_reset\n");
1581 rh
.wPortChange
[bPort
] &= ~(1 << (wFeature
- 16));
1584 rh_dbg("ClearPortFeature: unknown feature\n");
1592 /* Handle a suspend request for the root hub (called from hcd_driver) */
1593 static int rh_suspend_request(struct usb_hcd
*hcd
)
1595 return 0; /* no-op for now */
1598 /* Handle a resume request for the root hub (called from hcd_driver) */
1599 static int rh_resume_request(struct usb_hcd
*hcd
)
1601 return 0; /* no-op for now */
1603 #endif /* CONFIG_PM */
1607 /* Wrapper function for workaround port disable registers in USB controller */
1608 static void rh_disable_port(unsigned int port
) {
1609 volatile int timeout
= 10000;
1610 volatile char* usb_portx_disable
;
1613 usb_portx_disable
= R_USB_PORT1_DISABLE
;
1616 usb_portx_disable
= R_USB_PORT2_DISABLE
;
1619 /* Invalid port index */
1622 /* Set disable flag in special register */
1623 *usb_portx_disable
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, yes
);
1624 /* Wait until not enabled anymore */
1625 while((rh
.wPortStatusPrev
[port
] &
1626 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
)) &&
1629 /* clear disable flag in special register */
1630 *usb_portx_disable
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, no
);
1631 rh_info("Physical port %d disabled\n", port
+1);
1635 /******************************************************************/
1636 /* Transfer Controller (TC) functions */
1637 /******************************************************************/
1639 /* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
1641 To adjust it dynamically we would have to get an interrupt when we reach
1642 the end of the rx descriptor list, or when we get close to the end, and
1643 then allocate more descriptors. */
1644 #define NBR_OF_RX_DESC 512
1645 #define RX_DESC_BUF_SIZE 1024
1646 #define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
1649 /* Local variables for Transfer Controller */
1650 /* --------------------------------------- */
1652 /* This is a circular (double-linked) list of the active urbs for each epid.
1653 The head is never removed, and new urbs are linked onto the list as
1654 urb_entry_t elements. Don't reference urb_list directly; use the wrapper
1655 functions instead (which includes spin_locks) */
1656 static struct list_head urb_list
[NBR_OF_EPIDS
];
1658 /* Read about the need and usage of this lock in submit_ctrl_urb. */
1659 /* Lock for URB lists for each EPID */
1660 static spinlock_t urb_list_lock
;
1662 /* Lock for EPID array register (R_USB_EPT_x) in Etrax */
1663 static spinlock_t etrax_epid_lock
;
1665 /* Lock for dma8 sub0 handling */
1666 static spinlock_t etrax_dma8_sub0_lock
;
1668 /* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
1669 Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be
1671 static volatile unsigned char RxBuf
[RX_BUF_SIZE
] __attribute__ ((aligned (32)));
1672 static volatile struct USB_IN_Desc RxDescList
[NBR_OF_RX_DESC
] __attribute__ ((aligned (4)));
1674 /* Pointers into RxDescList. */
1675 static volatile struct USB_IN_Desc
*myNextRxDesc
;
1676 static volatile struct USB_IN_Desc
*myLastRxDesc
;
1678 /* A zout transfer makes a memory access at the address of its buf pointer,
1679 which means that setting this buf pointer to 0 will cause an access to the
1680 flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
1681 (depending on DMA burst size) transfer.
1682 Instead, we set it to 1, and point it to this buffer. */
1683 static int zout_buffer
[4] __attribute__ ((aligned (4)));
1685 /* Cache for allocating new EP and SB descriptors. */
1686 static struct kmem_cache
*usb_desc_cache
;
1688 /* Cache for the data allocated in the isoc descr top half. */
1689 static struct kmem_cache
*isoc_compl_cache
;
1691 /* Cache for the data allocated when delayed finishing of URBs */
1692 static struct kmem_cache
*later_data_cache
;
1695 /* Counter to keep track of how many Isoc EP we have sat up. Used to enable
1696 and disable iso_eof interrupt. We only need these interrupts when we have
1697 Isoc data endpoints (consumes CPU cycles).
1698 FIXME: This could be more fine granular, so this interrupt is only enabled
1699 when we have a In Isoc URB not URB_ISO_ASAP flaged queued. */
1700 static int isoc_epid_counter
;
1702 /* Protecting wrapper functions for R_USB_EPT_x */
1703 /* -------------------------------------------- */
1704 static inline void etrax_epid_set(__u8 index
, __u32 data
) {
1705 unsigned long flags
;
1706 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1707 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1709 *R_USB_EPT_DATA
= data
;
1710 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1713 static inline void etrax_epid_clear_error(__u8 index
) {
1714 unsigned long flags
;
1715 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1716 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1719 ~(IO_MASK(R_USB_EPT_DATA
, error_count_in
) |
1720 IO_MASK(R_USB_EPT_DATA
, error_count_out
) |
1721 IO_MASK(R_USB_EPT_DATA
, error_code
));
1722 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1725 static inline void etrax_epid_set_toggle(__u8 index
, __u8 dirout
,
1727 unsigned long flags
;
1728 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1729 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1732 *R_USB_EPT_DATA
&= ~IO_MASK(R_USB_EPT_DATA
, t_out
);
1733 *R_USB_EPT_DATA
|= IO_FIELD(R_USB_EPT_DATA
, t_out
, toggle
);
1735 *R_USB_EPT_DATA
&= ~IO_MASK(R_USB_EPT_DATA
, t_in
);
1736 *R_USB_EPT_DATA
|= IO_FIELD(R_USB_EPT_DATA
, t_in
, toggle
);
1738 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1741 static inline __u8
etrax_epid_get_toggle(__u8 index
, __u8 dirout
) {
1742 unsigned long flags
;
1744 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1745 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1748 toggle
= IO_EXTRACT(R_USB_EPT_DATA
, t_out
, *R_USB_EPT_DATA
);
1750 toggle
= IO_EXTRACT(R_USB_EPT_DATA
, t_in
, *R_USB_EPT_DATA
);
1752 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1757 static inline __u32
etrax_epid_get(__u8 index
) {
1758 unsigned long flags
;
1760 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1761 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1763 data
= *R_USB_EPT_DATA
;
1764 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1771 /* Main functions for Transfer Controller */
1772 /* -------------------------------------- */
1774 /* Init structs, memories and lists used by Transfer Controller */
1775 int tc_init(struct usb_hcd
*hcd
) {
1777 /* Clear software state info for all epids */
1778 memset(epid_state
, 0, sizeof(struct etrax_epid
) * NBR_OF_EPIDS
);
1780 /* Set Invalid and Dummy as being in use and disabled */
1781 epid_state
[INVALID_EPID
].inuse
= 1;
1782 epid_state
[DUMMY_EPID
].inuse
= 1;
1783 epid_state
[INVALID_EPID
].disabled
= 1;
1784 epid_state
[DUMMY_EPID
].disabled
= 1;
1786 /* Clear counter for how many Isoc epids we have sat up */
1787 isoc_epid_counter
= 0;
1789 /* Initialize the urb list by initiating a head for each list.
1790 Also reset list hodling active URB for each epid */
1791 for (i
= 0; i
< NBR_OF_EPIDS
; i
++) {
1792 INIT_LIST_HEAD(&urb_list
[i
]);
1793 activeUrbList
[i
] = NULL
;
1796 /* Init lock for URB lists */
1797 spin_lock_init(&urb_list_lock
);
1798 /* Init lock for Etrax R_USB_EPT register */
1799 spin_lock_init(&etrax_epid_lock
);
1800 /* Init lock for Etrax dma8 sub0 handling */
1801 spin_lock_init(&etrax_dma8_sub0_lock
);
1803 /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
1805 /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
1806 allocate SB descriptors from this cache. This is ok since
1807 sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
1808 usb_desc_cache
= kmem_cache_create("usb_desc_cache",
1809 sizeof(struct USB_EP_Desc
), 0,
1810 SLAB_HWCACHE_ALIGN
, 0);
1811 if(usb_desc_cache
== NULL
) {
1815 /* Create slab cache for speedy allocation of memory for isoc bottom-half
1816 interrupt handling */
1818 kmem_cache_create("isoc_compl_cache",
1819 sizeof(struct crisv10_isoc_complete_data
),
1820 0, SLAB_HWCACHE_ALIGN
, 0);
1821 if(isoc_compl_cache
== NULL
) {
1825 /* Create slab cache for speedy allocation of memory for later URB finish
1828 kmem_cache_create("later_data_cache",
1829 sizeof(struct urb_later_data
),
1830 0, SLAB_HWCACHE_ALIGN
, 0);
1831 if(later_data_cache
== NULL
) {
1836 /* Initiate the bulk start timer. */
1837 init_timer(&bulk_start_timer
);
1838 bulk_start_timer
.expires
= jiffies
+ BULK_START_TIMER_INTERVAL
;
1839 bulk_start_timer
.function
= tc_bulk_start_timer_func
;
1840 add_timer(&bulk_start_timer
);
1843 /* Initiate the bulk eot timer. */
1844 init_timer(&bulk_eot_timer
);
1845 bulk_eot_timer
.expires
= jiffies
+ BULK_EOT_TIMER_INTERVAL
;
1846 bulk_eot_timer
.function
= tc_bulk_eot_timer_func
;
1847 bulk_eot_timer
.data
= (unsigned long)hcd
;
1848 add_timer(&bulk_eot_timer
);
1853 /* Uninitialize all resources used by Transfer Controller */
1854 void tc_destroy(void) {
1856 /* Destroy all slab cache */
1857 kmem_cache_destroy(usb_desc_cache
);
1858 kmem_cache_destroy(isoc_compl_cache
);
1859 kmem_cache_destroy(later_data_cache
);
1862 del_timer(&bulk_start_timer
);
1863 del_timer(&bulk_eot_timer
);
1866 static void restart_dma8_sub0(void) {
1867 unsigned long flags
;
1868 spin_lock_irqsave(&etrax_dma8_sub0_lock
, flags
);
1869 /* Verify that the dma is not running */
1870 if ((*R_DMA_CH8_SUB0_CMD
& IO_MASK(R_DMA_CH8_SUB0_CMD
, cmd
)) == 0) {
1871 struct USB_EP_Desc
*ep
= (struct USB_EP_Desc
*)phys_to_virt(*R_DMA_CH8_SUB0_EP
);
1872 while (DUMMY_EPID
== IO_EXTRACT(USB_EP_command
, epid
, ep
->command
)) {
1873 ep
= (struct USB_EP_Desc
*)phys_to_virt(ep
->next
);
1875 /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID. */
1876 *R_DMA_CH8_SUB0_EP
= virt_to_phys(ep
);
1877 /* Restart the DMA */
1878 *R_DMA_CH8_SUB0_CMD
= IO_STATE(R_DMA_CH8_SUB0_CMD
, cmd
, start
);
1880 spin_unlock_irqrestore(&etrax_dma8_sub0_lock
, flags
);
1883 /* queue an URB with the transfer controller (called from hcd_driver) */
1884 static int tc_urb_enqueue(struct usb_hcd
*hcd
,
1891 unsigned long flags
;
1892 struct crisv10_urb_priv
*urb_priv
;
1893 struct crisv10_hcd
* crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
1896 if(!(crisv10_hcd
->running
)) {
1897 /* The USB Controller is not running, probably because no device is
1898 attached. No idea to enqueue URBs then */
1899 tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
1904 maxpacket
= usb_maxpacket(urb
->dev
, urb
->pipe
, usb_pipeout(urb
->pipe
));
1905 /* Special case check for In Isoc transfers. Specification states that each
1906 In Isoc transfer consists of one packet and therefore it should fit into
1907 the transfer-buffer of an URB.
1908 We do the check here to be sure (an invalid scenario can be produced with
1909 parameters to the usbtest suite) */
1910 if(usb_pipeisoc(urb
->pipe
) && usb_pipein(urb
->pipe
) &&
1911 (urb
->transfer_buffer_length
< maxpacket
)) {
1912 tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb
->transfer_buffer_length
, maxpacket
);
1916 /* Check if there is a epid for URBs destination, if not this function
1918 epid
= tc_setup_epid(urb
->ep
, urb
, mem_flags
);
1920 tc_err("Failed setup epid:%d for URB:0x%x\n", epid
, (unsigned int)urb
);
1925 if(urb
== activeUrbList
[epid
]) {
1926 tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb
);
1930 if(urb_list_entry(urb
, epid
)) {
1931 tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb
);
1935 /* If we actively have flaged endpoint as disabled then refuse submition */
1936 if(epid_state
[epid
].disabled
) {
1940 /* Allocate and init HC-private data for URB */
1941 if(urb_priv_create(hcd
, urb
, epid
, mem_flags
) != 0) {
1945 urb_priv
= urb
->hcpriv
;
1947 /* Check if there is enough bandwidth for periodic transfer */
1948 if(usb_pipeint(urb
->pipe
) || usb_pipeisoc(urb
->pipe
)) {
1949 /* only check (and later claim) if not already claimed */
1950 if (urb_priv
->bandwidth
== 0) {
1951 bustime
= crisv10_usb_check_bandwidth(urb
->dev
, urb
);
1953 tc_err("Not enough periodic bandwidth\n");
1954 urb_priv_free(hcd
, urb
);
1961 tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
1962 (unsigned int)urb
, urb_priv
->urb_num
, epid
,
1963 pipe_to_str(urb
->pipe
), urb
->transfer_buffer_length
);
1965 /* Create and link SBs required for this URB */
1966 retval
= create_sb_for_urb(urb
, mem_flags
);
1968 tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb
,
1970 urb_priv_free(hcd
, urb
);
1975 /* Init intr EP pool if this URB is a INTR transfer. This pool is later
1976 used when inserting EPs in the TxIntrEPList. We do the alloc here
1977 so we can't run out of memory later */
1978 if(usb_pipeint(urb
->pipe
)) {
1979 retval
= init_intr_urb(urb
, mem_flags
);
1981 tc_warn("Failed to init Intr URB\n");
1982 urb_priv_free(hcd
, urb
);
1988 /* Disable other access when inserting USB */
1989 local_irq_save(flags
);
1991 /* Claim bandwidth, if needed */
1993 crisv10_usb_claim_bandwidth(urb
->dev
,
1996 (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
));
1999 /* Add URB to EP queue */
2000 urb_list_add(urb
, epid
, mem_flags
);
2002 if(usb_pipeisoc(urb
->pipe
)) {
2003 /* Special processing of Isoc URBs. */
2004 tc_dma_process_isoc_urb(urb
);
2006 /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
2007 tc_dma_process_queue(epid
);
2010 local_irq_restore(flags
);
2016 /* remove an URB from the transfer controller queues (called from hcd_driver)*/
2017 static int tc_urb_dequeue(struct usb_hcd
*hcd
, struct urb
*urb
, int status
) {
2018 struct crisv10_urb_priv
*urb_priv
;
2019 unsigned long flags
;
2023 /* Disable interrupts here since a descriptor interrupt for the isoc epid
2024 will modify the sb list. This could possibly be done more granular, but
2025 urb_dequeue should not be used frequently anyway.
2027 local_irq_save(flags
);
2029 urb
->status
= status
;
2030 urb_priv
= urb
->hcpriv
;
2033 /* This happens if a device driver calls unlink on an urb that
2034 was never submitted (lazy driver) or if the urb was completed
2035 while dequeue was being called. */
2036 tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb
);
2037 local_irq_restore(flags
);
2040 epid
= urb_priv
->epid
;
2042 tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
2043 (urb
== activeUrbList
[epid
]) ? "active" : "queued",
2044 (unsigned int)urb
, urb_priv
->urb_num
, str_dir(urb
->pipe
),
2045 str_type(urb
->pipe
), epid
, urb
->status
,
2046 (urb_priv
->later_data
) ? "later-sched" : "");
2048 /* For Bulk, Ctrl and Intr are only one URB active at a time. So any URB
2049 that isn't active can be dequeued by just removing it from the queue */
2050 if(usb_pipebulk(urb
->pipe
) || usb_pipecontrol(urb
->pipe
) ||
2051 usb_pipeint(urb
->pipe
)) {
2053 /* Check if URB haven't gone further than the queue */
2054 if(urb
!= activeUrbList
[epid
]) {
2055 ASSERT(urb_priv
->later_data
== NULL
);
2056 tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
2057 " (not active)\n", (unsigned int)urb
, urb_priv
->urb_num
,
2058 str_dir(urb
->pipe
), str_type(urb
->pipe
), epid
);
2060 /* Finish the URB with error status from USB core */
2061 tc_finish_urb(hcd
, urb
, urb
->status
);
2062 local_irq_restore(flags
);
2067 /* Set URB status to Unlink for handling when interrupt comes. */
2068 urb_priv
->urb_state
= UNLINK
;
2070 /* Differentiate dequeing of Bulk and Ctrl from Isoc and Intr */
2071 switch(usb_pipetype(urb
->pipe
)) {
2073 /* Check if EP still is enabled */
2074 if (TxBulkEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2075 /* The EP was enabled, disable it. */
2076 TxBulkEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2078 /* Kicking dummy list out of the party. */
2079 TxBulkEPList
[epid
].next
= virt_to_phys(&TxBulkEPList
[(epid
+ 1) % NBR_OF_EPIDS
]);
2082 /* Check if EP still is enabled */
2083 if (TxCtrlEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2084 /* The EP was enabled, disable it. */
2085 TxCtrlEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2088 case PIPE_ISOCHRONOUS
:
2089 /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
2090 finish_isoc_urb(). Because there might the case when URB is dequeued
2091 but there are other valid URBs waiting */
2093 /* Check if In Isoc EP still is enabled */
2094 if (TxIsocEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2095 /* The EP was enabled, disable it. */
2096 TxIsocEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2099 case PIPE_INTERRUPT
:
2100 /* Special care is taken for interrupt URBs. EPs are unlinked in
2107 /* Asynchronous unlink, finish the URB later from scheduled or other
2108 event (data finished, error) */
2109 tc_finish_urb_later(hcd
, urb
, urb
->status
);
2111 local_irq_restore(flags
);
2117 static void tc_sync_finish_epid(struct usb_hcd
*hcd
, int epid
) {
2118 volatile int timeout
= 10000;
2120 struct crisv10_urb_priv
* urb_priv
;
2121 unsigned long flags
;
2123 volatile struct USB_EP_Desc
*first_ep
; /* First EP in the list. */
2124 volatile struct USB_EP_Desc
*curr_ep
; /* Current EP, the iterator. */
2125 volatile struct USB_EP_Desc
*next_ep
; /* The EP after current. */
2127 int type
= epid_state
[epid
].type
;
2129 /* Setting this flag will cause enqueue() to return -ENOENT for new
2130 submitions on this endpoint and finish_urb() wont process queue further */
2131 epid_state
[epid
].disabled
= 1;
2135 /* Check if EP still is enabled */
2136 if (TxBulkEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2137 /* The EP was enabled, disable it. */
2138 TxBulkEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2139 tc_warn("sync_finish: Disabling EP for epid:%d\n", epid
);
2141 /* Do busy-wait until DMA not using this EP descriptor anymore */
2142 while((*R_DMA_CH8_SUB0_EP
==
2143 virt_to_phys(&TxBulkEPList
[epid
])) &&
2150 /* Check if EP still is enabled */
2151 if (TxCtrlEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2152 /* The EP was enabled, disable it. */
2153 TxCtrlEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2154 tc_warn("sync_finish: Disabling EP for epid:%d\n", epid
);
2156 /* Do busy-wait until DMA not using this EP descriptor anymore */
2157 while((*R_DMA_CH8_SUB1_EP
==
2158 virt_to_phys(&TxCtrlEPList
[epid
])) &&
2163 case PIPE_INTERRUPT
:
2164 local_irq_save(flags
);
2165 /* Disable all Intr EPs belonging to epid */
2166 first_ep
= &TxIntrEPList
[0];
2169 next_ep
= (struct USB_EP_Desc
*)phys_to_virt(curr_ep
->next
);
2170 if (IO_EXTRACT(USB_EP_command
, epid
, next_ep
->command
) == epid
) {
2172 next_ep
->command
&= ~IO_MASK(USB_EP_command
, enable
);
2174 curr_ep
= phys_to_virt(curr_ep
->next
);
2175 } while (curr_ep
!= first_ep
);
2177 local_irq_restore(flags
);
2180 case PIPE_ISOCHRONOUS
:
2181 /* Check if EP still is enabled */
2182 if (TxIsocEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2183 tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid
);
2184 /* The EP was enabled, disable it. */
2185 TxIsocEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2187 while((*R_DMA_CH8_SUB3_EP
== virt_to_phys(&TxIsocEPList
[epid
])) &&
2193 local_irq_save(flags
);
2195 /* Finish if there is active URB for this endpoint */
2196 if(activeUrbList
[epid
] != NULL
) {
2197 urb
= activeUrbList
[epid
];
2198 urb_priv
= urb
->hcpriv
;
2200 tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
2201 (urb
== activeUrbList
[epid
]) ? "active" : "queued",
2202 (unsigned int)urb
, urb_priv
->urb_num
, str_dir(urb
->pipe
),
2203 str_type(urb
->pipe
), epid
, urb
->status
,
2204 (urb_priv
->later_data
) ? "later-sched" : "");
2206 tc_finish_urb(hcd
, activeUrbList
[epid
], -ENOENT
);
2207 ASSERT(activeUrbList
[epid
] == NULL
);
2210 /* Finish any queued URBs for this endpoint. There won't be any resubmitions
2211 because epid_disabled causes enqueue() to fail for this endpoint */
2212 while((urb
= urb_list_first(epid
)) != NULL
) {
2213 urb_priv
= urb
->hcpriv
;
2216 tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
2217 (urb
== activeUrbList
[epid
]) ? "active" : "queued",
2218 (unsigned int)urb
, urb_priv
->urb_num
, str_dir(urb
->pipe
),
2219 str_type(urb
->pipe
), epid
, urb
->status
,
2220 (urb_priv
->later_data
) ? "later-sched" : "");
2222 tc_finish_urb(hcd
, urb
, -ENOENT
);
2224 epid_state
[epid
].disabled
= 0;
2225 local_irq_restore(flags
);
2228 /* free resources associated with an endpoint (called from hcd_driver) */
2229 static void tc_endpoint_disable(struct usb_hcd
*hcd
,
2230 struct usb_host_endpoint
*ep
) {
2232 /* Only free epid if it has been allocated. We get two endpoint_disable
2233 requests for ctrl endpoints so ignore the second one */
2234 if(ep
->hcpriv
!= NULL
) {
2235 struct crisv10_ep_priv
*ep_priv
= ep
->hcpriv
;
2236 int epid
= ep_priv
->epid
;
2237 tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
2238 (unsigned int)ep
, (unsigned int)ep
->hcpriv
,
2239 endpoint_to_str(&(ep
->desc
)), epid
);
2241 tc_sync_finish_epid(hcd
, epid
);
2243 ASSERT(activeUrbList
[epid
] == NULL
);
2244 ASSERT(list_empty(&urb_list
[epid
]));
2248 tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep
,
2249 (unsigned int)ep
->hcpriv
, endpoint_to_str(&(ep
->desc
)));
2254 static void tc_finish_urb_later_proc(struct work_struct
* work
) {
2255 unsigned long flags
;
2256 struct urb_later_data
* uld
;
2258 local_irq_save(flags
);
2259 uld
= container_of(work
, struct urb_later_data
, dws
.work
);
2260 if(uld
->urb
== NULL
) {
2261 late_dbg("Later finish of URB = NULL (allready finished)\n");
2263 struct crisv10_urb_priv
* urb_priv
= uld
->urb
->hcpriv
;
2265 if(urb_priv
->urb_num
== uld
->urb_num
) {
2266 late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld
->urb
),
2268 if(uld
->status
!= uld
->urb
->status
) {
2269 errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
2270 uld
->urb
->status
, uld
->status
);
2272 if(uld
!= urb_priv
->later_data
) {
2273 panic("Scheduled uld not same as URBs uld\n");
2275 tc_finish_urb(uld
->hcd
, uld
->urb
, uld
->status
);
2277 late_warn("Ignoring later finish of URB:0x%x[%d]"
2278 ", urb_num doesn't match current URB:0x%x[%d]",
2279 (unsigned int)(uld
->urb
), uld
->urb_num
,
2280 (unsigned int)(uld
->urb
), urb_priv
->urb_num
);
2283 local_irq_restore(flags
);
2284 kmem_cache_free(later_data_cache
, uld
);
2287 static void tc_finish_urb_later(struct usb_hcd
*hcd
, struct urb
*urb
,
2289 struct crisv10_urb_priv
*urb_priv
= urb
->hcpriv
;
2290 struct urb_later_data
* uld
;
2294 if(urb_priv
->later_data
!= NULL
) {
2295 /* Later-finish allready scheduled for this URB, just update status to
2296 return when finishing later */
2297 errno_dbg("Later-finish schedule change URB status:%d with new"
2298 " status:%d\n", urb_priv
->later_data
->status
, status
);
2300 urb_priv
->later_data
->status
= status
;
2304 uld
= kmem_cache_alloc(later_data_cache
, GFP_ATOMIC
);
2309 uld
->urb_num
= urb_priv
->urb_num
;
2310 uld
->status
= status
;
2312 INIT_DELAYED_WORK(&uld
->dws
, tc_finish_urb_later_proc
);
2313 urb_priv
->later_data
= uld
;
2315 /* Schedule the finishing of the URB to happen later */
2316 schedule_delayed_work(&uld
->dws
, LATER_TIMER_DELAY
);
2319 static void tc_finish_isoc_urb(struct usb_hcd
*hcd
, struct urb
*urb
,
2322 static void tc_finish_urb(struct usb_hcd
*hcd
, struct urb
*urb
, int status
) {
2323 struct crisv10_hcd
* crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
2324 struct crisv10_urb_priv
*urb_priv
= urb
->hcpriv
;
2330 ASSERT(urb_priv
!= NULL
);
2331 epid
= urb_priv
->epid
;
2332 urb_num
= urb_priv
->urb_num
;
2334 if(urb
!= activeUrbList
[epid
]) {
2335 if(urb_list_entry(urb
, epid
)) {
2336 /* Remove this URB from the list. Only happens when URB are finished
2337 before having been processed (dequeing) */
2338 urb_list_del(urb
, epid
);
2340 tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
2341 " epid:%d\n", (unsigned int)urb
, urb_num
, epid
);
2345 /* Cancel any pending later-finish of this URB */
2346 if(urb_priv
->later_data
) {
2347 urb_priv
->later_data
->urb
= NULL
;
2350 /* For an IN pipe, we always set the actual length, regardless of whether
2351 there was an error or not (which means the device driver can use the data
2353 if(usb_pipein(urb
->pipe
)) {
2354 urb
->actual_length
= urb_priv
->rx_offset
;
2356 /* Set actual_length for OUT urbs also; the USB mass storage driver seems
2358 if (status
== 0 && urb
->status
== -EINPROGRESS
) {
2359 urb
->actual_length
= urb
->transfer_buffer_length
;
2361 /* We wouldn't know of any partial writes if there was an error. */
2362 urb
->actual_length
= 0;
2367 /* URB status mangling */
2368 if(urb
->status
== -EINPROGRESS
) {
2369 /* The USB core hasn't changed the status, let's set our finish status */
2370 urb
->status
= status
;
2372 if ((status
== 0) && (urb
->transfer_flags
& URB_SHORT_NOT_OK
) &&
2373 usb_pipein(urb
->pipe
) &&
2374 (urb
->actual_length
!= urb
->transfer_buffer_length
)) {
2375 /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
2376 max length) is to be treated as an error. */
2377 errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
2378 " data:%d\n", (unsigned int)urb
, urb_num
,
2379 urb
->actual_length
);
2380 urb
->status
= -EREMOTEIO
;
2383 if(urb_priv
->urb_state
== UNLINK
) {
2384 /* URB has been requested to be unlinked asynchronously */
2385 urb
->status
= -ECONNRESET
;
2386 errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
2387 (unsigned int)urb
, urb_num
, urb
->status
);
2390 /* The USB Core wants to signal some error via the URB, pass it through */
2393 /* use completely different finish function for Isoc URBs */
2394 if(usb_pipeisoc(urb
->pipe
)) {
2395 tc_finish_isoc_urb(hcd
, urb
, status
);
2399 /* Do special unlinking of EPs for Intr traffic */
2400 if(usb_pipeint(urb
->pipe
)) {
2401 tc_dma_unlink_intr_urb(urb
);
2404 /* Release allocated bandwidth for periodic transfers */
2405 if(usb_pipeint(urb
->pipe
) || usb_pipeisoc(urb
->pipe
))
2406 crisv10_usb_release_bandwidth(hcd
,
2407 usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
,
2408 urb_priv
->bandwidth
);
2410 /* This URB is active on EP */
2411 if(urb
== activeUrbList
[epid
]) {
2412 /* We need to fiddle with the toggle bits because the hardware doesn't do
2414 toggle
= etrax_epid_get_toggle(epid
, usb_pipeout(urb
->pipe
));
2415 usb_settoggle(urb
->dev
, usb_pipeendpoint(urb
->pipe
),
2416 usb_pipeout(urb
->pipe
), toggle
);
2418 /* Checks for Ctrl and Bulk EPs */
2419 switch(usb_pipetype(urb
->pipe
)) {
2421 /* Check so Bulk EP realy is disabled before finishing active URB */
2422 ASSERT((TxBulkEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) ==
2423 IO_STATE(USB_EP_command
, enable
, no
));
2424 /* Disable sub-pointer for EP to avoid next tx_interrupt() to
2426 TxBulkEPList
[epid
].sub
= 0;
2427 /* No need to wait for the DMA before changing the next pointer.
2428 The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
2429 the last one (INVALID_EPID) for actual traffic. */
2430 TxBulkEPList
[epid
].next
=
2431 virt_to_phys(&TxBulkEPList
[(epid
+ 1) % NBR_OF_EPIDS
]);
2434 /* Check so Ctrl EP realy is disabled before finishing active URB */
2435 ASSERT((TxCtrlEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) ==
2436 IO_STATE(USB_EP_command
, enable
, no
));
2437 /* Disable sub-pointer for EP to avoid next tx_interrupt() to
2439 TxCtrlEPList
[epid
].sub
= 0;
2444 /* Free HC-private URB data*/
2445 urb_priv_free(hcd
, urb
);
2448 errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
2449 (unsigned int)urb
, urb_num
, str_dir(urb
->pipe
),
2450 str_type(urb
->pipe
), urb
->actual_length
, urb
->status
);
2452 tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
2453 (unsigned int)urb
, urb_num
, str_dir(urb
->pipe
),
2454 str_type(urb
->pipe
), urb
->actual_length
, urb
->status
);
2457 /* If we just finished an active URB, clear active pointer. */
2458 if (urb
== activeUrbList
[epid
]) {
2459 /* Make URB not active on EP anymore */
2460 activeUrbList
[epid
] = NULL
;
2462 if(urb
->status
== 0) {
2463 /* URB finished sucessfully, process queue to see if there are any more
2464 URBs waiting before we call completion function.*/
2465 if(crisv10_hcd
->running
) {
2466 /* Only process queue if USB controller is running */
2467 tc_dma_process_queue(epid
);
2469 tc_warn("No processing of queue for epid:%d, USB Controller not"
2470 " running\n", epid
);
2475 /* Hand the URB from HCD to its USB device driver, using its completion
2477 usb_hcd_giveback_urb (hcd
, urb
, status
);
2479 /* Check the queue once more if the URB returned with error, because we
2480 didn't do it before the completion function because the specification
2481 states that the queue should not restart until all it's unlinked
2482 URBs have been fully retired, with the completion functions run */
2483 if(crisv10_hcd
->running
) {
2484 /* Only process queue if USB controller is running */
2485 tc_dma_process_queue(epid
);
2487 tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
2494 static void tc_finish_isoc_urb(struct usb_hcd
*hcd
, struct urb
*urb
,
2496 struct crisv10_urb_priv
*urb_priv
= urb
->hcpriv
;
2498 volatile int timeout
= 10000;
2502 epid
= urb_priv
->epid
;
2504 ASSERT(usb_pipeisoc(urb
->pipe
));
2506 /* Set that all isoc packets have status and length set before
2507 completing the urb. */
2508 for (i
= urb_priv
->isoc_packet_counter
; i
< urb
->number_of_packets
; i
++){
2509 urb
->iso_frame_desc
[i
].actual_length
= 0;
2510 urb
->iso_frame_desc
[i
].status
= -EPROTO
;
2513 /* Check if the URB is currently active (done or error) */
2514 if(urb
== activeUrbList
[epid
]) {
2515 /* Check if there are another In Isoc URB queued for this epid */
2516 if (!list_empty(&urb_list
[epid
])&& !epid_state
[epid
].disabled
) {
2517 /* Move it from queue to active and mark it started so Isoc transfers
2518 won't be interrupted.
2519 All Isoc URBs data transfers are already added to DMA lists so we
2520 don't have to insert anything in DMA lists here. */
2521 activeUrbList
[epid
] = urb_list_first(epid
);
2522 ((struct crisv10_urb_priv
*)(activeUrbList
[epid
]->hcpriv
))->urb_state
=
2524 urb_list_del(activeUrbList
[epid
], epid
);
2527 errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
2528 " status:%d, new waiting URB:0x%x[%d]\n",
2529 (unsigned int)urb
, urb_priv
->urb_num
, str_dir(urb
->pipe
),
2530 str_type(urb
->pipe
), urb_priv
->isoc_packet_counter
,
2531 urb
->number_of_packets
, urb
->status
,
2532 (unsigned int)activeUrbList
[epid
],
2533 ((struct crisv10_urb_priv
*)(activeUrbList
[epid
]->hcpriv
))->urb_num
);
2536 } else { /* No other URB queued for this epid */
2538 errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
2539 " status:%d, no new URB waiting\n",
2540 (unsigned int)urb
, urb_priv
->urb_num
, str_dir(urb
->pipe
),
2541 str_type(urb
->pipe
), urb_priv
->isoc_packet_counter
,
2542 urb
->number_of_packets
, urb
->status
);
2545 /* Check if EP is still enabled, then shut it down. */
2546 if (TxIsocEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2547 isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid
);
2549 /* Should only occur for In Isoc EPs where SB isn't consumed. */
2550 ASSERT(usb_pipein(urb
->pipe
));
2552 /* Disable it and wait for it to stop */
2553 TxIsocEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2555 /* Ah, the luxury of busy-wait. */
2556 while((*R_DMA_CH8_SUB3_EP
== virt_to_phys(&TxIsocEPList
[epid
])) &&
2560 /* Unlink SB to say that epid is finished. */
2561 TxIsocEPList
[epid
].sub
= 0;
2562 TxIsocEPList
[epid
].hw_len
= 0;
2564 /* No URB active for EP anymore */
2565 activeUrbList
[epid
] = NULL
;
2567 } else { /* Finishing of not active URB (queued up with SBs thought) */
2568 isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
2569 " SB queued but not active\n",
2570 (unsigned int)urb
, str_dir(urb
->pipe
),
2571 urb_priv
->isoc_packet_counter
, urb
->number_of_packets
,
2573 if(usb_pipeout(urb
->pipe
)) {
2574 /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
2575 struct USB_SB_Desc
*iter_sb
, *prev_sb
, *next_sb
;
2577 iter_sb
= TxIsocEPList
[epid
].sub
?
2578 phys_to_virt(TxIsocEPList
[epid
].sub
) : 0;
2581 /* SB that is linked before this URBs first SB */
2582 while (iter_sb
&& (iter_sb
!= urb_priv
->first_sb
)) {
2584 iter_sb
= iter_sb
->next
? phys_to_virt(iter_sb
->next
) : 0;
2588 /* Unlink of the URB currently being transmitted. */
2590 iter_sb
= TxIsocEPList
[epid
].sub
? phys_to_virt(TxIsocEPList
[epid
].sub
) : 0;
2593 while (iter_sb
&& (iter_sb
!= urb_priv
->last_sb
)) {
2594 iter_sb
= iter_sb
->next
? phys_to_virt(iter_sb
->next
) : 0;
2598 next_sb
= iter_sb
->next
? phys_to_virt(iter_sb
->next
) : 0;
2600 /* This should only happen if the DMA has completed
2601 processing the SB list for this EP while interrupts
2603 isoc_dbg("Isoc urb not found, already sent?\n");
2607 prev_sb
->next
= next_sb
? virt_to_phys(next_sb
) : 0;
2609 TxIsocEPList
[epid
].sub
= next_sb
? virt_to_phys(next_sb
) : 0;
2614 /* Free HC-private URB data*/
2615 bandwidth
= urb_priv
->bandwidth
;
2616 urb_priv_free(hcd
, urb
);
2618 crisv10_usb_release_bandwidth(hcd
, usb_pipeisoc(urb
->pipe
), bandwidth
);
2620 /* Hand the URB from HCD to its USB device driver, using its completion
2622 usb_hcd_giveback_urb (hcd
, urb
, status
);
2625 static __u32 urb_num
= 0;
2627 /* allocate and initialize URB private data */
2628 static int urb_priv_create(struct usb_hcd
*hcd
, struct urb
*urb
, int epid
,
2630 struct crisv10_urb_priv
*urb_priv
;
2632 urb_priv
= kmalloc(sizeof *urb_priv
, mem_flags
);
2635 memset(urb_priv
, 0, sizeof *urb_priv
);
2637 urb_priv
->epid
= epid
;
2638 urb_priv
->urb_state
= NOT_STARTED
;
2640 urb
->hcpriv
= urb_priv
;
2641 /* Assign URB a sequence number, and increment counter */
2642 urb_priv
->urb_num
= urb_num
;
2644 urb_priv
->bandwidth
= 0;
2648 /* free URB private data */
2649 static void urb_priv_free(struct usb_hcd
*hcd
, struct urb
*urb
) {
2651 struct crisv10_urb_priv
*urb_priv
= urb
->hcpriv
;
2652 ASSERT(urb_priv
!= 0);
2654 /* Check it has any SBs linked that needs to be freed*/
2655 if(urb_priv
->first_sb
!= NULL
) {
2656 struct USB_SB_Desc
*next_sb
, *first_sb
, *last_sb
;
2658 first_sb
= urb_priv
->first_sb
;
2659 last_sb
= urb_priv
->last_sb
;
2661 while(first_sb
!= last_sb
) {
2662 next_sb
= (struct USB_SB_Desc
*)phys_to_virt(first_sb
->next
);
2663 kmem_cache_free(usb_desc_cache
, first_sb
);
2667 kmem_cache_free(usb_desc_cache
, last_sb
);
2671 /* Check if it has any EPs in its Intr pool that also needs to be freed */
2672 if(urb_priv
->intr_ep_pool_length
> 0) {
2673 for(i
= 0; i
< urb_priv
->intr_ep_pool_length
; i
++) {
2674 kfree(urb_priv
->intr_ep_pool
[i
]);
2677 tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
2678 urb_priv->intr_ep_pool_length, (unsigned int)urb);
2686 static int ep_priv_create(struct usb_host_endpoint
*ep
, int mem_flags
) {
2687 struct crisv10_ep_priv
*ep_priv
;
2689 ep_priv
= kmalloc(sizeof *ep_priv
, mem_flags
);
2692 memset(ep_priv
, 0, sizeof *ep_priv
);
2694 ep
->hcpriv
= ep_priv
;
2698 static void ep_priv_free(struct usb_host_endpoint
*ep
) {
2699 struct crisv10_ep_priv
*ep_priv
= ep
->hcpriv
;
2706 * usb_check_bandwidth():
2708 * old_alloc is from host_controller->bandwidth_allocated in microseconds;
2709 * bustime is from calc_bus_time(), but converted to microseconds.
2711 * returns <bustime in us> if successful,
2712 * or -ENOSPC if bandwidth request fails.
2715 * This initial implementation does not use Endpoint.bInterval
2716 * in managing bandwidth allocation.
2717 * It probably needs to be expanded to use Endpoint.bInterval.
2718 * This can be done as a later enhancement (correction).
2720 * This will also probably require some kind of
2721 * frame allocation tracking...meaning, for example,
2722 * that if multiple drivers request interrupts every 10 USB frames,
2723 * they don't all have to be allocated at
2724 * frame numbers N, N+10, N+20, etc. Some of them could be at
2725 * N+11, N+21, N+31, etc., and others at
2726 * N+12, N+22, N+32, etc.
2728 * Similarly for isochronous transfers...
2730 * Individual HCDs can schedule more directly ... this logic
2731 * is not correct for high speed transfers.
2733 static int crisv10_usb_check_bandwidth(
2734 struct usb_device
*dev
,
2737 unsigned int pipe
= urb
->pipe
;
2739 int is_in
= usb_pipein (pipe
);
2740 int is_iso
= usb_pipeisoc (pipe
);
2741 int old_alloc
= dev
->bus
->bandwidth_allocated
;
2744 bustime
= NS_TO_US (usb_calc_bus_time (dev
->speed
, is_in
, is_iso
,
2745 usb_maxpacket (dev
, pipe
, !is_in
)));
2747 bustime
/= urb
->number_of_packets
;
2749 new_alloc
= old_alloc
+ (int) bustime
;
2750 if (new_alloc
> FRAME_TIME_MAX_USECS_ALLOC
) {
2751 dev_dbg (&dev
->dev
, "usb_check_bandwidth FAILED: %d + %ld = %d usec\n",
2752 old_alloc
, bustime
, new_alloc
);
2753 bustime
= -ENOSPC
; /* report error */
2760 * usb_claim_bandwidth - records bandwidth for a periodic transfer
2761 * @dev: source/target of request
2762 * @urb: request (urb->dev == dev)
2763 * @bustime: bandwidth consumed, in (average) microseconds per frame
2764 * @isoc: true iff the request is isochronous
2766 * HCDs are expected not to overcommit periodic bandwidth, and to record such
2767 * reservations whenever endpoints are added to the periodic schedule.
2769 * FIXME averaging per-frame is suboptimal. Better to sum over the HCD's
2770 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable
2771 * for EHCI (256/512/1024 frames, default 1024) and have the bus expose how
2772 * large its periodic schedule is.
2774 static void crisv10_usb_claim_bandwidth(
2775 struct usb_device
*dev
,
2776 struct urb
*urb
, int bustime
, int isoc
)
2778 dev
->bus
->bandwidth_allocated
+= bustime
;
2780 dev
->bus
->bandwidth_isoc_reqs
++;
2782 dev
->bus
->bandwidth_int_reqs
++;
2783 struct crisv10_urb_priv
*urb_priv
;
2784 urb_priv
= urb
->hcpriv
;
2785 urb_priv
->bandwidth
= bustime
;
2789 * usb_release_bandwidth - reverses effect of usb_claim_bandwidth()
2790 * @hcd: host controller
2791 * @isoc: true iff the request is isochronous
2792 * @bandwidth: bandwidth returned
2794 * This records that previously allocated bandwidth has been released.
2795 * Bandwidth is released when endpoints are removed from the host controller's
2796 * periodic schedule.
2798 static void crisv10_usb_release_bandwidth(
2799 struct usb_hcd
*hcd
,
2803 hcd_to_bus(hcd
)->bandwidth_allocated
-= bandwidth
;
2805 hcd_to_bus(hcd
)->bandwidth_isoc_reqs
--;
2807 hcd_to_bus(hcd
)->bandwidth_int_reqs
--;
2811 /* EPID handling functions, managing EP-list in Etrax through wrappers */
2812 /* ------------------------------------------------------------------- */
2814 /* Sets up a new EPID for an endpoint or returns existing if found */
2815 static int tc_setup_epid(struct usb_host_endpoint
*ep
, struct urb
*urb
,
2818 char devnum
, endpoint
, out_traffic
, slow
;
2821 struct crisv10_ep_priv
*ep_priv
= ep
->hcpriv
;
2825 /* Check if a valid epid already is setup for this endpoint */
2826 if(ep_priv
!= NULL
) {
2827 return ep_priv
->epid
;
2830 /* We must find and initiate a new epid for this urb. */
2831 epid
= tc_allocate_epid();
2834 /* Failed to allocate a new epid. */
2839 /* We now have a new epid to use. Claim it. */
2840 epid_state
[epid
].inuse
= 1;
2842 /* Init private data for new endpoint */
2843 if(ep_priv_create(ep
, mem_flags
) != 0) {
2846 ep_priv
= ep
->hcpriv
;
2847 ep_priv
->epid
= epid
;
2849 devnum
= usb_pipedevice(urb
->pipe
);
2850 endpoint
= usb_pipeendpoint(urb
->pipe
);
2851 slow
= (urb
->dev
->speed
== USB_SPEED_LOW
);
2852 maxlen
= usb_maxpacket(urb
->dev
, urb
->pipe
, usb_pipeout(urb
->pipe
));
2854 if (usb_pipetype(urb
->pipe
) == PIPE_CONTROL
) {
2855 /* We want both IN and OUT control traffic to be put on the same
2859 out_traffic
= usb_pipeout(urb
->pipe
);
2862 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
) {
2863 epid_data
= IO_STATE(R_USB_EPT_DATA_ISO
, valid
, yes
) |
2864 /* FIXME: Change any to the actual port? */
2865 IO_STATE(R_USB_EPT_DATA_ISO
, port
, any
) |
2866 IO_FIELD(R_USB_EPT_DATA_ISO
, max_len
, maxlen
) |
2867 IO_FIELD(R_USB_EPT_DATA_ISO
, ep
, endpoint
) |
2868 IO_FIELD(R_USB_EPT_DATA_ISO
, dev
, devnum
);
2869 etrax_epid_iso_set(epid
, epid_data
);
2871 epid_data
= IO_STATE(R_USB_EPT_DATA
, valid
, yes
) |
2872 IO_FIELD(R_USB_EPT_DATA
, low_speed
, slow
) |
2873 /* FIXME: Change any to the actual port? */
2874 IO_STATE(R_USB_EPT_DATA
, port
, any
) |
2875 IO_FIELD(R_USB_EPT_DATA
, max_len
, maxlen
) |
2876 IO_FIELD(R_USB_EPT_DATA
, ep
, endpoint
) |
2877 IO_FIELD(R_USB_EPT_DATA
, dev
, devnum
);
2878 etrax_epid_set(epid
, epid_data
);
2881 epid_state
[epid
].out_traffic
= out_traffic
;
2882 epid_state
[epid
].type
= usb_pipetype(urb
->pipe
);
2884 tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
2885 (unsigned int)ep
, epid
, devnum
, endpoint
, maxlen
,
2886 str_type(urb
->pipe
), out_traffic
? "out" : "in",
2887 slow
? "low" : "full");
2889 /* Enable Isoc eof interrupt if we set up the first Isoc epid */
2890 if(usb_pipeisoc(urb
->pipe
)) {
2891 isoc_epid_counter
++;
2892 if(isoc_epid_counter
== 1) {
2893 isoc_warn("Enabled Isoc eof interrupt\n");
2894 *R_USB_IRQ_MASK_SET
= IO_STATE(R_USB_IRQ_MASK_SET
, iso_eof
, set
);
2902 static void tc_free_epid(struct usb_host_endpoint
*ep
) {
2903 unsigned long flags
;
2904 struct crisv10_ep_priv
*ep_priv
= ep
->hcpriv
;
2906 volatile int timeout
= 10000;
2910 if (ep_priv
== NULL
) {
2911 tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep
);
2916 epid
= ep_priv
->epid
;
2918 /* Disable Isoc eof interrupt if we free the last Isoc epid */
2919 if(epid_isoc(epid
)) {
2920 ASSERT(isoc_epid_counter
> 0);
2921 isoc_epid_counter
--;
2922 if(isoc_epid_counter
== 0) {
2923 *R_USB_IRQ_MASK_CLR
= IO_STATE(R_USB_IRQ_MASK_CLR
, iso_eof
, clr
);
2924 isoc_warn("Disabled Isoc eof interrupt\n");
2928 /* Take lock manualy instead of in epid_x_x wrappers,
2929 because we need to be polling here */
2930 spin_lock_irqsave(&etrax_epid_lock
, flags
);
2932 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, epid
);
2934 while((*R_USB_EPT_DATA
& IO_MASK(R_USB_EPT_DATA
, hold
)) &&
2936 /* This will, among other things, set the valid field to 0. */
2937 *R_USB_EPT_DATA
= 0;
2938 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
2940 /* Free resource in software state info list */
2941 epid_state
[epid
].inuse
= 0;
2943 /* Free private endpoint data */
2949 static int tc_allocate_epid(void) {
2952 for (i
= 0; i
< NBR_OF_EPIDS
; i
++) {
2953 if (!epid_inuse(i
)) {
2959 tc_warn("Found no free epids\n");
2965 /* Wrappers around the list functions (include/linux/list.h). */
2966 /* ---------------------------------------------------------- */
2967 static inline int __urb_list_empty(int epid
) {
2969 retval
= list_empty(&urb_list
[epid
]);
2973 /* Returns first urb for this epid, or NULL if list is empty. */
2974 static inline struct urb
*urb_list_first(int epid
) {
2975 unsigned long flags
;
2976 struct urb
*first_urb
= 0;
2977 spin_lock_irqsave(&urb_list_lock
, flags
);
2978 if (!__urb_list_empty(epid
)) {
2979 /* Get the first urb (i.e. head->next). */
2980 urb_entry_t
*urb_entry
= list_entry((&urb_list
[epid
])->next
, urb_entry_t
, list
);
2981 first_urb
= urb_entry
->urb
;
2983 spin_unlock_irqrestore(&urb_list_lock
, flags
);
2987 /* Adds an urb_entry last in the list for this epid. */
2988 static inline void urb_list_add(struct urb
*urb
, int epid
, int mem_flags
) {
2989 unsigned long flags
;
2990 urb_entry_t
*urb_entry
= (urb_entry_t
*)kmalloc(sizeof(urb_entry_t
), mem_flags
);
2993 urb_entry
->urb
= urb
;
2994 spin_lock_irqsave(&urb_list_lock
, flags
);
2995 list_add_tail(&urb_entry
->list
, &urb_list
[epid
]);
2996 spin_unlock_irqrestore(&urb_list_lock
, flags
);
2999 /* Search through the list for an element that contains this urb. (The list
3000 is expected to be short and the one we are about to delete will often be
3001 the first in the list.)
3002 Should be protected by spin_locks in calling function */
3003 static inline urb_entry_t
*__urb_list_entry(struct urb
*urb
, int epid
) {
3004 struct list_head
*entry
;
3005 struct list_head
*tmp
;
3006 urb_entry_t
*urb_entry
;
3008 list_for_each_safe(entry
, tmp
, &urb_list
[epid
]) {
3009 urb_entry
= list_entry(entry
, urb_entry_t
, list
);
3011 ASSERT(urb_entry
->urb
);
3013 if (urb_entry
->urb
== urb
) {
3020 /* Same function as above but for global use. Protects list by spinlock */
3021 static inline urb_entry_t
*urb_list_entry(struct urb
*urb
, int epid
) {
3022 unsigned long flags
;
3023 urb_entry_t
*urb_entry
;
3024 spin_lock_irqsave(&urb_list_lock
, flags
);
3025 urb_entry
= __urb_list_entry(urb
, epid
);
3026 spin_unlock_irqrestore(&urb_list_lock
, flags
);
3030 /* Delete an urb from the list. */
3031 static inline void urb_list_del(struct urb
*urb
, int epid
) {
3032 unsigned long flags
;
3033 urb_entry_t
*urb_entry
;
3035 /* Delete entry and free. */
3036 spin_lock_irqsave(&urb_list_lock
, flags
);
3037 urb_entry
= __urb_list_entry(urb
, epid
);
3040 list_del(&urb_entry
->list
);
3041 spin_unlock_irqrestore(&urb_list_lock
, flags
);
3045 /* Move an urb to the end of the list. */
3046 static inline void urb_list_move_last(struct urb
*urb
, int epid
) {
3047 unsigned long flags
;
3048 urb_entry_t
*urb_entry
;
3050 spin_lock_irqsave(&urb_list_lock
, flags
);
3051 urb_entry
= __urb_list_entry(urb
, epid
);
3054 list_del(&urb_entry
->list
);
3055 list_add_tail(&urb_entry
->list
, &urb_list
[epid
]);
3056 spin_unlock_irqrestore(&urb_list_lock
, flags
);
3059 /* Get the next urb in the list. */
3060 static inline struct urb
*urb_list_next(struct urb
*urb
, int epid
) {
3061 unsigned long flags
;
3062 urb_entry_t
*urb_entry
;
3064 spin_lock_irqsave(&urb_list_lock
, flags
);
3065 urb_entry
= __urb_list_entry(urb
, epid
);
3068 if (urb_entry
->list
.next
!= &urb_list
[epid
]) {
3069 struct list_head
*elem
= urb_entry
->list
.next
;
3070 urb_entry
= list_entry(elem
, urb_entry_t
, list
);
3071 spin_unlock_irqrestore(&urb_list_lock
, flags
);
3072 return urb_entry
->urb
;
3074 spin_unlock_irqrestore(&urb_list_lock
, flags
);
3079 struct USB_EP_Desc
* create_ep(int epid
, struct USB_SB_Desc
* sb_desc
,
3081 struct USB_EP_Desc
*ep_desc
;
3082 ep_desc
= (struct USB_EP_Desc
*) kmem_cache_alloc(usb_desc_cache
, mem_flags
);
3085 memset(ep_desc
, 0, sizeof(struct USB_EP_Desc
));
3087 ep_desc
->hw_len
= 0;
3088 ep_desc
->command
= (IO_FIELD(USB_EP_command
, epid
, epid
) |
3089 IO_STATE(USB_EP_command
, enable
, yes
));
3090 if(sb_desc
== NULL
) {
3093 ep_desc
->sub
= virt_to_phys(sb_desc
);
3103 #define CMD_EOL IO_STATE(USB_SB_command, eol, yes)
3104 #define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
3105 #define CMD_FULL IO_STATE(USB_SB_command, full, yes)
3107 /* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
3108 SBs. Also used by create_sb_in() to avoid same allocation procedure at two
3110 struct USB_SB_Desc
* create_sb(struct USB_SB_Desc
* sb_prev
, int tt
, void* data
,
3111 int datalen
, int mem_flags
) {
3112 struct USB_SB_Desc
*sb_desc
;
3113 sb_desc
= (struct USB_SB_Desc
*)kmem_cache_alloc(usb_desc_cache
, mem_flags
);
3116 memset(sb_desc
, 0, sizeof(struct USB_SB_Desc
));
3118 sb_desc
->command
= IO_FIELD(USB_SB_command
, tt
, tt
) |
3119 IO_STATE(USB_SB_command
, eot
, yes
);
3121 sb_desc
->sw_len
= datalen
;
3123 sb_desc
->buf
= virt_to_phys(data
);
3127 if(sb_prev
!= NULL
) {
3128 sb_prev
->next
= virt_to_phys(sb_desc
);
3133 /* Creates a copy of an existing SB by allocation space for it and copy
3135 struct USB_SB_Desc
* create_sb_copy(struct USB_SB_Desc
* sb_orig
, int mem_flags
) {
3136 struct USB_SB_Desc
*sb_desc
;
3137 sb_desc
= (struct USB_SB_Desc
*)kmem_cache_alloc(usb_desc_cache
, mem_flags
);
3141 memcpy(sb_desc
, sb_orig
, sizeof(struct USB_SB_Desc
));
3145 /* A specific create_sb function for creation of in SBs. This is due to
3146 that datalen in In SBs shows how many packets we are expecting. It also
3147 sets up the rem field to show if how many bytes we expect in last packet
3148 if it's not a full one */
3149 struct USB_SB_Desc
* create_sb_in(struct USB_SB_Desc
* sb_prev
, int datalen
,
3150 int maxlen
, int mem_flags
) {
3151 struct USB_SB_Desc
*sb_desc
;
3152 sb_desc
= create_sb(sb_prev
, TT_IN
, NULL
,
3153 datalen
? (datalen
- 1) / maxlen
+ 1 : 0, mem_flags
);
3156 sb_desc
->command
|= IO_FIELD(USB_SB_command
, rem
, datalen
% maxlen
);
3160 void set_sb_cmds(struct USB_SB_Desc
*sb_desc
, __u16 flags
) {
3161 sb_desc
->command
|= flags
;
3164 int create_sb_for_urb(struct urb
*urb
, int mem_flags
) {
3165 int is_out
= !usb_pipein(urb
->pipe
);
3166 int type
= usb_pipetype(urb
->pipe
);
3167 int maxlen
= usb_maxpacket(urb
->dev
, urb
->pipe
, is_out
);
3168 int buf_len
= urb
->transfer_buffer_length
;
3169 void *buf
= buf_len
> 0 ? urb
->transfer_buffer
: NULL
;
3170 struct USB_SB_Desc
*sb_desc
= NULL
;
3172 struct crisv10_urb_priv
*urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
3173 ASSERT(urb_priv
!= NULL
);
3178 sb_desc
= create_sb(NULL
, TT_SETUP
, urb
->setup_packet
, 8, mem_flags
);
3181 set_sb_cmds(sb_desc
, CMD_FULL
);
3183 /* Attach first SB to URB */
3184 urb_priv
->first_sb
= sb_desc
;
3186 if (is_out
) { /* Out Control URB */
3187 /* If this Control OUT transfer has an optional data stage we add
3188 an OUT token before the mandatory IN (status) token */
3189 if ((buf_len
> 0) && buf
) {
3190 sb_desc
= create_sb(sb_desc
, TT_OUT
, buf
, buf_len
, mem_flags
);
3193 set_sb_cmds(sb_desc
, CMD_FULL
);
3197 /* The data length has to be exactly 1. This is due to a requirement
3198 of the USB specification that a host must be prepared to receive
3199 data in the status phase */
3200 sb_desc
= create_sb(sb_desc
, TT_IN
, NULL
, 1, mem_flags
);
3203 } else { /* In control URB */
3205 sb_desc
= create_sb_in(sb_desc
, buf_len
, maxlen
, mem_flags
);
3210 /* Read comment at zout_buffer declaration for an explanation to this. */
3211 sb_desc
= create_sb(sb_desc
, TT_ZOUT
, &zout_buffer
[0], 1, mem_flags
);
3214 /* Set descriptor interrupt flag for in URBs so we can finish URB after
3215 zout-packet has been sent */
3216 set_sb_cmds(sb_desc
, CMD_INTR
| CMD_FULL
);
3218 /* Set end-of-list flag in last SB */
3219 set_sb_cmds(sb_desc
, CMD_EOL
);
3220 /* Attach last SB to URB */
3221 urb_priv
->last_sb
= sb_desc
;
3225 if (is_out
) { /* Out Bulk URB */
3226 sb_desc
= create_sb(NULL
, TT_OUT
, buf
, buf_len
, mem_flags
);
3229 /* The full field is set to yes, even if we don't actually check that
3230 this is a full-length transfer (i.e., that transfer_buffer_length %
3232 Setting full prevents the USB controller from sending an empty packet
3233 in that case. However, if URB_ZERO_PACKET was set we want that. */
3234 if (!(urb
->transfer_flags
& URB_ZERO_PACKET
)) {
3235 set_sb_cmds(sb_desc
, CMD_FULL
);
3237 } else { /* In Bulk URB */
3238 sb_desc
= create_sb_in(NULL
, buf_len
, maxlen
, mem_flags
);
3242 /* Set end-of-list flag for last SB */
3243 set_sb_cmds(sb_desc
, CMD_EOL
);
3245 /* Attach SB to URB */
3246 urb_priv
->first_sb
= sb_desc
;
3247 urb_priv
->last_sb
= sb_desc
;
3250 case PIPE_INTERRUPT
:
3251 if(is_out
) { /* Out Intr URB */
3252 sb_desc
= create_sb(NULL
, TT_OUT
, buf
, buf_len
, mem_flags
);
3256 /* The full field is set to yes, even if we don't actually check that
3257 this is a full-length transfer (i.e., that transfer_buffer_length %
3259 Setting full prevents the USB controller from sending an empty packet
3260 in that case. However, if URB_ZERO_PACKET was set we want that. */
3261 if (!(urb
->transfer_flags
& URB_ZERO_PACKET
)) {
3262 set_sb_cmds(sb_desc
, CMD_FULL
);
3264 /* Only generate TX interrupt if it's a Out URB*/
3265 set_sb_cmds(sb_desc
, CMD_INTR
);
3267 } else { /* In Intr URB */
3268 sb_desc
= create_sb_in(NULL
, buf_len
, maxlen
, mem_flags
);
3272 /* Set end-of-list flag for last SB */
3273 set_sb_cmds(sb_desc
, CMD_EOL
);
3275 /* Attach SB to URB */
3276 urb_priv
->first_sb
= sb_desc
;
3277 urb_priv
->last_sb
= sb_desc
;
3280 case PIPE_ISOCHRONOUS
:
3281 if(is_out
) { /* Out Isoc URB */
3283 if(urb
->number_of_packets
== 0) {
3284 tc_err("Can't create SBs for Isoc URB with zero packets\n");
3287 /* Create one SB descriptor for each packet and link them together. */
3288 for(i
= 0; i
< urb
->number_of_packets
; i
++) {
3289 if (urb
->iso_frame_desc
[i
].length
> 0) {
3291 sb_desc
= create_sb(sb_desc
, TT_OUT
, urb
->transfer_buffer
+
3292 urb
->iso_frame_desc
[i
].offset
,
3293 urb
->iso_frame_desc
[i
].length
, mem_flags
);
3297 /* Check if it's a full length packet */
3298 if (urb
->iso_frame_desc
[i
].length
==
3299 usb_maxpacket(urb
->dev
, urb
->pipe
, usb_pipeout(urb
->pipe
))) {
3300 set_sb_cmds(sb_desc
, CMD_FULL
);
3303 } else { /* zero length packet */
3304 sb_desc
= create_sb(sb_desc
, TT_ZOUT
, &zout_buffer
[0], 1, mem_flags
);
3307 set_sb_cmds(sb_desc
, CMD_FULL
);
3309 /* Attach first SB descriptor to URB */
3311 urb_priv
->first_sb
= sb_desc
;
3314 /* Set interrupt and end-of-list flags in last SB */
3315 set_sb_cmds(sb_desc
, CMD_INTR
| CMD_EOL
);
3316 /* Attach last SB descriptor to URB */
3317 urb_priv
->last_sb
= sb_desc
;
3318 tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
3319 urb
->number_of_packets
, (unsigned int)urb
);
3320 } else { /* In Isoc URB */
3321 /* Actual number of packets is not relevant for periodic in traffic as
3322 long as it is more than zero. Set to 1 always. */
3323 sb_desc
= create_sb(sb_desc
, TT_IN
, NULL
, 1, mem_flags
);
3326 /* Set end-of-list flags for SB */
3327 set_sb_cmds(sb_desc
, CMD_EOL
);
3329 /* Attach SB to URB */
3330 urb_priv
->first_sb
= sb_desc
;
3331 urb_priv
->last_sb
= sb_desc
;
3335 tc_err("Unknown pipe-type\n");
3342 int init_intr_urb(struct urb
*urb
, int mem_flags
) {
3343 struct crisv10_urb_priv
*urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
3344 struct USB_EP_Desc
* ep_desc
;
3349 ASSERT(urb_priv
!= NULL
);
3350 ASSERT(usb_pipeint(urb
->pipe
));
3351 /* We can't support interval longer than amount of eof descriptors in
3353 if(urb
->interval
> MAX_INTR_INTERVAL
) {
3354 tc_err("Interrupt interval %dms too big (max: %dms)\n", urb
->interval
,
3359 /* We assume that the SB descriptors already have been setup */
3360 ASSERT(urb_priv
->first_sb
!= NULL
);
3362 /* Round of the interval to 2^n, it is obvious that this code favours
3363 smaller numbers, but that is actually a good thing */
3364 /* FIXME: The "rounding error" for larger intervals will be quite
3365 large. For in traffic this shouldn't be a problem since it will only
3366 mean that we "poll" more often. */
3367 interval
= urb
->interval
;
3368 for (i
= 0; interval
; i
++) {
3369 interval
= interval
>> 1;
3371 urb_priv
->interval
= 1 << (i
- 1);
3373 /* We can only have max interval for Out Interrupt due to that we can only
3374 handle one linked in EP for a certain epid in the Intr descr array at the
3375 time. The USB Controller in the Etrax 100LX continues to process Intr EPs
3376 so we have no way of knowing which one that caused the actual transfer if
3377 we have several linked in. */
3378 if(usb_pipeout(urb
->pipe
)) {
3379 urb_priv
->interval
= MAX_INTR_INTERVAL
;
3382 /* Calculate amount of EPs needed */
3383 ep_count
= MAX_INTR_INTERVAL
/ urb_priv
->interval
;
3385 for(i
= 0; i
< ep_count
; i
++) {
3386 ep_desc
= create_ep(urb_priv
->epid
, urb_priv
->first_sb
, mem_flags
);
3387 if(ep_desc
== NULL
) {
3388 /* Free any descriptors that we may have allocated before failure */
3391 kfree(urb_priv
->intr_ep_pool
[i
]);
3395 urb_priv
->intr_ep_pool
[i
] = ep_desc
;
3397 urb_priv
->intr_ep_pool_length
= ep_count
;
3401 /* DMA RX/TX functions */
3402 /* ----------------------- */
3404 static void tc_dma_init_rx_list(void) {
3407 /* Setup descriptor list except last one */
3408 for (i
= 0; i
< (NBR_OF_RX_DESC
- 1); i
++) {
3409 RxDescList
[i
].sw_len
= RX_DESC_BUF_SIZE
;
3410 RxDescList
[i
].command
= 0;
3411 RxDescList
[i
].next
= virt_to_phys(&RxDescList
[i
+ 1]);
3412 RxDescList
[i
].buf
= virt_to_phys(RxBuf
+ (i
* RX_DESC_BUF_SIZE
));
3413 RxDescList
[i
].hw_len
= 0;
3414 RxDescList
[i
].status
= 0;
3416 /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
3417 USB_IN_Desc for the relevant fields.) */
3418 prepare_rx_descriptor((struct etrax_dma_descr
*)&RxDescList
[i
]);
3421 /* Special handling of last descriptor */
3422 RxDescList
[i
].sw_len
= RX_DESC_BUF_SIZE
;
3423 RxDescList
[i
].command
= IO_STATE(USB_IN_command
, eol
, yes
);
3424 RxDescList
[i
].next
= virt_to_phys(&RxDescList
[0]);
3425 RxDescList
[i
].buf
= virt_to_phys(RxBuf
+ (i
* RX_DESC_BUF_SIZE
));
3426 RxDescList
[i
].hw_len
= 0;
3427 RxDescList
[i
].status
= 0;
3429 /* Setup list pointers that show progress in list */
3430 myNextRxDesc
= &RxDescList
[0];
3431 myLastRxDesc
= &RxDescList
[NBR_OF_RX_DESC
- 1];
3433 flush_etrax_cache();
3434 /* Point DMA to first descriptor in list and start it */
3435 *R_DMA_CH9_FIRST
= virt_to_phys(myNextRxDesc
);
3436 *R_DMA_CH9_CMD
= IO_STATE(R_DMA_CH9_CMD
, cmd
, start
);
3440 static void tc_dma_init_tx_bulk_list(void) {
3442 volatile struct USB_EP_Desc
*epDescr
;
3444 for (i
= 0; i
< (NBR_OF_EPIDS
- 1); i
++) {
3445 epDescr
= &(TxBulkEPList
[i
]);
3446 CHECK_ALIGN(epDescr
);
3447 epDescr
->hw_len
= 0;
3448 epDescr
->command
= IO_FIELD(USB_EP_command
, epid
, i
);
3450 epDescr
->next
= virt_to_phys(&TxBulkEPList
[i
+ 1]);
3452 /* Initiate two EPs, disabled and with the eol flag set. No need for any
3455 /* The first one has the intr flag set so we get an interrupt when the DMA
3456 channel is about to become disabled. */
3457 CHECK_ALIGN(&TxBulkDummyEPList
[i
][0]);
3458 TxBulkDummyEPList
[i
][0].hw_len
= 0;
3459 TxBulkDummyEPList
[i
][0].command
= (IO_FIELD(USB_EP_command
, epid
, DUMMY_EPID
) |
3460 IO_STATE(USB_EP_command
, eol
, yes
) |
3461 IO_STATE(USB_EP_command
, intr
, yes
));
3462 TxBulkDummyEPList
[i
][0].sub
= 0;
3463 TxBulkDummyEPList
[i
][0].next
= virt_to_phys(&TxBulkDummyEPList
[i
][1]);
3465 /* The second one. */
3466 CHECK_ALIGN(&TxBulkDummyEPList
[i
][1]);
3467 TxBulkDummyEPList
[i
][1].hw_len
= 0;
3468 TxBulkDummyEPList
[i
][1].command
= (IO_FIELD(USB_EP_command
, epid
, DUMMY_EPID
) |
3469 IO_STATE(USB_EP_command
, eol
, yes
));
3470 TxBulkDummyEPList
[i
][1].sub
= 0;
3471 /* The last dummy's next pointer is the same as the current EP's next pointer. */
3472 TxBulkDummyEPList
[i
][1].next
= virt_to_phys(&TxBulkEPList
[i
+ 1]);
3475 /* Special handling of last descr in list, make list circular */
3476 epDescr
= &TxBulkEPList
[i
];
3477 CHECK_ALIGN(epDescr
);
3478 epDescr
->hw_len
= 0;
3479 epDescr
->command
= IO_STATE(USB_EP_command
, eol
, yes
) |
3480 IO_FIELD(USB_EP_command
, epid
, i
);
3482 epDescr
->next
= virt_to_phys(&TxBulkEPList
[0]);
3484 /* Init DMA sub-channel pointers to last item in each list */
3485 *R_DMA_CH8_SUB0_EP
= virt_to_phys(&TxBulkEPList
[i
]);
3486 /* No point in starting the bulk channel yet.
3487 *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
3490 static void tc_dma_init_tx_ctrl_list(void) {
3492 volatile struct USB_EP_Desc
*epDescr
;
3494 for (i
= 0; i
< (NBR_OF_EPIDS
- 1); i
++) {
3495 epDescr
= &(TxCtrlEPList
[i
]);
3496 CHECK_ALIGN(epDescr
);
3497 epDescr
->hw_len
= 0;
3498 epDescr
->command
= IO_FIELD(USB_EP_command
, epid
, i
);
3500 epDescr
->next
= virt_to_phys(&TxCtrlEPList
[i
+ 1]);
3502 /* Special handling of last descr in list, make list circular */
3503 epDescr
= &TxCtrlEPList
[i
];
3504 CHECK_ALIGN(epDescr
);
3505 epDescr
->hw_len
= 0;
3506 epDescr
->command
= IO_STATE(USB_EP_command
, eol
, yes
) |
3507 IO_FIELD(USB_EP_command
, epid
, i
);
3509 epDescr
->next
= virt_to_phys(&TxCtrlEPList
[0]);
3511 /* Init DMA sub-channel pointers to last item in each list */
3512 *R_DMA_CH8_SUB1_EP
= virt_to_phys(&TxCtrlEPList
[i
]);
3513 /* No point in starting the ctrl channel yet.
3514 *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
3518 static void tc_dma_init_tx_intr_list(void) {
3521 TxIntrSB_zout
.sw_len
= 1;
3522 TxIntrSB_zout
.next
= 0;
3523 TxIntrSB_zout
.buf
= virt_to_phys(&zout_buffer
[0]);
3524 TxIntrSB_zout
.command
= (IO_FIELD(USB_SB_command
, rem
, 0) |
3525 IO_STATE(USB_SB_command
, tt
, zout
) |
3526 IO_STATE(USB_SB_command
, full
, yes
) |
3527 IO_STATE(USB_SB_command
, eot
, yes
) |
3528 IO_STATE(USB_SB_command
, eol
, yes
));
3530 for (i
= 0; i
< (MAX_INTR_INTERVAL
- 1); i
++) {
3531 CHECK_ALIGN(&TxIntrEPList
[i
]);
3532 TxIntrEPList
[i
].hw_len
= 0;
3533 TxIntrEPList
[i
].command
=
3534 (IO_STATE(USB_EP_command
, eof
, yes
) |
3535 IO_STATE(USB_EP_command
, enable
, yes
) |
3536 IO_FIELD(USB_EP_command
, epid
, INVALID_EPID
));
3537 TxIntrEPList
[i
].sub
= virt_to_phys(&TxIntrSB_zout
);
3538 TxIntrEPList
[i
].next
= virt_to_phys(&TxIntrEPList
[i
+ 1]);
3541 /* Special handling of last descr in list, make list circular */
3542 CHECK_ALIGN(&TxIntrEPList
[i
]);
3543 TxIntrEPList
[i
].hw_len
= 0;
3544 TxIntrEPList
[i
].command
=
3545 (IO_STATE(USB_EP_command
, eof
, yes
) |
3546 IO_STATE(USB_EP_command
, eol
, yes
) |
3547 IO_STATE(USB_EP_command
, enable
, yes
) |
3548 IO_FIELD(USB_EP_command
, epid
, INVALID_EPID
));
3549 TxIntrEPList
[i
].sub
= virt_to_phys(&TxIntrSB_zout
);
3550 TxIntrEPList
[i
].next
= virt_to_phys(&TxIntrEPList
[0]);
3552 intr_dbg("Initiated Intr EP descriptor list\n");
3555 /* Connect DMA 8 sub-channel 2 to first in list */
3556 *R_DMA_CH8_SUB2_EP
= virt_to_phys(&TxIntrEPList
[0]);
3559 static void tc_dma_init_tx_isoc_list(void) {
3564 /* Read comment at zout_buffer declaration for an explanation to this. */
3565 TxIsocSB_zout
.sw_len
= 1;
3566 TxIsocSB_zout
.next
= 0;
3567 TxIsocSB_zout
.buf
= virt_to_phys(&zout_buffer
[0]);
3568 TxIsocSB_zout
.command
= (IO_FIELD(USB_SB_command
, rem
, 0) |
3569 IO_STATE(USB_SB_command
, tt
, zout
) |
3570 IO_STATE(USB_SB_command
, full
, yes
) |
3571 IO_STATE(USB_SB_command
, eot
, yes
) |
3572 IO_STATE(USB_SB_command
, eol
, yes
));
3574 /* The last isochronous EP descriptor is a dummy. */
3575 for (i
= 0; i
< (NBR_OF_EPIDS
- 1); i
++) {
3576 CHECK_ALIGN(&TxIsocEPList
[i
]);
3577 TxIsocEPList
[i
].hw_len
= 0;
3578 TxIsocEPList
[i
].command
= IO_FIELD(USB_EP_command
, epid
, i
);
3579 TxIsocEPList
[i
].sub
= 0;
3580 TxIsocEPList
[i
].next
= virt_to_phys(&TxIsocEPList
[i
+ 1]);
3583 CHECK_ALIGN(&TxIsocEPList
[i
]);
3584 TxIsocEPList
[i
].hw_len
= 0;
3586 /* Must enable the last EP descr to get eof interrupt. */
3587 TxIsocEPList
[i
].command
= (IO_STATE(USB_EP_command
, enable
, yes
) |
3588 IO_STATE(USB_EP_command
, eof
, yes
) |
3589 IO_STATE(USB_EP_command
, eol
, yes
) |
3590 IO_FIELD(USB_EP_command
, epid
, INVALID_EPID
));
3591 TxIsocEPList
[i
].sub
= virt_to_phys(&TxIsocSB_zout
);
3592 TxIsocEPList
[i
].next
= virt_to_phys(&TxIsocEPList
[0]);
3594 *R_DMA_CH8_SUB3_EP
= virt_to_phys(&TxIsocEPList
[0]);
3595 *R_DMA_CH8_SUB3_CMD
= IO_STATE(R_DMA_CH8_SUB3_CMD
, cmd
, start
);
3598 static int tc_dma_init(struct usb_hcd
*hcd
) {
3599 tc_dma_init_rx_list();
3600 tc_dma_init_tx_bulk_list();
3601 tc_dma_init_tx_ctrl_list();
3602 tc_dma_init_tx_intr_list();
3603 tc_dma_init_tx_isoc_list();
3605 if (cris_request_dma(USB_TX_DMA_NBR
,
3606 "ETRAX 100LX built-in USB (Tx)",
3607 DMA_VERBOSE_ON_ERROR
,
3609 err("Could not allocate DMA ch 8 for USB");
3613 if (cris_request_dma(USB_RX_DMA_NBR
,
3614 "ETRAX 100LX built-in USB (Rx)",
3615 DMA_VERBOSE_ON_ERROR
,
3617 err("Could not allocate DMA ch 9 for USB");
3622 /* Note that these interrupts are not used. */
3623 IO_STATE(R_IRQ_MASK2_SET
, dma8_sub0_descr
, set
) |
3624 /* Sub channel 1 (ctrl) descr. interrupts are used. */
3625 IO_STATE(R_IRQ_MASK2_SET
, dma8_sub1_descr
, set
) |
3626 IO_STATE(R_IRQ_MASK2_SET
, dma8_sub2_descr
, set
) |
3627 /* Sub channel 3 (isoc) descr. interrupts are used. */
3628 IO_STATE(R_IRQ_MASK2_SET
, dma8_sub3_descr
, set
);
3630 /* Note that the dma9_descr interrupt is not used. */
3632 IO_STATE(R_IRQ_MASK2_SET
, dma9_eop
, set
) |
3633 IO_STATE(R_IRQ_MASK2_SET
, dma9_descr
, set
);
3635 if (request_irq(ETRAX_USB_RX_IRQ
, tc_dma_rx_interrupt
, 0,
3636 "ETRAX 100LX built-in USB (Rx)", hcd
)) {
3637 err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ
);
3641 if (request_irq(ETRAX_USB_TX_IRQ
, tc_dma_tx_interrupt
, 0,
3642 "ETRAX 100LX built-in USB (Tx)", hcd
)) {
3643 err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ
);
3650 static void tc_dma_destroy(void) {
3651 free_irq(ETRAX_USB_RX_IRQ
, NULL
);
3652 free_irq(ETRAX_USB_TX_IRQ
, NULL
);
3654 cris_free_dma(USB_TX_DMA_NBR
, "ETRAX 100LX built-in USB (Tx)");
3655 cris_free_dma(USB_RX_DMA_NBR
, "ETRAX 100LX built-in USB (Rx)");
3659 static void tc_dma_link_intr_urb(struct urb
*urb
);
3661 /* Handle processing of Bulk, Ctrl and Intr queues */
3662 static void tc_dma_process_queue(int epid
) {
3664 struct crisv10_urb_priv
*urb_priv
;
3665 unsigned long flags
;
3668 if(epid_state
[epid
].disabled
) {
3669 /* Don't process any URBs on a disabled endpoint */
3673 /* Do not disturb us while fiddling with EPs and epids */
3674 local_irq_save(flags
);
3676 /* For bulk, Ctrl and Intr can we only have one URB active at a time for
3678 if(activeUrbList
[epid
] != NULL
) {
3679 /* An URB is already active on EP, skip checking queue */
3680 local_irq_restore(flags
);
3684 urb
= urb_list_first(epid
);
3686 /* No URB waiting in EP queue. Nothing do to */
3687 local_irq_restore(flags
);
3691 urb_priv
= urb
->hcpriv
;
3692 ASSERT(urb_priv
!= NULL
);
3693 ASSERT(urb_priv
->urb_state
== NOT_STARTED
);
3694 ASSERT(!usb_pipeisoc(urb
->pipe
));
3696 /* Remove this URB from the queue and move it to active */
3697 activeUrbList
[epid
] = urb
;
3698 urb_list_del(urb
, epid
);
3700 urb_priv
->urb_state
= STARTED
;
3702 /* Reset error counters (regardless of which direction this traffic is). */
3703 etrax_epid_clear_error(epid
);
3705 /* Special handling of Intr EP lists */
3706 if(usb_pipeint(urb
->pipe
)) {
3707 tc_dma_link_intr_urb(urb
);
3708 local_irq_restore(flags
);
3712 /* Software must preset the toggle bits for Bulk and Ctrl */
3713 if(usb_pipecontrol(urb
->pipe
)) {
3714 /* Toggle bits are initialized only during setup transaction in a
3716 etrax_epid_set_toggle(epid
, 0, 0);
3717 etrax_epid_set_toggle(epid
, 1, 0);
3719 toggle
= usb_gettoggle(urb
->dev
, usb_pipeendpoint(urb
->pipe
),
3720 usb_pipeout(urb
->pipe
));
3721 etrax_epid_set_toggle(epid
, usb_pipeout(urb
->pipe
), toggle
);
3724 tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
3725 (unsigned int)urb
, str_dir(urb
->pipe
), str_type(urb
->pipe
), epid
,
3726 sblist_to_str(urb_priv
->first_sb
));
3728 /* We start the DMA sub channel without checking if it's running or not,
3730 1) If it's already running, issuing the start command is a nop.
3731 2) We avoid a test-and-set race condition. */
3732 switch(usb_pipetype(urb
->pipe
)) {
3734 /* Assert that the EP descriptor is disabled. */
3735 ASSERT(!(TxBulkEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)));
3737 /* Set up and enable the EP descriptor. */
3738 TxBulkEPList
[epid
].sub
= virt_to_phys(urb_priv
->first_sb
);
3739 TxBulkEPList
[epid
].hw_len
= 0;
3740 TxBulkEPList
[epid
].command
|= IO_STATE(USB_EP_command
, enable
, yes
);
3742 /* Check if the dummy list is already with us (if several urbs were queued). */
3743 if (usb_pipein(urb
->pipe
) && (TxBulkEPList
[epid
].next
!= virt_to_phys(&TxBulkDummyEPList
[epid
][0]))) {
3744 tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d",
3745 (unsigned long)urb
, epid
);
3747 /* We don't need to check if the DMA is at this EP or not before changing the
3748 next pointer, since we will do it in one 32-bit write (EP descriptors are
3750 TxBulkEPList
[epid
].next
= virt_to_phys(&TxBulkDummyEPList
[epid
][0]);
3753 restart_dma8_sub0();
3755 /* Update/restart the bulk start timer since we just started the channel.*/
3756 mod_timer(&bulk_start_timer
, jiffies
+ BULK_START_TIMER_INTERVAL
);
3757 /* Update/restart the bulk eot timer since we just inserted traffic. */
3758 mod_timer(&bulk_eot_timer
, jiffies
+ BULK_EOT_TIMER_INTERVAL
);
3761 /* Assert that the EP descriptor is disabled. */
3762 ASSERT(!(TxCtrlEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)));
3764 /* Set up and enable the EP descriptor. */
3765 TxCtrlEPList
[epid
].sub
= virt_to_phys(urb_priv
->first_sb
);
3766 TxCtrlEPList
[epid
].hw_len
= 0;
3767 TxCtrlEPList
[epid
].command
|= IO_STATE(USB_EP_command
, enable
, yes
);
3769 *R_DMA_CH8_SUB1_CMD
= IO_STATE(R_DMA_CH8_SUB1_CMD
, cmd
, start
);
3772 local_irq_restore(flags
);
3775 static void tc_dma_link_intr_urb(struct urb
*urb
) {
3776 struct crisv10_urb_priv
*urb_priv
= urb
->hcpriv
;
3777 volatile struct USB_EP_Desc
*tmp_ep
;
3778 struct USB_EP_Desc
*ep_desc
;
3782 ASSERT(urb_priv
!= NULL
);
3783 epid
= urb_priv
->epid
;
3784 ASSERT(urb_priv
->interval
> 0);
3785 ASSERT(urb_priv
->intr_ep_pool_length
> 0);
3787 tmp_ep
= &TxIntrEPList
[0];
3789 /* Only insert one EP descriptor in list for Out Intr URBs.
3790 We can only handle Out Intr with interval of 128ms because
3791 it's not possible to insert several Out Intr EPs because they
3792 are not consumed by the DMA. */
3793 if(usb_pipeout(urb
->pipe
)) {
3794 ep_desc
= urb_priv
->intr_ep_pool
[0];
3796 ep_desc
->next
= tmp_ep
->next
;
3797 tmp_ep
->next
= virt_to_phys(ep_desc
);
3800 /* Loop through Intr EP descriptor list and insert EP for URB at
3801 specified interval */
3803 /* Each EP descriptor with eof flag sat signals a new frame */
3804 if (tmp_ep
->command
& IO_MASK(USB_EP_command
, eof
)) {
3805 /* Insert a EP from URBs EP pool at correct interval */
3806 if ((i
% urb_priv
->interval
) == 0) {
3807 ep_desc
= urb_priv
->intr_ep_pool
[pool_idx
];
3809 ep_desc
->next
= tmp_ep
->next
;
3810 tmp_ep
->next
= virt_to_phys(ep_desc
);
3812 ASSERT(pool_idx
<= urb_priv
->intr_ep_pool_length
);
3816 tmp_ep
= (struct USB_EP_Desc
*)phys_to_virt(tmp_ep
->next
);
3817 } while(tmp_ep
!= &TxIntrEPList
[0]);
3820 intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid
,
3821 sblist_to_str(urb_priv
->first_sb
), urb_priv
->interval
, pool_idx
);
3823 /* We start the DMA sub channel without checking if it's running or not,
3825 1) If it's already running, issuing the start command is a nop.
3826 2) We avoid a test-and-set race condition. */
3827 *R_DMA_CH8_SUB2_CMD
= IO_STATE(R_DMA_CH8_SUB2_CMD
, cmd
, start
);
3830 static void tc_dma_process_isoc_urb(struct urb
*urb
) {
3831 unsigned long flags
;
3832 struct crisv10_urb_priv
*urb_priv
= urb
->hcpriv
;
3835 /* Do not disturb us while fiddling with EPs and epids */
3836 local_irq_save(flags
);
3839 ASSERT(urb_priv
->first_sb
);
3840 epid
= urb_priv
->epid
;
3842 if(activeUrbList
[epid
] == NULL
) {
3843 /* EP is idle, so make this URB active */
3844 activeUrbList
[epid
] = urb
;
3845 urb_list_del(urb
, epid
);
3846 ASSERT(TxIsocEPList
[epid
].sub
== 0);
3847 ASSERT(!(TxIsocEPList
[epid
].command
&
3848 IO_STATE(USB_EP_command
, enable
, yes
)));
3850 /* Differentiate between In and Out Isoc. Because In SBs are not consumed*/
3851 if(usb_pipein(urb
->pipe
)) {
3852 /* Each EP for In Isoc will have only one SB descriptor, setup when
3853 submitting the first active urb. We do it here by copying from URBs
3854 pre-allocated SB. */
3855 memcpy((void *)&(TxIsocSBList
[epid
]), urb_priv
->first_sb
,
3856 sizeof(TxIsocSBList
[epid
]));
3857 TxIsocEPList
[epid
].hw_len
= 0;
3858 TxIsocEPList
[epid
].sub
= virt_to_phys(&(TxIsocSBList
[epid
]));
3860 /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
3861 TxIsocEPList
[epid
].hw_len
= 0;
3862 TxIsocEPList
[epid
].sub
= virt_to_phys(urb_priv
->first_sb
);
3864 isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
3866 (unsigned int)urb
, urb_priv
->urb_num
, epid
,
3867 (unsigned int)(urb_priv
->first_sb
),
3868 (unsigned int)(urb_priv
->last_sb
));
3871 if (urb
->transfer_flags
& URB_ISO_ASAP
) {
3872 /* The isoc transfer should be started as soon as possible. The
3873 start_frame field is a return value if URB_ISO_ASAP was set. Comparing
3874 R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
3875 token is sent 2 frames later. I'm not sure how this affects usage of
3876 the start_frame field by the device driver, or how it affects things
3877 when USB_ISO_ASAP is not set, so therefore there's no compensation for
3878 the 2 frame "lag" here. */
3879 urb
->start_frame
= (*R_USB_FM_NUMBER
& 0x7ff);
3880 TxIsocEPList
[epid
].command
|= IO_STATE(USB_EP_command
, enable
, yes
);
3881 urb_priv
->urb_state
= STARTED
;
3882 isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
3885 /* Not started yet. */
3886 urb_priv
->urb_state
= NOT_STARTED
;
3887 isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
3892 /* An URB is already active on the EP. Leave URB in queue and let
3893 finish_isoc_urb process it after current active URB */
3894 ASSERT(TxIsocEPList
[epid
].sub
!= 0);
3896 if(usb_pipein(urb
->pipe
)) {
3897 /* Because there already is a active In URB on this epid we do nothing
3898 and the finish_isoc_urb() function will handle switching to next URB*/
3900 } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */
3901 struct USB_SB_Desc
*temp_sb_desc
;
3903 /* Set state STARTED to all Out Isoc URBs added to SB list because we
3904 don't know how many of them that are finished before descr interrupt*/
3905 urb_priv
->urb_state
= STARTED
;
3907 /* Find end of current SB list by looking for SB with eol flag sat */
3908 temp_sb_desc
= phys_to_virt(TxIsocEPList
[epid
].sub
);
3909 while ((temp_sb_desc
->command
& IO_MASK(USB_SB_command
, eol
)) !=
3910 IO_STATE(USB_SB_command
, eol
, yes
)) {
3911 ASSERT(temp_sb_desc
->next
);
3912 temp_sb_desc
= phys_to_virt(temp_sb_desc
->next
);
3915 isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
3916 " sub:0x%x eol:0x%x\n",
3917 (unsigned int)urb
, urb_priv
->urb_num
,
3918 (unsigned int)(urb_priv
->first_sb
),
3919 (unsigned int)(urb_priv
->last_sb
), epid
,
3920 (unsigned int)phys_to_virt(TxIsocEPList
[epid
].sub
),
3921 (unsigned int)temp_sb_desc
);
3923 /* Next pointer must be set before eol is removed. */
3924 temp_sb_desc
->next
= virt_to_phys(urb_priv
->first_sb
);
3925 /* Clear the previous end of list flag since there is a new in the
3926 added SB descriptor list. */
3927 temp_sb_desc
->command
&= ~IO_MASK(USB_SB_command
, eol
);
3929 if (!(TxIsocEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
))) {
3931 /* 8.8.5 in Designer's Reference says we should check for and correct
3932 any errors in the EP here. That should not be necessary if
3933 epid_attn is handled correctly, so we assume all is ok. */
3934 epid_data
= etrax_epid_iso_get(epid
);
3935 if (IO_EXTRACT(R_USB_EPT_DATA
, error_code
, epid_data
) !=
3936 IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
, no_error
)) {
3937 isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
3939 IO_EXTRACT(R_USB_EPT_DATA
, error_code
, epid_data
), epid
,
3940 (unsigned int)urb
, urb_priv
->urb_num
);
3943 /* The SB list was exhausted. */
3944 if (virt_to_phys(urb_priv
->last_sb
) != TxIsocEPList
[epid
].sub
) {
3945 /* The new sublist did not get processed before the EP was
3946 disabled. Setup the EP again. */
3948 if(virt_to_phys(temp_sb_desc
) == TxIsocEPList
[epid
].sub
) {
3949 isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted"
3950 ", restarting from this URBs SB:0x%x\n",
3951 epid
, (unsigned int)temp_sb_desc
,
3952 (unsigned int)(urb_priv
->first_sb
));
3953 TxIsocEPList
[epid
].hw_len
= 0;
3954 TxIsocEPList
[epid
].sub
= virt_to_phys(urb_priv
->first_sb
);
3955 urb
->start_frame
= (*R_USB_FM_NUMBER
& 0x7ff);
3956 /* Enable the EP again so data gets processed this time */
3957 TxIsocEPList
[epid
].command
|=
3958 IO_STATE(USB_EP_command
, enable
, yes
);
3961 /* The EP has been disabled but not at end this URB (god knows
3962 where). This should generate an epid_attn so we should not be
3964 isoc_warn("EP was disabled on sb:0x%x before SB list for"
3965 " URB:0x%x[%d] got processed\n",
3966 (unsigned int)phys_to_virt(TxIsocEPList
[epid
].sub
),
3967 (unsigned int)urb
, urb_priv
->urb_num
);
3970 /* This might happend if we are slow on this function and isn't
3972 isoc_dbg("EP was disabled and finished with SBs from appended"
3973 " URB:0x%x[%d]\n", (unsigned int)urb
, urb_priv
->urb_num
);
3979 /* Start the DMA sub channel */
3980 *R_DMA_CH8_SUB3_CMD
= IO_STATE(R_DMA_CH8_SUB3_CMD
, cmd
, start
);
3982 local_irq_restore(flags
);
3985 static void tc_dma_unlink_intr_urb(struct urb
*urb
) {
3986 struct crisv10_urb_priv
*urb_priv
= urb
->hcpriv
;
3987 volatile struct USB_EP_Desc
*first_ep
; /* First EP in the list. */
3988 volatile struct USB_EP_Desc
*curr_ep
; /* Current EP, the iterator. */
3989 volatile struct USB_EP_Desc
*next_ep
; /* The EP after current. */
3990 volatile struct USB_EP_Desc
*unlink_ep
; /* The one we should remove from
3993 volatile int timeout
= 10000;
3996 /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
3999 ASSERT(urb_priv
->intr_ep_pool_length
> 0);
4000 epid
= urb_priv
->epid
;
4002 /* First disable all Intr EPs belonging to epid for this URB */
4003 first_ep
= &TxIntrEPList
[0];
4006 next_ep
= (struct USB_EP_Desc
*)phys_to_virt(curr_ep
->next
);
4007 if (IO_EXTRACT(USB_EP_command
, epid
, next_ep
->command
) == epid
) {
4009 next_ep
->command
&= ~IO_MASK(USB_EP_command
, enable
);
4011 curr_ep
= phys_to_virt(curr_ep
->next
);
4012 } while (curr_ep
!= first_ep
);
4015 /* Now unlink all EPs belonging to this epid from Descr list */
4016 first_ep
= &TxIntrEPList
[0];
4019 next_ep
= (struct USB_EP_Desc
*)phys_to_virt(curr_ep
->next
);
4020 if (IO_EXTRACT(USB_EP_command
, epid
, next_ep
->command
) == epid
) {
4021 /* This is the one we should unlink. */
4022 unlink_ep
= next_ep
;
4024 /* Actually unlink the EP from the DMA list. */
4025 curr_ep
->next
= unlink_ep
->next
;
4027 /* Wait until the DMA is no longer at this descriptor. */
4028 while((*R_DMA_CH8_SUB2_EP
== virt_to_phys(unlink_ep
)) &&
4033 curr_ep
= phys_to_virt(curr_ep
->next
);
4034 } while (curr_ep
!= first_ep
);
4036 if(count
!= urb_priv
->intr_ep_pool_length
) {
4037 intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count
,
4038 urb_priv
->intr_ep_pool_length
, (unsigned int)urb
,
4041 intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count
,
4042 urb_priv
->intr_ep_pool_length
, (unsigned int)urb
);
4046 static void check_finished_bulk_tx_epids(struct usb_hcd
*hcd
,
4048 unsigned long flags
;
4051 struct crisv10_urb_priv
* urb_priv
;
4054 /* Protect TxEPList */
4055 local_irq_save(flags
);
4057 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
4058 /* A finished EP descriptor is disabled and has a valid sub pointer */
4059 if (!(TxBulkEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) &&
4060 (TxBulkEPList
[epid
].sub
!= 0)) {
4062 /* Get the active URB for this epid */
4063 urb
= activeUrbList
[epid
];
4066 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4069 /* Only handle finished out Bulk EPs here,
4070 and let RX interrupt take care of the rest */
4071 if(!epid_out_traffic(epid
)) {
4076 tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
4077 epid_out_traffic(epid
) ? "Out" : "In", epid
, (unsigned int)urb
,
4080 tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
4081 epid_out_traffic(epid
) ? "Out" : "In", epid
, (unsigned int)urb
,
4085 if(urb_priv
->urb_state
== UNLINK
) {
4086 /* This Bulk URB is requested to be unlinked, that means that the EP
4087 has been disabled and we might not have sent all data */
4088 tc_finish_urb(hcd
, urb
, urb
->status
);
4092 ASSERT(urb_priv
->urb_state
== STARTED
);
4093 if (phys_to_virt(TxBulkEPList
[epid
].sub
) != urb_priv
->last_sb
) {
4094 tc_err("Endpoint got disabled before reaching last sb\n");
4097 epid_data
= etrax_epid_get(epid
);
4098 if (IO_EXTRACT(R_USB_EPT_DATA
, error_code
, epid_data
) ==
4099 IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
, no_error
)) {
4100 /* This means that the endpoint has no error, is disabled
4101 and had inserted traffic, i.e. transfer successfully completed. */
4102 tc_finish_urb(hcd
, urb
, 0);
4104 /* Shouldn't happen. We expect errors to be caught by epid
4106 tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
4107 epid
, IO_EXTRACT(R_USB_EPT_DATA
, error_code
, epid_data
));
4110 tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid
);
4114 local_irq_restore(flags
);
4117 static void check_finished_ctrl_tx_epids(struct usb_hcd
*hcd
) {
4118 unsigned long flags
;
4121 struct crisv10_urb_priv
* urb_priv
;
4124 /* Protect TxEPList */
4125 local_irq_save(flags
);
4127 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
4128 if(epid
== DUMMY_EPID
)
4131 /* A finished EP descriptor is disabled and has a valid sub pointer */
4132 if (!(TxCtrlEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) &&
4133 (TxCtrlEPList
[epid
].sub
!= 0)) {
4135 /* Get the active URB for this epid */
4136 urb
= activeUrbList
[epid
];
4139 tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid
);
4144 ASSERT(usb_pipein(urb
->pipe
));
4145 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4147 if (phys_to_virt(TxCtrlEPList
[epid
].sub
) != urb_priv
->last_sb
) {
4148 tc_err("Endpoint got disabled before reaching last sb\n");
4151 epid_data
= etrax_epid_get(epid
);
4152 if (IO_EXTRACT(R_USB_EPT_DATA
, error_code
, epid_data
) ==
4153 IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
, no_error
)) {
4154 /* This means that the endpoint has no error, is disabled
4155 and had inserted traffic, i.e. transfer successfully completed. */
4157 /* Check if RX-interrupt for In Ctrl has been processed before
4158 finishing the URB */
4159 if(urb_priv
->ctrl_rx_done
) {
4160 tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
4161 (unsigned int)urb
, urb_priv
->urb_num
);
4162 tc_finish_urb(hcd
, urb
, 0);
4164 /* If we get zout descriptor interrupt before RX was done for a
4165 In Ctrl transfer, then we flag that and it will be finished
4166 in the RX-Interrupt */
4167 urb_priv
->ctrl_zout_done
= 1;
4168 tc_dbg("Got zout descr interrupt before RX interrupt\n");
4171 /* Shouldn't happen. We expect errors to be caught by epid
4173 tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid
, (unsigned int)urb
, urb_priv
->urb_num
, IO_EXTRACT(R_USB_EPT_DATA
, error_code
, epid_data
));
4174 __dump_ep_desc(&(TxCtrlEPList
[epid
]));
4175 __dump_ept_data(epid
);
4179 local_irq_restore(flags
);
4182 /* This function goes through all epids that are setup for Out Isoc transfers
4183 and marks (isoc_out_done) all queued URBs that the DMA has finished
4185 No URB completetion is done here to make interrupt routine return quickly.
4186 URBs are completed later with help of complete_isoc_bottom_half() that
4187 becomes schedules when this functions is finished. */
4188 static void check_finished_isoc_tx_epids(void) {
4189 unsigned long flags
;
4192 struct crisv10_urb_priv
* urb_priv
;
4193 struct USB_SB_Desc
* sb_desc
;
4196 /* Protect TxIsocEPList */
4197 local_irq_save(flags
);
4199 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
4200 if (TxIsocEPList
[epid
].sub
== 0 || epid
== INVALID_EPID
||
4201 !epid_out_traffic(epid
)) {
4202 /* Nothing here to see. */
4205 ASSERT(epid_inuse(epid
));
4206 ASSERT(epid_isoc(epid
));
4208 sb_desc
= phys_to_virt(TxIsocEPList
[epid
].sub
);
4209 /* Find the last descriptor of the currently active URB for this ep.
4210 This is the first descriptor in the sub list marked for a descriptor
4212 while (sb_desc
&& !IO_EXTRACT(USB_SB_command
, intr
, sb_desc
->command
)) {
4213 sb_desc
= sb_desc
->next
? phys_to_virt(sb_desc
->next
) : 0;
4217 isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
4218 epid
, (unsigned int)phys_to_virt(TxIsocEPList
[epid
].sub
),
4219 (unsigned int)sb_desc
);
4221 urb
= activeUrbList
[epid
];
4223 isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid
);
4228 while(urb
&& !epid_done
) {
4230 ASSERT(usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
);
4231 ASSERT(usb_pipeout(urb
->pipe
));
4233 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4235 ASSERT(urb_priv
->urb_state
== STARTED
||
4236 urb_priv
->urb_state
== UNLINK
);
4238 if (sb_desc
!= urb_priv
->last_sb
) {
4239 /* This urb has been sent. */
4240 urb_priv
->isoc_out_done
= 1;
4242 } else { /* Found URB that has last_sb as the interrupt reason */
4244 /* Check if EP has been disabled, meaning that all transfers are done*/
4245 if(!(TxIsocEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
))) {
4246 ASSERT((sb_desc
->command
& IO_MASK(USB_SB_command
, eol
)) ==
4247 IO_STATE(USB_SB_command
, eol
, yes
));
4248 ASSERT(sb_desc
->next
== 0);
4249 urb_priv
->isoc_out_done
= 1;
4251 isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
4252 (unsigned int)urb
, urb_priv
->urb_num
);
4254 /* Stop looking any further in queue */
4259 if(urb
== activeUrbList
[epid
]) {
4260 urb
= urb_list_first(epid
);
4262 urb
= urb_list_next(urb
, epid
);
4265 } /* END: while(urb && !epid_done) */
4268 local_irq_restore(flags
);
4272 /* This is where the Out Isoc URBs are realy completed. This function is
4273 scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
4274 are done. This functions completes all URBs earlier marked with
4275 isoc_out_done by fast interrupt routine check_finished_isoc_tx_epids() */
4277 static void complete_isoc_bottom_half(struct work_struct
* work
) {
4278 struct crisv10_isoc_complete_data
*comp_data
;
4279 struct usb_iso_packet_descriptor
*packet
;
4280 struct crisv10_urb_priv
* urb_priv
;
4281 unsigned long flags
;
4287 comp_data
= container_of(work
, struct crisv10_isoc_complete_data
, usb_bh
);
4289 local_irq_save(flags
);
4291 for (epid
= 0; epid
< NBR_OF_EPIDS
- 1; epid
++) {
4292 if(!epid_inuse(epid
) || !epid_isoc(epid
) || !epid_out_traffic(epid
) || epid
== DUMMY_EPID
) {
4293 /* Only check valid Out Isoc epids */
4297 isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid
,
4298 (unsigned int)phys_to_virt(TxIsocEPList
[epid
].sub
));
4300 /* The descriptor interrupt handler has marked all transmitted Out Isoc
4301 URBs with isoc_out_done. Now we traverse all epids and for all that
4302 have out Isoc traffic we traverse its URB list and complete the
4303 transmitted URBs. */
4305 while (!epid_done
) {
4307 /* Get the active urb (if any) */
4308 urb
= activeUrbList
[epid
];
4310 isoc_dbg("No active URB on epid:%d anymore\n", epid
);
4316 ASSERT(usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
);
4317 ASSERT(usb_pipeout(urb
->pipe
));
4319 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4322 if (!(urb_priv
->isoc_out_done
)) {
4323 /* We have reached URB that isn't flaged done yet, stop traversing. */
4324 isoc_dbg("Stoped traversing Out Isoc URBs on epid:%d"
4325 " before not yet flaged URB:0x%x[%d]\n",
4326 epid
, (unsigned int)urb
, urb_priv
->urb_num
);
4331 /* This urb has been sent. */
4332 isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n",
4333 (unsigned int)urb
, urb_priv
->urb_num
);
4335 /* Set ok on transfered packets for this URB and finish it */
4336 for (i
= 0; i
< urb
->number_of_packets
; i
++) {
4337 packet
= &urb
->iso_frame_desc
[i
];
4339 packet
->actual_length
= packet
->length
;
4341 urb_priv
->isoc_packet_counter
= urb
->number_of_packets
;
4342 tc_finish_urb(comp_data
->hcd
, urb
, 0);
4344 } /* END: while(!epid_done) */
4345 } /* END: for(epid...) */
4347 local_irq_restore(flags
);
4348 kmem_cache_free(isoc_compl_cache
, comp_data
);
4352 static void check_finished_intr_tx_epids(struct usb_hcd
*hcd
) {
4353 unsigned long flags
;
4356 struct crisv10_urb_priv
* urb_priv
;
4357 volatile struct USB_EP_Desc
*curr_ep
; /* Current EP, the iterator. */
4358 volatile struct USB_EP_Desc
*next_ep
; /* The EP after current. */
4360 /* Protect TxintrEPList */
4361 local_irq_save(flags
);
4363 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
4364 if(!epid_inuse(epid
) || !epid_intr(epid
) || !epid_out_traffic(epid
)) {
4365 /* Nothing to see on this epid. Only check valid Out Intr epids */
4369 urb
= activeUrbList
[epid
];
4371 intr_warn("Found Out Intr epid:%d with no active URB\n", epid
);
4376 ASSERT(usb_pipetype(urb
->pipe
) == PIPE_INTERRUPT
);
4377 ASSERT(usb_pipeout(urb
->pipe
));
4379 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4382 /* Go through EPs between first and second sof-EP. It's here Out Intr EPs
4384 curr_ep
= &TxIntrEPList
[0];
4386 next_ep
= (struct USB_EP_Desc
*)phys_to_virt(curr_ep
->next
);
4387 if(next_ep
== urb_priv
->intr_ep_pool
[0]) {
4388 /* We found the Out Intr EP for this epid */
4390 /* Disable it so it doesn't get processed again */
4391 next_ep
->command
&= ~IO_MASK(USB_EP_command
, enable
);
4393 /* Finish the active Out Intr URB with status OK */
4394 tc_finish_urb(hcd
, urb
, 0);
4396 curr_ep
= phys_to_virt(curr_ep
->next
);
4397 } while (curr_ep
!= &TxIntrEPList
[1]);
4400 local_irq_restore(flags
);
4403 /* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */
4404 static irqreturn_t
tc_dma_tx_interrupt(int irq
, void *vhc
) {
4405 struct usb_hcd
*hcd
= (struct usb_hcd
*)vhc
;
4408 if (*R_IRQ_READ2
& IO_MASK(R_IRQ_READ2
, dma8_sub0_descr
)) {
4409 /* Clear this interrupt */
4410 *R_DMA_CH8_SUB0_CLR_INTR
= IO_STATE(R_DMA_CH8_SUB0_CLR_INTR
, clr_descr
, do);
4411 restart_dma8_sub0();
4414 if (*R_IRQ_READ2
& IO_MASK(R_IRQ_READ2
, dma8_sub1_descr
)) {
4415 /* Clear this interrupt */
4416 *R_DMA_CH8_SUB1_CLR_INTR
= IO_STATE(R_DMA_CH8_SUB1_CLR_INTR
, clr_descr
, do);
4417 check_finished_ctrl_tx_epids(hcd
);
4420 if (*R_IRQ_READ2
& IO_MASK(R_IRQ_READ2
, dma8_sub2_descr
)) {
4421 /* Clear this interrupt */
4422 *R_DMA_CH8_SUB2_CLR_INTR
= IO_STATE(R_DMA_CH8_SUB2_CLR_INTR
, clr_descr
, do);
4423 check_finished_intr_tx_epids(hcd
);
4426 if (*R_IRQ_READ2
& IO_MASK(R_IRQ_READ2
, dma8_sub3_descr
)) {
4427 struct crisv10_isoc_complete_data
* comp_data
;
4429 /* Flag done Out Isoc for later completion */
4430 check_finished_isoc_tx_epids();
4432 /* Clear this interrupt */
4433 *R_DMA_CH8_SUB3_CLR_INTR
= IO_STATE(R_DMA_CH8_SUB3_CLR_INTR
, clr_descr
, do);
4434 /* Schedule bottom half of Out Isoc completion function. This function
4435 finishes the URBs marked with isoc_out_done */
4436 comp_data
= (struct crisv10_isoc_complete_data
*)
4437 kmem_cache_alloc(isoc_compl_cache
, GFP_ATOMIC
);
4438 ASSERT(comp_data
!= NULL
);
4439 comp_data
->hcd
= hcd
;
4441 INIT_WORK(&comp_data
->usb_bh
, complete_isoc_bottom_half
);
4442 schedule_work(&comp_data
->usb_bh
);
4448 /* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */
4449 static irqreturn_t
tc_dma_rx_interrupt(int irq
, void *vhc
) {
4450 unsigned long flags
;
4452 struct usb_hcd
*hcd
= (struct usb_hcd
*)vhc
;
4453 struct crisv10_urb_priv
*urb_priv
;
4459 /* Clear this interrupt. */
4460 *R_DMA_CH9_CLR_INTR
= IO_STATE(R_DMA_CH9_CLR_INTR
, clr_eop
, do);
4462 /* Custom clear interrupt for this interrupt */
4463 /* The reason we cli here is that we call the driver's callback functions. */
4464 local_irq_save(flags
);
4466 /* Note that this while loop assumes that all packets span only
4467 one rx descriptor. */
4468 while(myNextRxDesc
->status
& IO_MASK(USB_IN_status
, eop
)) {
4469 epid
= IO_EXTRACT(USB_IN_status
, epid
, myNextRxDesc
->status
);
4470 /* Get the active URB for this epid */
4471 urb
= activeUrbList
[epid
];
4473 ASSERT(epid_inuse(epid
));
4475 dma_err("No urb for epid %d in rx interrupt\n", epid
);
4479 /* Check if any errors on epid */
4481 if (myNextRxDesc
->status
& IO_MASK(USB_IN_status
, error
)) {
4482 __u32 r_usb_ept_data
;
4484 if (usb_pipeisoc(urb
->pipe
)) {
4485 r_usb_ept_data
= etrax_epid_iso_get(epid
);
4486 if((r_usb_ept_data
& IO_MASK(R_USB_EPT_DATA_ISO
, valid
)) &&
4487 (IO_EXTRACT(R_USB_EPT_DATA_ISO
, error_code
, r_usb_ept_data
) == 0) &&
4488 (myNextRxDesc
->status
& IO_MASK(USB_IN_status
, nodata
))) {
4489 /* Not an error, just a failure to receive an expected iso
4490 in packet in this frame. This is not documented
4491 in the designers reference. Continue processing.
4493 } else real_error
= 1;
4494 } else real_error
= 1;
4498 dma_err("Error in RX descr on epid:%d for URB 0x%x",
4499 epid
, (unsigned int)urb
);
4500 dump_ept_data(epid
);
4501 dump_in_desc(myNextRxDesc
);
4505 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
4507 ASSERT(urb_priv
->urb_state
== STARTED
||
4508 urb_priv
->urb_state
== UNLINK
);
4510 if ((usb_pipetype(urb
->pipe
) == PIPE_BULK
) ||
4511 (usb_pipetype(urb
->pipe
) == PIPE_CONTROL
) ||
4512 (usb_pipetype(urb
->pipe
) == PIPE_INTERRUPT
)) {
4514 /* We get nodata for empty data transactions, and the rx descriptor's
4515 hw_len field is not valid in that case. No data to copy in other
4517 if (myNextRxDesc
->status
& IO_MASK(USB_IN_status
, nodata
)) {
4518 /* No data to copy */
4521 dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
4522 (unsigned int)urb, epid, myNextRxDesc->hw_len,
4523 urb_priv->rx_offset);
4525 /* Only copy data if URB isn't flaged to be unlinked*/
4526 if(urb_priv
->urb_state
!= UNLINK
) {
4527 /* Make sure the data fits in the buffer. */
4528 if(urb_priv
->rx_offset
+ myNextRxDesc
->hw_len
4529 <= urb
->transfer_buffer_length
) {
4531 /* Copy the data to URBs buffer */
4532 memcpy(urb
->transfer_buffer
+ urb_priv
->rx_offset
,
4533 phys_to_virt(myNextRxDesc
->buf
), myNextRxDesc
->hw_len
);
4534 urb_priv
->rx_offset
+= myNextRxDesc
->hw_len
;
4536 /* Signal overflow when returning URB */
4537 urb
->status
= -EOVERFLOW
;
4538 tc_finish_urb_later(hcd
, urb
, urb
->status
);
4543 /* Check if it was the last packet in the transfer */
4544 if (myNextRxDesc
->status
& IO_MASK(USB_IN_status
, eot
)) {
4545 /* Special handling for In Ctrl URBs. */
4546 if(usb_pipecontrol(urb
->pipe
) && usb_pipein(urb
->pipe
) &&
4547 !(urb_priv
->ctrl_zout_done
)) {
4548 /* Flag that RX part of Ctrl transfer is done. Because zout descr
4549 interrupt hasn't happend yet will the URB be finished in the
4551 urb_priv
->ctrl_rx_done
= 1;
4552 tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
4553 " for zout\n", (unsigned int)urb
);
4555 tc_finish_urb(hcd
, urb
, 0);
4558 } else { /* ISOC RX */
4560 isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
4561 epid, (unsigned int)urb);
4564 struct usb_iso_packet_descriptor
*packet
;
4566 if (urb_priv
->urb_state
== UNLINK
) {
4567 isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
4569 } else if (urb_priv
->urb_state
== NOT_STARTED
) {
4570 isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
4574 packet
= &urb
->iso_frame_desc
[urb_priv
->isoc_packet_counter
];
4578 if (myNextRxDesc
->status
& IO_MASK(USB_IN_status
, nodata
)) {
4579 /* We get nodata for empty data transactions, and the rx descriptor's
4580 hw_len field is not valid in that case. We copy 0 bytes however to
4582 packet
->actual_length
= 0;
4584 packet
->actual_length
= myNextRxDesc
->hw_len
;
4585 /* Make sure the data fits in the buffer. */
4586 ASSERT(packet
->actual_length
<= packet
->length
);
4587 memcpy(urb
->transfer_buffer
+ packet
->offset
,
4588 phys_to_virt(myNextRxDesc
->buf
), packet
->actual_length
);
4589 if(packet
->actual_length
> 0)
4590 isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
4591 packet
->actual_length
, urb_priv
->isoc_packet_counter
,
4592 (unsigned int)urb
, urb_priv
->urb_num
);
4595 /* Increment the packet counter. */
4596 urb_priv
->isoc_packet_counter
++;
4598 /* Note that we don't care about the eot field in the rx descriptor's
4599 status. It will always be set for isoc traffic. */
4600 if (urb
->number_of_packets
== urb_priv
->isoc_packet_counter
) {
4601 /* Complete the urb with status OK. */
4602 tc_finish_urb(hcd
, urb
, 0);
4607 myNextRxDesc
->status
= 0;
4608 myNextRxDesc
->command
|= IO_MASK(USB_IN_command
, eol
);
4609 myLastRxDesc
->command
&= ~IO_MASK(USB_IN_command
, eol
);
4610 myLastRxDesc
= myNextRxDesc
;
4611 myNextRxDesc
= phys_to_virt(myNextRxDesc
->next
);
4612 flush_etrax_cache();
4613 *R_DMA_CH9_CMD
= IO_STATE(R_DMA_CH9_CMD
, cmd
, restart
);
4616 local_irq_restore(flags
);
4621 static void tc_bulk_start_timer_func(unsigned long dummy
) {
4622 /* We might enable an EP descriptor behind the current DMA position when
4623 it's about to decide that there are no more bulk traffic and it should
4624 stop the bulk channel.
4625 Therefore we periodically check if the bulk channel is stopped and there
4626 is an enabled bulk EP descriptor, in which case we start the bulk
4629 if (!(*R_DMA_CH8_SUB0_CMD
& IO_MASK(R_DMA_CH8_SUB0_CMD
, cmd
))) {
4632 timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");
4634 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
4635 if (TxBulkEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
4636 timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
4638 restart_dma8_sub0();
4640 /* Restart the bulk eot timer since we just started the bulk channel.*/
4641 mod_timer(&bulk_eot_timer
, jiffies
+ BULK_EOT_TIMER_INTERVAL
);
4643 /* No need to search any further. */
4648 timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
4652 static void tc_bulk_eot_timer_func(unsigned long dummy
) {
4653 struct usb_hcd
*hcd
= (struct usb_hcd
*)dummy
;
4655 /* Because of a race condition in the top half, we might miss a bulk eot.
4656 This timer "simulates" a bulk eot if we don't get one for a while,
4657 hopefully correcting the situation. */
4658 timer_dbg("bulk_eot_timer timed out.\n");
4659 check_finished_bulk_tx_epids(hcd
, 1);
4663 /*************************************************************/
4664 /*************************************************************/
4665 /* Device driver block */
4666 /*************************************************************/
4667 /*************************************************************/
4669 /* Forward declarations for device driver functions */
4670 static int devdrv_hcd_probe(struct device
*);
4671 static int devdrv_hcd_remove(struct device
*);
4673 static int devdrv_hcd_suspend(struct device
*, u32
, u32
);
4674 static int devdrv_hcd_resume(struct device
*, u32
);
4675 #endif /* CONFIG_PM */
4678 static struct platform_device
*devdrv_hc_platform_device
;
4680 /* device driver interface */
4681 static struct device_driver devdrv_hc_device_driver
= {
4682 .name
= (char *) hc_name
,
4683 .bus
= &platform_bus_type
,
4685 .probe
= devdrv_hcd_probe
,
4686 .remove
= devdrv_hcd_remove
,
4689 .suspend
= devdrv_hcd_suspend
,
4690 .resume
= devdrv_hcd_resume
,
4691 #endif /* CONFIG_PM */
4694 /* initialize the host controller and driver */
4695 static int __init_or_module
devdrv_hcd_probe(struct device
*dev
)
4697 struct usb_hcd
*hcd
;
4698 struct crisv10_hcd
*crisv10_hcd
;
4701 /* Check DMA burst length */
4702 if(IO_EXTRACT(R_BUS_CONFIG
, dma_burst
, *R_BUS_CONFIG
) !=
4703 IO_STATE(R_BUS_CONFIG
, dma_burst
, burst32
)) {
4704 devdrv_err("Invalid DMA burst length in Etrax 100LX,"
4705 " needs to be 32\n");
4709 hcd
= usb_create_hcd(&crisv10_hc_driver
, dev
, dev_name(dev
));
4713 crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
4714 spin_lock_init(&crisv10_hcd
->lock
);
4715 crisv10_hcd
->num_ports
= num_ports();
4716 crisv10_hcd
->running
= 0;
4718 dev_set_drvdata(dev
, crisv10_hcd
);
4720 devdrv_dbg("ETRAX USB IRQs HC:%d RX:%d TX:%d\n", ETRAX_USB_HC_IRQ
,
4721 ETRAX_USB_RX_IRQ
, ETRAX_USB_TX_IRQ
);
4723 /* Print out chip version read from registers */
4724 int rev_maj
= *R_USB_REVISION
& IO_MASK(R_USB_REVISION
, major
);
4725 int rev_min
= *R_USB_REVISION
& IO_MASK(R_USB_REVISION
, minor
);
4727 devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj
);
4729 devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj
, rev_min
);
4732 devdrv_info("Bulk timer interval, start:%d eot:%d\n",
4733 BULK_START_TIMER_INTERVAL
,
4734 BULK_EOT_TIMER_INTERVAL
);
4737 /* Init root hub data structures */
4739 devdrv_err("Failed init data for Root Hub\n");
4743 if(port_in_use(0)) {
4744 if (cris_request_io_interface(if_usb_1
, "ETRAX100LX USB-HCD")) {
4745 printk(KERN_CRIT
"usb-host: request IO interface usb1 failed");
4749 devdrv_info("Claimed interface for USB physical port 1\n");
4751 if(port_in_use(1)) {
4752 if (cris_request_io_interface(if_usb_2
, "ETRAX100LX USB-HCD")) {
4753 /* Free first interface if second failed to be claimed */
4754 if(port_in_use(0)) {
4755 cris_free_io_interface(if_usb_1
);
4757 printk(KERN_CRIT
"usb-host: request IO interface usb2 failed");
4761 devdrv_info("Claimed interface for USB physical port 2\n");
4764 /* Init transfer controller structs and locks */
4765 if((retval
= tc_init(hcd
)) != 0) {
4769 /* Attach interrupt functions for DMA and init DMA controller */
4770 if((retval
= tc_dma_init(hcd
)) != 0) {
4774 /* Attach the top IRQ handler for USB controller interrupts */
4775 if (request_irq(ETRAX_USB_HC_IRQ
, crisv10_hcd_top_irq
, 0,
4776 "ETRAX 100LX built-in USB (HC)", hcd
)) {
4777 err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ
);
4782 /* iso_eof is only enabled when isoc traffic is running. */
4783 *R_USB_IRQ_MASK_SET
=
4784 /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
4785 IO_STATE(R_USB_IRQ_MASK_SET
, bulk_eot
, set
) |
4786 IO_STATE(R_USB_IRQ_MASK_SET
, epid_attn
, set
) |
4787 IO_STATE(R_USB_IRQ_MASK_SET
, port_status
, set
) |
4788 IO_STATE(R_USB_IRQ_MASK_SET
, ctl_status
, set
);
4791 crisv10_ready_wait();
4792 /* Reset the USB interface. */
4794 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
4795 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
4796 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, reset
);
4798 /* Designer's Reference, p. 8 - 10 says we should Initate R_USB_FM_PSTART to
4799 0x2A30 (10800), to guarantee that control traffic gets 10% of the
4800 bandwidth, and periodic transfer may allocate the rest (90%).
4801 This doesn't work though.
4802 The value 11960 is chosen to be just after the SOF token, with a couple
4803 of bit times extra for possible bit stuffing. */
4804 *R_USB_FM_PSTART
= IO_FIELD(R_USB_FM_PSTART
, value
, 11960);
4806 crisv10_ready_wait();
4807 /* Configure the USB interface as a host controller. */
4809 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
4810 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
4811 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_config
);
4814 /* Check so controller not busy before enabling ports */
4815 crisv10_ready_wait();
4817 /* Enable selected USB ports */
4818 if(port_in_use(0)) {
4819 *R_USB_PORT1_DISABLE
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, no
);
4821 *R_USB_PORT1_DISABLE
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, yes
);
4823 if(port_in_use(1)) {
4824 *R_USB_PORT2_DISABLE
= IO_STATE(R_USB_PORT2_DISABLE
, disable
, no
);
4826 *R_USB_PORT2_DISABLE
= IO_STATE(R_USB_PORT2_DISABLE
, disable
, yes
);
4829 crisv10_ready_wait();
4830 /* Start processing of USB traffic. */
4832 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
4833 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
4834 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
4836 /* Do not continue probing initialization before USB interface is done */
4837 crisv10_ready_wait();
4839 /* Register our Host Controller to USB Core
4840 * Finish the remaining parts of generic HCD initialization: allocate the
4841 * buffers of consistent memory, register the bus
4842 * and call the driver's reset() and start() routines. */
4843 retval
= usb_add_hcd(hcd
, ETRAX_USB_HC_IRQ
, IRQF_DISABLED
);
4845 devdrv_err("Failed registering HCD driver\n");
4852 devdrv_hcd_remove(dev
);
4857 /* cleanup after the host controller and driver */
4858 static int __init_or_module
devdrv_hcd_remove(struct device
*dev
)
4860 struct crisv10_hcd
*crisv10_hcd
= dev_get_drvdata(dev
);
4861 struct usb_hcd
*hcd
;
4865 hcd
= crisv10_hcd_to_hcd(crisv10_hcd
);
4868 /* Stop USB Controller in Etrax 100LX */
4869 crisv10_hcd_reset(hcd
);
4871 usb_remove_hcd(hcd
);
4872 devdrv_dbg("Removed HCD from USB Core\n");
4874 /* Free USB Controller IRQ */
4875 free_irq(ETRAX_USB_HC_IRQ
, NULL
);
4877 /* Free resources */
4882 if(port_in_use(0)) {
4883 cris_free_io_interface(if_usb_1
);
4885 if(port_in_use(1)) {
4886 cris_free_io_interface(if_usb_2
);
4889 devdrv_dbg("Freed all claimed resources\n");
4897 static int devdrv_hcd_suspend(struct usb_hcd
*hcd
, u32 state
, u32 level
)
4899 return 0; /* no-op for now */
4902 static int devdrv_hcd_resume(struct usb_hcd
*hcd
, u32 level
)
4904 return 0; /* no-op for now */
4907 #endif /* CONFIG_PM */
4910 /*************************************************************/
4911 /*************************************************************/
4913 /*************************************************************/
4914 /*************************************************************/
4916 /* register driver */
4917 static int __init
module_hcd_init(void)
4923 /* Here we select enabled ports by following defines created from
4925 #ifndef CONFIG_ETRAX_USB_HOST_PORT1
4928 #ifndef CONFIG_ETRAX_USB_HOST_PORT2
4932 printk(KERN_INFO
"%s version "VERSION
" "COPYRIGHT
"\n", product_desc
);
4934 devdrv_hc_platform_device
=
4935 platform_device_register_simple((char *) hc_name
, 0, NULL
, 0);
4937 if (IS_ERR(devdrv_hc_platform_device
))
4938 return PTR_ERR(devdrv_hc_platform_device
);
4939 return driver_register(&devdrv_hc_device_driver
);
4941 * Note that we do not set the DMA mask for the device,
4942 * i.e. we pretend that we will use PIO, since no specific
4943 * allocation routines are needed for DMA buffers. This will
4944 * cause the HCD buffer allocation routines to fall back to
4949 /* unregister driver */
4950 static void __exit
module_hcd_exit(void)
4952 driver_unregister(&devdrv_hc_device_driver
);
4957 module_init(module_hcd_init
);
4958 module_exit(module_hcd_exit
);