/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licenced under the GPL.
 */

#include <linux/irq.h>
/*-------------------------------------------------------------------------*/
/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ahcd lock held, irqs blocked.
 */
static void
finish_urb(struct admhcd *ahcd, struct urb *urb)
__releases(ahcd->lock)
__acquires(ahcd->lock)
{
        urb_priv_free(ahcd, urb->hcpriv);
        urb->hcpriv = NULL;

        spin_lock(&urb->lock);
        if (likely(urb->status == -EINPROGRESS))
                urb->status = 0;

        /* report short control reads right even though the data TD always
         * has TD_R set. (much simpler, but creates the 1-td limit.)
         */
        if (unlikely(urb->transfer_flags & URB_SHORT_NOT_OK)
                        && unlikely(usb_pipecontrol(urb->pipe))
                        && urb->actual_length < urb->transfer_buffer_length
                        && usb_pipein(urb->pipe)
                        && urb->status == 0) {
                urb->status = -EREMOTEIO;
#ifdef ADMHC_VERBOSE_DEBUG
                urb_print(ahcd, urb, "SHORT", usb_pipeout(urb->pipe));
#endif
        }
        spin_unlock(&urb->lock);

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_ISOCHRONOUS:
                admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
                break;
        case PIPE_INTERRUPT:
                admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
                break;
        }

#ifdef ADMHC_VERBOSE_DEBUG
        urb_print(ahcd, urb, "FINISH", 0);
#endif

        /* urb->complete() can reenter this HCD */
        spin_unlock(&ahcd->lock);
        usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb);
        spin_lock(&ahcd->lock);
}
/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/
static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
{
        struct ed *ed;
        struct td *td;

        ed = ed_alloc(ahcd, GFP_ATOMIC);
        if (!ed)
                goto err;

        /* dummy td; end of td list for this ed */
        td = td_alloc(ahcd, GFP_ATOMIC);
        if (!td)
                goto err_free_ed;

        switch (type) {
        case PIPE_INTERRUPT:
                info |= ED_INT;
                break;
        case PIPE_ISOCHRONOUS:
                info |= ED_ISO;
                break;
        }

        ed->dummy = td;
        ed->state = ED_NEW;
        ed->type = type;

        ed->hwINFO = cpu_to_hc32(ahcd, info);
        ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
        ed->hwHeadP = cpu_to_hc32(ahcd, td->td_dma);

        INIT_LIST_HEAD(&ed->urb_pending);

        return ed;

err_free_ed:
        ed_free(ahcd, ed);
err:
        return NULL;
}
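/* NOTE: with hwHeadP == hwTailP == the dummy TD, the controller sees an
 * empty TD queue for this endpoint. Queued transfers are chained in front
 * of the dummy (see td_fill()/ed_next_urb() below), the usual OHCI trick
 * that lets the HC keep running while software appends TDs.
 */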
/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
        struct usb_device *udev, unsigned int pipe, int interval)
{
        struct ed *ed;
        unsigned long flags;

        spin_lock_irqsave(&ahcd->lock, flags);

        ed = ep->hcpriv;
        if (!ed) {
                u32 info;

                /* FIXME: usbcore changes dev->devnum before SET_ADDRESS
                 * succeeds ... otherwise we wouldn't need "pipe".
                 */
                info = usb_pipedevice(pipe);
                info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
                info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
                if (udev->speed == USB_SPEED_FULL)
                        info |= ED_SPEED_FULL;

                ed = ed_create(ahcd, usb_pipetype(pipe), info);
                if (ed)
                        ep->hcpriv = ed;
        }

        spin_unlock_irqrestore(&ahcd->lock, flags);

        return ed;
}
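/* Worked example of the info word built above (the shift values live in
 * the HCD header): devnum 3, endpoint 0x81 (IN), wMaxPacketSize 64, full
 * speed gives
 *
 *     info = 3 | ((0x81 & ~USB_DIR_IN) << ED_EN_SHIFT)
 *              | (64 << ED_MPS_SHIFT) | ED_SPEED_FULL
 *
 * i.e. function address 3, endpoint number 1, max packet size 64.
 */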
static void ed_next_urb(struct admhcd *ahcd, struct ed *ed)
{
        struct urb_priv *up;
        u32 carry;

        up = list_entry(ed->urb_pending.next, struct urb_priv, pending);
        list_del(&up->pending);

        ed->urb_active = up;
        ed->state = ED_OPER;

#ifdef ADMHC_VERBOSE_DEBUG
        urb_print(ahcd, up->urb, "NEXT", 0);
        admhc_dump_ed(ahcd, " ", ed, 0);
#endif

        /* chain the URB's TDs in front of the dummy TD */
        up->td[up->td_cnt - 1]->hwNextTD = cpu_to_hc32(ahcd, ed->dummy->td_dma);

        carry = hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_C;
        ed->hwHeadP = cpu_to_hc32(ahcd, up->td[0]->td_dma | carry);
        ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
}
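/* NOTE: the low bits of hwHeadP are not address bits: ED_C holds the
 * data-toggle carry and ED_H the halt flag. Rewriting the head pointer
 * must OR the old carry back in (done above); otherwise the next URB's
 * first bulk/interrupt packet could start with the wrong DATA0/DATA1
 * toggle and be silently dropped by the device.
 */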
/* link an ed into the HC chain */
static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
{
        struct ed *old_tail;

        if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
                return -EAGAIN;

        if (ed->state == ED_NEW) {
                ed->state = ED_IDLE;

                old_tail = ahcd->ed_tails[ed->type];

                ed->ed_next = old_tail->ed_next;
                if (ed->ed_next) {
                        ed->ed_next->ed_prev = ed;
                        ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
                }
                ed->ed_prev = old_tail;

                old_tail->ed_next = ed;
                old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);

                ahcd->ed_tails[ed->type] = ed;
                ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
        }

#ifdef ADMHC_VERBOSE_DEBUG
        admhc_dump_ed(ahcd, "ED-SCHED", ed, 0);
#endif

        if (!ed->urb_active) {
                ed_next_urb(ahcd, ed);
                admhc_dma_enable(ahcd);
        }

        return 0;
}
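/* NOTE on ordering: the new ED is fully initialized (ed_next, hwNextED,
 * ed_prev) before old_tail->hwNextED is rewritten to point at it. The HC
 * walks the list through hwNextED only, so it observes either the old
 * list or the complete new one; DMA never has to be stopped for an
 * insert.
 */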
static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
{
#ifdef ADMHC_VERBOSE_DEBUG
        admhc_dump_ed(ahcd, "ED-DESCHED", ed, 0);
#endif

        /* remove this ED from the HC list */
        ed->ed_prev->hwNextED = ed->hwNextED;

        /* and remove it from our list */
        ed->ed_prev->ed_next = ed->ed_next;

        if (ed->ed_next)
                ed->ed_next->ed_prev = ed->ed_prev;

        if (ahcd->ed_tails[ed->type] == ed)
                ahcd->ed_tails[ed->type] = ed->ed_prev;

        ed->state = ED_NEW;
}
static void ed_start_deschedule(struct admhcd *ahcd, struct ed *ed)
{
#ifdef ADMHC_VERBOSE_DEBUG
        admhc_dump_ed(ahcd, "ED-UNLINK", ed, 0);
#endif

        ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
        ed->state = ED_UNLINK;

        /* SOF interrupt might get delayed; record the frame counter value that
         * indicates when the HC isn't looking at it, so concurrent unlinks
         * behave. frame_no wraps every 2^16 msec, and changes right before
         * SOF is triggered.
         */
        ed->tick = admhc_frame_no(ahcd) + 1;

        admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
}
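/* The unlink handshake, in short: mark the ED with ED_SKIP so the HC stops
 * processing it, remember the first frame number at which the HC can no
 * longer be touching it (ed->tick), then let the SOF interrupt invoke
 * admhc_finish_unlinks() once that frame has passed. Only then is it safe
 * to pull the ED out of the hardware list.
 */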
/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/
static void td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
        struct urb_priv *up)
{
        struct td *td;
        u32 cbl = 0;

        if (up->td_idx >= up->td_cnt) {
                admhc_err(ahcd, "td_fill error, idx=%d, cnt=%d\n", up->td_idx,
                        up->td_cnt);
                return;
        }

        td = up->td[up->td_idx];
        td->data_dma = data;
        if (!len)
                data = 0;

        /* interrupt on completion of the last TD only */
        if (up->td_idx == up->td_cnt - 1)
                cbl |= TD_IE;

        cbl |= (len & TD_BL_MASK);

        /* setup hardware specific fields */
        td->hwINFO = cpu_to_hc32(ahcd, info);
        td->hwDBP = cpu_to_hc32(ahcd, data);
        td->hwCBL = cpu_to_hc32(ahcd, cbl);

        /* chain to the previous TD of this URB, if any */
        if (up->td_idx > 0)
                up->td[up->td_idx - 1]->hwNextTD = cpu_to_hc32(ahcd, td->td_dma);

        up->td_idx++;
}
/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
{
        struct urb_priv *urb_priv = urb->hcpriv;
        dma_addr_t data;
        int data_len = urb->transfer_buffer_length;
        int cnt = 0;
        u32 info = 0;
        int is_out = usb_pipeout(urb->pipe);
        u32 toggle = 0;

        /* OHCI handles the bulk/interrupt data toggles itself. We just
         * use the device toggle bits for resetting, and rely on the fact
         * that resetting toggle is meaningless if the endpoint is active.
         */
        if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
                toggle = TD_T_CARRY;
        } else {
                toggle = TD_T_DATA0;
                usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        is_out, 1);
        }

        urb_priv->td_idx = 0;

        if (data_len)
                data = urb->transfer_dma;
        else
                data = 0;

        /* NOTE: TD_CC is set so we can tell which TDs the HC processed by
         * using TD_CC_GET, as well as by seeing them on the done list.
         * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
         */
        switch (urb_priv->ed->type) {
        case PIPE_INTERRUPT:
                info = is_out
                        ? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
                        : TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

                /* setup service interval and starting frame number */
                info |= (urb->start_frame & TD_FN_MASK);
                info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;

                td_fill(ahcd, info, data, data_len, urb_priv);
                cnt++;

                admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
                break;

        case PIPE_BULK:
                info = is_out
                        ? TD_SCC_NOTACCESSED | TD_DP_OUT
                        : TD_SCC_NOTACCESSED | TD_DP_IN;

                /* TDs _could_ transfer up to 8K each */
                while (data_len > TD_DATALEN_MAX) {
                        td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
                                data, TD_DATALEN_MAX, urb_priv);
                        data += TD_DATALEN_MAX;
                        data_len -= TD_DATALEN_MAX;
                        cnt++;
                }

                td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
                        data_len, urb_priv);
                cnt++;

                if ((urb->transfer_flags & URB_ZERO_PACKET)
                                && (cnt < urb_priv->td_cnt)) {
                        td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
                                0, 0, urb_priv);
                        cnt++;
                }
                break;

        /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
         * any DATA phase works normally, and the STATUS ack is special.
         */
        case PIPE_CONTROL:
                /* fill a TD for the setup */
                info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
                td_fill(ahcd, info, urb->setup_dma, 8, urb_priv);
                cnt++;

                if (data_len > 0) {
                        /* fill a TD for the data */
                        info = TD_SCC_NOTACCESSED | TD_T_DATA1;
                        info |= is_out ? TD_DP_OUT : TD_DP_IN;
                        /* NOTE: mishandles transfers >8K, some >4K */
                        td_fill(ahcd, info, data, data_len, urb_priv);
                        cnt++;
                }

                /* fill a TD for the ACK */
                info = (is_out || data_len == 0)
                        ? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
                        : TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
                td_fill(ahcd, info, data, 0, urb_priv);
                cnt++;
                break;

        /* ISO has no retransmit, so no toggle;
         * Each TD could handle multiple consecutive frames (interval 1);
         * we could often reduce the number of TDs here.
         */
        case PIPE_ISOCHRONOUS:
                info = TD_SCC_NOTACCESSED;
                for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
                        int frame = urb->start_frame;

                        frame += cnt * urb->interval;
                        frame &= TD_FN_MASK;
                        td_fill(ahcd, info | frame,
                                data + urb->iso_frame_desc[cnt].offset,
                                urb->iso_frame_desc[cnt].length,
                                urb_priv);
                }
                admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
                break;
        }

        if (urb_priv->td_cnt != cnt)
                admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);

        urb_priv->td_idx = 0;
}
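/* Example TD layout this produces for a bulk OUT URB longer than
 * TD_DATALEN_MAX with URB_ZERO_PACKET set (lengths assume a 4 KiB
 * TD_DATALEN_MAX; the real value comes from the HCD header):
 *
 *   10000 bytes -> TD0 4096 (first toggle), TD1 4096 (TD_T_CARRY),
 *                  TD2 1808 (TD_T_CARRY), TD3 zero-length packet
 *
 * A control URB always gets SETUP + optional DATA + STATUS, i.e. two or
 * three TDs.
 */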
/* calculate transfer length/status and update the urb
 * PRECONDITION: irqsafe (only for urb->status locking)
 */
static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
{
        u32 info = hc32_to_cpup(ahcd, &td->hwINFO);
        u32 dbp = hc32_to_cpup(ahcd, &td->hwDBP);
        u32 cbl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
        int type = usb_pipetype(urb->pipe);
        int cc;

        cc = TD_CC_GET(info);

        /* ISO ... drivers see per-TD length/status */
        if (type == PIPE_ISOCHRONOUS) {
                /* TODO: this path was inherited from ohci-q.c; tdINFO and
                 * tdPSW have no equivalents here yet, so it stays disabled.
                 */
#if 0
                int dlen = 0;

                /* NOTE: assumes FC in tdINFO == 0, and that
                 * only the first of 0..MAXPSW psws is used.
                 */
                if (tdINFO & TD_CC)     /* hc didn't touch? */
                        return;

                if (usb_pipeout(urb->pipe))
                        dlen = urb->iso_frame_desc[td->index].length;
                else {
                        /* short reads are always OK for ISO */
                        if (cc == TD_DATAUNDERRUN)
                                cc = TD_CC_NOERROR;
                        dlen = tdPSW & 0x3ff;
                }

                urb->actual_length += dlen;
                urb->iso_frame_desc[td->index].actual_length = dlen;
                urb->iso_frame_desc[td->index].status = cc_to_error[cc];

                if (cc != TD_CC_NOERROR)
                        admhc_vdbg(ahcd,
                                "urb %p iso td %p (%d) len %d cc %d\n",
                                urb, td, 1 + td->index, dlen, cc);
#endif
        /* BULK, INT, CONTROL ... drivers see aggregate length/status,
         * except that "setup" bytes aren't counted and "short" transfers
         * might not be reported as errors.
         */
        } else {
#ifdef ADMHC_VERBOSE_DEBUG
                admhc_dump_td(ahcd, "td_done", td);
#endif

                /* count all non-empty packets except control SETUP packet */
                if ((type != PIPE_CONTROL || td->index != 0) && dbp != 0) {
                        urb->actual_length += dbp - td->data_dma + cbl;
                }
        }

        return cc;
}
/*-------------------------------------------------------------------------*/
static void ed_update(struct admhcd *ahcd, struct ed *ed, int force)
{
        struct urb_priv *up;
        struct urb *urb;
        int cc;

        up = ed->urb_active;
        if (!up)
                return;

        urb = up->urb;

#ifdef ADMHC_VERBOSE_DEBUG
        urb_print(ahcd, urb, "UPDATE", 0);
        admhc_dump_ed(ahcd, "ED-UPDATE", ed, 1);
#endif

        cc = TD_CC_NOERROR;
        for (; up->td_idx < up->td_cnt; up->td_idx++) {
                struct td *td = up->td[up->td_idx];

                if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
                        break;

                cc = td_done(ahcd, urb, td);
                if (cc != TD_CC_NOERROR) {
                        admhc_vdbg(ahcd,
                                "urb %p td %p (%d) cc %d, len=%d/%d\n",
                                urb, td, td->index, cc,
                                urb->actual_length,
                                urb->transfer_buffer_length);

                        up->td_idx = up->td_cnt;
                        break;
                }
        }

        if ((up->td_idx != up->td_cnt) && (!force))
                /* the URB is not completed yet */
                return;

        /* update packet status if needed (short is normally ok) */
        if (cc == TD_CC_DATAUNDERRUN
                        && !(urb->transfer_flags & URB_SHORT_NOT_OK))
                cc = TD_CC_NOERROR;

        if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
                spin_lock(&urb->lock);
                if (urb->status == -EINPROGRESS)
                        urb->status = cc_to_error[cc];
                spin_unlock(&urb->lock);
        }

        finish_urb(ahcd, urb);

        ed->urb_active = NULL;
        ed->state = ED_IDLE;
}
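/* NOTE: 'force' makes ed_update() report whatever has completed so far
 * even if some TDs are still outstanding. Callers use it when the ED has
 * halted (see admhc_td_complete() below) or is being unlinked, where the
 * remaining TDs will never complete normally.
 */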
/* there are some tds completed; called in_irq(), with HCD locked */
static void admhc_td_complete(struct admhcd *ahcd)
{
        struct ed *ed;
        int more = 0;

        for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
                if (ed->state != ED_OPER)
                        continue;

                if (hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) {
                        admhc_dump_ed(ahcd, "ed halted", ed, 1);
                        ed_update(ahcd, ed, 1);
                        ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
                } else
                        ed_update(ahcd, ed, 0);

                if (ed->urb_active) {
                        more = 1;
                        continue;
                }

                if (!(list_empty(&ed->urb_pending))) {
                        more = 1;
                        ed_next_urb(ahcd, ed);
                        continue;
                }

                ed_start_deschedule(ahcd, ed);
        }

        if (!more)
                admhc_dma_disable(ahcd);
}
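/* NOTE: ED_H in hwHeadP is set by the controller when a TD on this ED
 * completed with an error; the ED stays halted (no further TDs are
 * processed) until software clears the bit, which happens above once the
 * partial results have been collected with ed_update(..., force=1).
 */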
/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void admhc_finish_unlinks(struct admhcd *ahcd, u16 tick)
{
        struct ed *ed;

        for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
                if (ed->state != ED_UNLINK)
                        continue;

                if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state)))
                        if (tick_before(tick, ed->tick)) {
                                /* not yet */
                                continue;
                        }

                /* process partial status */
                ed_update(ahcd, ed, 1);

                if (list_empty(&ed->urb_pending))
                        ed_deschedule(ahcd, ed);
                else
                        ed_schedule(ahcd, ed);
        }

        if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state)))
                admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);
}
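/* NOTE: tick_before() (see its definition in the HCD header) compares the
 * 16-bit frame numbers modulo 2^16 via signed 16-bit subtraction, so the
 * test above stays correct when frame_no wraps, e.g.
 * tick_before(0xfffe, 0x0001) is true.
 */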