/*
 * ADM5120 HCD (Host Controller Driver) for USB
 *
 * Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org>
 *
 * This file was derived from: drivers/usb/host/ohci-q.c
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licenced under the GPL.
 */

#include <linux/irq.h>

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ahcd lock held, irqs blocked.
 */
static void
finish_urb(struct admhcd *ahcd, struct urb *urb)
__releases(ahcd->lock)
__acquires(ahcd->lock)
{
	urb_priv_free(ahcd, urb->hcpriv);

	spin_lock(&urb->lock);
	if (likely(urb->status == -EINPROGRESS))
		urb->status = 0;

	/* report short control reads right even though the data TD always
	 * has TD_R set. (much simpler, but creates the 1-td limit.)
	 */
	if (unlikely(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& unlikely(usb_pipecontrol(urb->pipe))
			&& urb->actual_length < urb->transfer_buffer_length
			&& usb_pipein(urb->pipe)
			&& urb->status == 0) {
		urb->status = -EREMOTEIO;
#ifdef ADMHC_VERBOSE_DEBUG
		urb_print(ahcd, urb, "SHORT", usb_pipeout(urb->pipe));
#endif
	}
	spin_unlock(&urb->lock);

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "RET", usb_pipeout(urb->pipe));
#endif

	/* urb->complete() can reenter this HCD */
	spin_unlock(&ahcd->lock);
	usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb);
	spin_lock(&ahcd->lock);
}

/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance(struct admhcd *ahcd, int interval, int load)
{
	int i, branch = -ENOSPC;

	/* iso periods can be huge; iso tds specify frame numbers */
	if (interval > NUM_INTS)
		interval = NUM_INTS;

	/* search for the least loaded schedule branch of that period
	 * that has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval; i++) {
		if (branch < 0 || ahcd->load[branch] > ahcd->load[i]) {
			int j;

			/* usb 1.1 says 90% of one frame */
			for (j = i; j < NUM_INTS; j += interval) {
				if ((ahcd->load[j] + load) > 900)
					break;
			}
			if (j < NUM_INTS)
				continue;
			branch = i;
		}
	}

	return branch;
}
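
/* Worked example (assuming NUM_INTS == 32, as in the OHCI driver this
 * file derives from, with loads kept in usecs): an interrupt ED with
 * interval 8 and load 100 is placed on the branch i in 0..7 whose
 * slots i, i+8, i+16 and i+24 all stay within the 900 usec budget
 * after adding 100; among those, the least loaded branch wins.
 */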

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place. most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link(struct admhcd *ahcd, struct ed *ed)
{
	unsigned i;

	admhc_vdbg(ahcd, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed **prev = &ahcd->periodic[i];
		__hc32 *prev_p = &ahcd->hcca->int_table[i];
		struct ed *here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			wmb();
			*prev = ed;
			*prev_p = cpu_to_hc32(ahcd, ed->dma);
			wmb();
		}
		ahcd->load[i] += ed->load;
	}
	admhcd_to_hcd(ahcd)->self.bandwidth_allocated += ed->load / ed->interval;
}
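
/* Note the bookkeeping split: ahcd->load[] tracks per-slot usecs for
 * balance(), while bandwidth_allocated is the per-frame average that
 * usbcore sees (e.g. load 300 at interval 4 adds 75).
 */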

/* link an ed into the HC chain */
static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
{
	struct ed *old_tail;

	if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
		return -EAGAIN;

	ed->state = ED_OPER;

	old_tail = ahcd->ed_tails[ed->type];

	ed->ed_next = old_tail->ed_next;
	if (ed->ed_next) {
		ed->ed_next->ed_prev = ed;
		ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
	}
	ed->ed_prev = old_tail;

	old_tail->ed_next = ed;
	old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);

	ahcd->ed_tails[ed->type] = ed;

	admhc_dma_enable(ahcd);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink(struct admhcd *ahcd, struct ed *ed)
{
	int i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed *temp;
		struct ed **prev = &ahcd->periodic[i];
		__hc32 *prev_p = &ahcd->hcca->int_table[i];

		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ahcd->load[i] -= ed->load;
	}

	admhcd_to_hcd(ahcd)->self.bandwidth_allocated -= ed->load / ed->interval;
	admhc_vdbg(ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}

/* unlink an ed from the HC chain.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't. ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new. The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 * - ED_OPER: when there's any request queued, the ED gets rescheduled
 *   immediately. HC should be working on them.
 *
 * - ED_IDLE: when there's no TD queue. there's no reason for the HC
 *   to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
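
/* A rough sketch of the resulting HCD-internal lifecycle (these names
 * are this driver's, not the USB spec's):
 *
 *   ED_IDLE --ed_schedule()--> ED_OPER --ed_deschedule()--> ED_UNLINK
 *   ED_UNLINK --finish_unlinks(), TDs queued--> ED_OPER
 *   ED_UNLINK --finish_unlinks(), queue empty--> ED_IDLE
 */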

static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
{
#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ED-DESCHED", ed, 1);
#endif

	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);

	ed->state = ED_UNLINK;

	/* remove this ED from the HC list */
	ed->ed_prev->hwNextED = ed->hwNextED;

	/* and remove it from our list also */
	ed->ed_prev->ed_next = ed->ed_next;

	if (ed->ed_next)
		ed->ed_next->ed_prev = ed->ed_prev;

	if (ahcd->ed_tails[ed->type] == ed)
		ahcd->ed_tails[ed->type] = ed->ed_prev;
}

/*-------------------------------------------------------------------------*/

static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
{
	struct ed *ed;
	struct td *td;

	ed = ed_alloc(ahcd, GFP_ATOMIC);
	if (!ed)
		goto err;

	/* dummy td; end of td list for this ed */
	td = td_alloc(ahcd, GFP_ATOMIC);
	if (!td)
		goto err_free_ed;

	switch (type) {
	case PIPE_INTERRUPT:
		info |= ED_INT;
		break;
	case PIPE_ISOCHRONOUS:
		info |= ED_ISO;
		break;
	}

	ed->dummy = td;
	ed->state = ED_IDLE;
	ed->type = type;

	ed->hwINFO = cpu_to_hc32(ahcd, info);
	ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
	ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */

	return ed;

err_free_ed:
	ed_free(ahcd, ed);
err:
	return NULL;
}

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
	struct usb_device *udev, unsigned int pipe, int interval)
{
	struct ed *ed;
	unsigned long flags;

	spin_lock_irqsave(&ahcd->lock, flags);

	ed = ep->hcpriv;
	if (!ed) {
		u32 info;

		/* FIXME: usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice(pipe);
		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
		info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
		if (udev->speed == USB_SPEED_FULL)
			info |= ED_SPEED_FULL;

		ed = ed_create(ahcd, usb_pipetype(pipe), info);
		if (ed)
			ep->hcpriv = ed;
	}

	spin_unlock_irqrestore(&ahcd->lock, flags);

	return ed;
}

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SOFI) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink(struct admhcd *ahcd, struct ed *ed)
{
#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ED-UNLINK", ed, 1);
#endif

	ed->hwINFO |= cpu_to_hc32(ahcd, ED_DEQUEUE);
	ed_deschedule(ahcd, ed);

	/* add this ED into the remove list */
	ed->ed_rm_next = ahcd->ed_rm_list;
	ahcd->ed_rm_list = ed;

	/* enable SOF interrupt */
	admhc_intr_ack(ahcd, ADMHC_INTR_SOFI);
	admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
	/* flush those writes */
	admhc_writel_flush(ahcd);

	/* SOF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave. frame_no wraps every 2^16 msec, and changes right before
	 * SOF is triggered.
	 */
	ed->tick = admhc_frame_no(ahcd) + 1;
}
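
/* finish_unlinks() compares this tick against the current frame number
 * with tick_before(), a wrap-safe 16-bit comparison (same idea as
 * time_before() for jiffies), so the ED is only reclaimed once the
 * frame counter has moved past ed->tick.
 */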

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
static void
td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td *td, *td_pt;
	struct urb_priv *urb_priv = urb->hcpriv;
	int hash;
	u32 cbl = 0;

#if 1
	if (index == (urb_priv->td_cnt - 1) &&
		((urb->transfer_flags & URB_NO_INTERRUPT) == 0))
		cbl |= TD_IE;
#else
	if (index == (urb_priv->td_cnt - 1))
		cbl |= TD_IE;
#endif

	/* use this td as the next dummy */
	td_pt = urb_priv->td[index];

	/* fill the old dummy TD */
	td = urb_priv->td[index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	if (data)
		cbl |= (len & TD_BL_MASK);

	info |= TD_OWN;

	/* setup hardware specific fields */
	td->hwINFO = cpu_to_hc32(ahcd, info);
	td->hwDBP = cpu_to_hc32(ahcd, data);
	td->hwCBL = cpu_to_hc32(ahcd, cbl);
	td->hwNextTD = cpu_to_hc32(ahcd, td_pt->td_dma);

	/* append to queue */
	list_add_tail(&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC(td->td_dma);
	td->td_hash = ahcd->td_hash[hash];
	ahcd->td_hash[hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb();
	td->ed->hwTailP = td->hwNextTD;
}
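
/* The dummy-TD swap above is the standard OHCI enqueue trick: the ED's
 * tail always points at an empty TD that the HC never processes, so a
 * new transfer is published by filling in the old dummy and appending
 * a fresh one; the HC never chases a half-written descriptor.
 */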

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	dma_addr_t data;
	int data_len = urb->transfer_buffer_length;
	int cnt = 0;
	u32 info = 0;
	int is_out = usb_pipeout(urb->pipe);
	u32 toggle = 0;

	/* OHCI handles the bulk/interrupt data toggles itself. We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */

	if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
		toggle = TD_T_CARRY;
	} else {
		toggle = TD_T_DATA0;
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			is_out, 1);
	}

	urb_priv->td_idx = 0;
	list_add(&urb_priv->pending, &ahcd->pending);

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE: TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {
	case PIPE_INTERRUPT:
		info = is_out
			? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

		/* setup service interval and starting frame number */
		info |= (urb->start_frame & TD_FN_MASK);
		info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;

		td_fill(ahcd, info, data, data_len, urb, cnt);
		cnt++;

		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
		break;

	case PIPE_BULK:
		info = is_out
			? TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_SCC_NOTACCESSED | TD_DP_IN;

		/* TDs _could_ transfer up to 8K each */
		while (data_len > TD_DATALEN_MAX) {
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				data, TD_DATALEN_MAX, urb, cnt);
			data += TD_DATALEN_MAX;
			data_len -= TD_DATALEN_MAX;
			cnt++;
		}

		td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
			data_len, urb, cnt);
		cnt++;

		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& (cnt < urb_priv->td_cnt)) {
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				0, 0, urb, cnt);
			cnt++;
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		/* fill a TD for the setup */
		info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
		td_fill(ahcd, info, urb->setup_dma, 8, urb, cnt++);

		if (data_len > 0) {
			/* fill a TD for the data */
			info = TD_SCC_NOTACCESSED | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE: mishandles transfers >8K, some >4K */
			td_fill(ahcd, info, data, data_len, urb, cnt++);
		}

		/* fill a TD for the ACK */
		info = (is_out || data_len == 0)
			? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
			: TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
		td_fill(ahcd, info, data, 0, urb, cnt++);
		break;

	/* ISO has no retransmit, so no toggle;
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		info = TD_SCC_NOTACCESSED;
		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			int frame = urb->start_frame;

			frame += cnt * urb->interval;
			frame &= TD_FN_MASK;
			td_fill(ahcd, info | frame,
				data + urb->iso_frame_desc[cnt].offset,
				urb->iso_frame_desc[cnt].length, urb, cnt);
		}
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
		break;
	}

	if (urb_priv->td_cnt != cnt)
		admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);
}
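
/* Example of the control case above: a GET_DESCRIPTOR read becomes
 * three stages queued back to back -- SETUP (8 bytes, DATA0), one IN
 * data TD (DATA1), and a zero-length OUT status TD (DATA1). The single
 * data TD is why transfers beyond TD_DATALEN_MAX are noted as
 * mishandled, and why finish_urb() has its "1-td limit" for short
 * control reads.
 */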

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb
 * PRECONDITION: irqsafe (only for urb->status locking)
 */
static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	u32 info = hc32_to_cpup(ahcd, &td->hwINFO);
	int type = usb_pipetype(urb->pipe);
	int cc;

	cc = TD_CC_GET(info);

	/* ISO ... drivers see per-TD length/status */
	if (type == PIPE_ISOCHRONOUS) {
#if 0
		/* TODO */
		int dlen = 0;

		/* NOTE: assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */

		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return;

		if (usb_pipeout(urb->pipe))
			dlen = urb->iso_frame_desc[td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}

		urb->actual_length += dlen;
		urb->iso_frame_desc[td->index].actual_length = dlen;
		urb->iso_frame_desc[td->index].status = cc_to_error[cc];

		if (cc != TD_CC_NOERROR)
			admhc_vdbg(ahcd,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);
#endif
	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		u32 bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
		u32 tdDBP = hc32_to_cpup(ahcd, &td->hwDBP);

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_CC_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;

		if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
			spin_lock(&urb->lock);
			if (urb->status == -EINPROGRESS)
				urb->status = cc_to_error[cc];
			spin_unlock(&urb->lock);
		}

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0) {
			urb->actual_length += tdDBP - td->data_dma + bl;
		}

		if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
			admhc_vdbg(ahcd,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}

	list_del(&td->td_list);
	urb_priv->td_idx++;

	return cc;
}
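
/* td_done() returns the raw completion code; it is mapped to an errno
 * through cc_to_error[] (a stall becoming -EPIPE, for instance, in the
 * usual OHCI-style table) only when urb->status is actually updated.
 */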

/*-------------------------------------------------------------------------*/

static inline struct td *
ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev)
{
	struct urb *urb = td->urb;
	struct ed *ed = td->ed;
	struct list_head *tmp = td->td_list.next;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

	admhc_dump_ed(ahcd, "ed halted", td->ed, 1);
	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

	/* put any later tds from this urb onto the donelist, after 'td',
	 * order won't matter here: no errors, and nothing was transferred.
	 * also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td *next;
		__hc32 info;

		next = list_entry(tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */
		info = next->hwINFO;
		info |= cpu_to_hc32(ahcd, TD_DONE);
		info &= ~cpu_to_hc32(ahcd, TD_CC);
		next->hwINFO = info;

		next->next_dl_td = rev;
		rev = next;

		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting: report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_CC_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol(urb->pipe))
			break;
		/* fallthrough */
	default:
		admhc_dbg(ahcd,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			hc32_to_cpu(ahcd, td->hwINFO),
			cc, cc_to_error[cc]);
	}

	return rev;
}

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks(struct admhcd *ahcd, u16 tick)
{
	struct ed *ed, **last;

rescan_all:
	for (last = &ahcd->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head *entry, *tmp;
		int completed, modified;
		__hc32 *prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) {
			if (tick_before(tick, ed->tick)) {
skip_ed:
				last = &ed->ed_rm_next;
				continue;
			}

			if (!list_empty(&ed->td_list)) {
				struct td *td;
				u32 head;

				td = list_entry(ed->td_list.next, struct td,
						td_list);
				head = hc32_to_cpu(ahcd, ed->hwHeadP) &
						TD_MASK;

				/* INTR_WDH may need to clean up first */
				if (td->td_dma != head)
					goto skip_ed;
			}
		}

		/* reentrancy: if we drop the schedule lock, someone might
		 * have modified this list. normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_rm_next;
		ed->ed_rm_next = NULL;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed. But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe(entry, tmp, &ed->td_list) {
			struct td *td;
			struct urb *urb;
			struct urb_priv *urb_priv;
			__hc32 savebits;

			td = list_entry(entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (urb->status == -EINPROGRESS) {
				prev = &td->hwNextTD;
				continue;
			}

			if ((urb_priv) == NULL)
				continue;

			/* patch pointer hc uses */
			savebits = *prev & ~cpu_to_hc32(ahcd, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* HC may have partly processed this TD */
#ifdef ADMHC_VERBOSE_DEBUG
			urb_print(ahcd, urb, "PARTIAL", 0);
#endif
			td_done(ahcd, urb, td);

			/* if URB is done, clean up */
			if (urb_priv->td_idx == urb_priv->td_cnt) {
				modified = completed = 1;
				finish_urb(ahcd, urb);
			}
		}
		if (completed && !list_empty(&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
		ed->hwNextED = 0;
		wmb();
		ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP | ED_DEQUEUE);

		/* but if there's work queued, reschedule */
		if (!list_empty(&ed->td_list)) {
			if (HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))
				ed_schedule(ahcd, ed);
		}

		if (modified)
			goto rescan_all;
	}
}
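
/* The rescan_this/rescan_all labels matter because finish_urb() drops
 * ahcd->lock around urb->complete(): a completion handler may unlink
 * or resubmit other URBs, so both the TD list and the rm_list must be
 * walked again after every giveback.
 */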

/*-------------------------------------------------------------------------*/

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers. The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 */

static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
{
	struct list_head *entry, *tmp;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "UNHALT", ed, 0);
#endif
	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

	list_for_each_safe(entry, tmp, &ed->td_list) {
		struct td *td = list_entry(entry, struct td, td_list);
		__hc32 info;

		if (td->urb != urb)
			break;

		info = td->hwINFO;
		info &= ~cpu_to_hc32(ahcd, TD_CC | TD_OWN);
		td->hwINFO = info;

		ed->hwHeadP = td->hwNextTD | toggle;
		wmb();
	}
}

static void ed_intr_refill(struct admhcd *ahcd, struct ed *ed)
{
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

	ed->hwHeadP = ed->hwTailP | toggle;
}

static inline int is_ed_halted(struct admhcd *ahcd, struct ed *ed)
{
	return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) == ED_H);
}

static inline int is_td_halted(struct admhcd *ahcd, struct ed *ed,
			struct td *td)
{
	return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & TD_MASK) ==
		(hc32_to_cpup(ahcd, &td->hwNextTD) & TD_MASK));
}

static void ed_update(struct admhcd *ahcd, struct ed *ed)
{
	struct list_head *entry, *tmp;

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "UPDATE", ed, 1);
#endif

	list_for_each_safe(entry, tmp, &ed->td_list) {
		struct td *td = list_entry(entry, struct td, td_list);
		struct urb *urb = td->urb;
		struct urb_priv *urb_priv = urb->hcpriv;
		int cc;

		if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
			break;

		/* update URB's length and status from TD */
		cc = td_done(ahcd, urb, td);
		if (is_ed_halted(ahcd, ed) && is_td_halted(ahcd, ed, td))
			ed_unhalt(ahcd, ed, urb);

		if (ed->type == PIPE_INTERRUPT)
			ed_intr_refill(ahcd, ed);

		/* If all this urb's TDs are done, call complete() */
		if (urb_priv->td_idx == urb_priv->td_cnt)
			finish_urb(ahcd, urb);

		/* clean schedule: unlink EDs that are no longer busy */
		if (list_empty(&ed->td_list)) {
			if (ed->state == ED_OPER)
				start_ed_unlink(ahcd, ed);

		/* ... reenabling halted EDs only after fault cleanup */
		} else if ((ed->hwINFO & cpu_to_hc32(ahcd,
					ED_SKIP | ED_DEQUEUE))
				== cpu_to_hc32(ahcd, ED_SKIP)) {
			td = list_entry(ed->td_list.next, struct td, td_list);
#if 0
			if (!(td->hwINFO & cpu_to_hc32(ahcd, TD_DONE))) {
				ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
				/* ... hc may need waking-up */
				switch (ed->type) {
				case PIPE_CONTROL:
					admhc_writel(ahcd, OHCI_CLF,
						&ahcd->regs->cmdstatus);
					break;
				case PIPE_BULK:
					admhc_writel(ahcd, OHCI_BLF,
						&ahcd->regs->cmdstatus);
					break;
				}
			}
#else
			if ((td->hwINFO & cpu_to_hc32(ahcd, TD_OWN)))
				ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
#endif
		}
	}
}

/* there are some tds completed; called in_irq(), with HCD locked */
static void admhc_td_complete(struct admhcd *ahcd)
{
	struct ed *ed;

	for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
		if (ed->state != ED_OPER)
			continue;

		ed_update(ahcd, ed);
	}
}