diff --git a/target/linux/adm5120/files/drivers/usb/host/adm5120-q.c b/target/linux/adm5120/files/drivers/usb/host/adm5120-q.c
index 24acc87..d7d9e67 100644
--- a/target/linux/adm5120/files/drivers/usb/host/adm5120-q.c
+++ b/target/linux/adm5120/files/drivers/usb/host/adm5120-q.c
@@ -1,10 +1,16 @@
 /*
- * OHCI HCD (Host Controller Driver) for USB.
+ * ADM5120 HCD (Host Controller Driver) for USB
  *
- * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: drivers/usb/host/ohci-q.c
+ *   (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ *   (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
  *
- * This file is licenced under the GPL.
  */
 
 #include <linux/irq.h>
  * PRECONDITION:  ahcd lock held, irqs blocked.
  */
 static void
-finish_urb(struct admhcd *ahcd, struct urb *urb)
+finish_urb(struct admhcd *ahcd, struct urb *urb, int status)
 __releases(ahcd->lock)
 __acquires(ahcd->lock)
 {
        urb_priv_free(ahcd, urb->hcpriv);
-       urb->hcpriv = NULL;
 
-       spin_lock(&urb->lock);
-       if (likely(urb->status == -EINPROGRESS))
-               urb->status = 0;
-
-       /* report short control reads right even though the data TD always
-        * has TD_R set.  (much simpler, but creates the 1-td limit.)
-        */
-       if (unlikely(urb->transfer_flags & URB_SHORT_NOT_OK)
-                       && unlikely(usb_pipecontrol(urb->pipe))
-                       && urb->actual_length < urb->transfer_buffer_length
-                       && usb_pipein(urb->pipe)
-                       && urb->status == 0) {
-               urb->status = -EREMOTEIO;
-#ifdef ADMHC_VERBOSE_DEBUG
-               urb_print(urb, "SHORT", usb_pipeout (urb->pipe));
-#endif
-       }
-       spin_unlock(&urb->lock);
+       if (likely(status == -EINPROGRESS))
+               status = 0;
 
        switch (usb_pipetype(urb->pipe)) {
        case PIPE_ISOCHRONOUS:
@@ -53,12 +42,13 @@ __acquires(ahcd->lock)
        }
 
 #ifdef ADMHC_VERBOSE_DEBUG
-       urb_print(urb, "RET", usb_pipeout (urb->pipe));
+       urb_print(ahcd, urb, "RET", usb_pipeout(urb->pipe), status);
 #endif
 
        /* urb->complete() can reenter this HCD */
+       usb_hcd_unlink_urb_from_ep(admhcd_to_hcd(ahcd), urb);
        spin_unlock(&ahcd->lock);
-       usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb);
+       usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb, status);
        spin_lock(&ahcd->lock);
 }
 
@@ -83,12 +73,12 @@ static int balance(struct admhcd *ahcd, int interval, int load)
         * that has enough bandwidth left unreserved.
         */
        for (i = 0; i < interval ; i++) {
-               if (branch < 0 || ahcd->load [branch] > ahcd->load [i]) {
+               if (branch < 0 || ahcd->load[branch] > ahcd->load[i]) {
                        int     j;
 
                        /* usb 1.1 says 90% of one frame */
                        for (j = i; j < NUM_INTS; j += interval) {
-                               if ((ahcd->load [j] + load) > 900)
+                               if ((ahcd->load[j] + load) > 900)
                                        break;
                        }
                        if (j < NUM_INTS)
@@ -107,17 +97,17 @@ static int balance(struct admhcd *ahcd, int interval, int load)
  * into the schedule tree in the apppropriate place.  most iso devices use
  * 1msec periods, but that's not required.
  */
-static void periodic_link (struct admhcd *ahcd, struct ed *ed)
+static void periodic_link(struct admhcd *ahcd, struct ed *ed)
 {
        unsigned        i;
 
-       admhc_vdbg (ahcd, "link %sed %p branch %d [%dus.], interval %d\n",
-               (ed->hwINFO & cpu_to_hc32 (ahcd, ED_ISO)) ? "iso " : "",
+       admhc_vdbg(ahcd, "link %sed %p branch %d [%dus.], interval %d\n",
+               (ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
                ed, ed->branch, ed->load, ed->interval);
 
        for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
-               struct ed       **prev = &ahcd->periodic [i];
-               __hc32          *prev_p = &ahcd->hcca->int_table [i];
+               struct ed       **prev = &ahcd->periodic[i];
+               __hc32          *prev_p = &ahcd->hcca->int_table[i];
                struct ed       *here = *prev;
 
                /* sorting each branch by period (slow before fast)
@@ -135,12 +125,12 @@ static void periodic_link (struct admhcd *ahcd, struct ed *ed)
                        ed->ed_next = here;
                        if (here)
                                ed->hwNextED = *prev_p;
-                       wmb ();
+                       wmb();
                        *prev = ed;
                        *prev_p = cpu_to_hc32(ahcd, ed->dma);
                        wmb();
                }
-               ahcd->load [i] += ed->load;
+               ahcd->load[i] += ed->load;
        }
        admhcd_to_hcd(ahcd)->self.bandwidth_allocated += ed->load / ed->interval;
 }
@@ -180,14 +170,14 @@ static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
 
 #if 0  /* FIXME */
 /* scan the periodic table to find and unlink this ED */
-static void periodic_unlink (struct admhcd *ahcd, struct ed *ed)
+static void periodic_unlink(struct admhcd *ahcd, struct ed *ed)
 {
        int     i;
 
        for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
                struct ed       *temp;
-               struct ed       **prev = &ahcd->periodic [i];
-               __hc32          *prev_p = &ahcd->hcca->int_table [i];
+               struct ed       **prev = &ahcd->periodic[i];
+               __hc32          *prev_p = &ahcd->hcca->int_table[i];
 
                while (*prev && (temp = *prev) != ed) {
                        prev_p = &temp->hwNextED;
@@ -197,12 +187,12 @@ static void periodic_unlink (struct admhcd *ahcd, struct ed *ed)
                        *prev_p = ed->hwNextED;
                        *prev = ed->ed_next;
                }
-               ahcd->load [i] -= ed->load;
+               ahcd->load[i] -= ed->load;
        }
 
        admhcd_to_hcd(ahcd)->self.bandwidth_allocated -= ed->load / ed->interval;
-       admhc_vdbg (ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n",
-               (ed->hwINFO & cpu_to_hc32 (ahcd, ED_ISO)) ? "iso " : "",
+       admhc_vdbg(ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n",
+               (ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
                ed, ed->branch, ed->load, ed->interval);
 }
 #endif
@@ -231,6 +221,11 @@ static void periodic_unlink (struct admhcd *ahcd, struct ed *ed)
  */
 static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
 {
+
+#ifdef ADMHC_VERBOSE_DEBUG
+       admhc_dump_ed(ahcd, "ED-DESCHED", ed, 1);
+#endif
+
        ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
        wmb();
        ed->state = ED_UNLINK;
@@ -333,7 +328,12 @@ static struct ed *ed_get(struct admhcd *ahcd,      struct usb_host_endpoint *ep,
  */
 static void start_ed_unlink(struct admhcd *ahcd, struct ed *ed)
 {
-       ed->hwINFO |= cpu_to_hc32 (ahcd, ED_DEQUEUE);
+
+#ifdef ADMHC_VERBOSE_DEBUG
+       admhc_dump_ed(ahcd, "ED-UNLINK", ed, 1);
+#endif
+
+       ed->hwINFO |= cpu_to_hc32(ahcd, ED_DEQUEUE);
        ed_deschedule(ahcd, ed);
 
        /* add this ED into the remove list */
@@ -432,9 +432,7 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
        int             cnt = 0;
        u32             info = 0;
        int             is_out = usb_pipeout(urb->pipe);
-       int             periodic = 0;
        u32             toggle = 0;
-       struct td       *td;
 
        /* OHCI handles the bulk/interrupt data toggles itself.  We just
         * use the device toggle bits for resetting, and rely on the fact
@@ -532,7 +530,10 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
         * we could often reduce the number of TDs here.
         */
        case PIPE_ISOCHRONOUS:
-               info = TD_SCC_NOTACCESSED;
+               info = is_out
+                       ? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
+                       : TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;
+
                for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
                        int frame = urb->start_frame;
 
@@ -554,73 +555,68 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
  * Done List handling functions
  *-------------------------------------------------------------------------*/
 
-/* calculate transfer length/status and update the urb
- * PRECONDITION:  irqsafe (only for urb->status locking)
- */
+/* calculate transfer length/status and update the urb */
 static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
 {
        struct urb_priv *urb_priv = urb->hcpriv;
-       u32     info = hc32_to_cpup(ahcd, &td->hwINFO);
+       u32     info;
+       u32     bl;
+       u32     tdDBP;
        int     type = usb_pipetype(urb->pipe);
        int     cc;
+       int     status = -EINPROGRESS;
 
+       info = hc32_to_cpup(ahcd, &td->hwINFO);
+       tdDBP = hc32_to_cpup(ahcd, &td->hwDBP);
+       bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
        cc = TD_CC_GET(info);
 
        /* ISO ... drivers see per-TD length/status */
        if (type == PIPE_ISOCHRONOUS) {
-#if 0
                /* TODO */
                int     dlen = 0;
 
                /* NOTE:  assumes FC in tdINFO == 0, and that
                 * only the first of 0..MAXPSW psws is used.
                 */
+               if (info & TD_CC)       /* hc didn't touch? */
+                       return status;
 
-               cc = TD_CC_GET(td);
-               if (tdINFO & TD_CC)     /* hc didn't touch? */
-                       return;
-
-               if (usb_pipeout (urb->pipe))
-                       dlen = urb->iso_frame_desc [td->index].length;
+               if (usb_pipeout(urb->pipe))
+                       dlen = urb->iso_frame_desc[td->index].length;
                else {
                        /* short reads are always OK for ISO */
-                       if (cc == TD_DATAUNDERRUN)
+                       if (cc == TD_CC_DATAUNDERRUN)
                                cc = TD_CC_NOERROR;
-                       dlen = tdPSW & 0x3ff;
+                       dlen = tdDBP - td->data_dma + bl;
                }
+
                urb->actual_length += dlen;
-               urb->iso_frame_desc [td->index].actual_length = dlen;
-               urb->iso_frame_desc [td->index].status = cc_to_error [cc];
+               urb->iso_frame_desc[td->index].actual_length = dlen;
+               urb->iso_frame_desc[td->index].status = cc_to_error[cc];
 
                if (cc != TD_CC_NOERROR)
-                       admhc_vdbg (ahcd,
+                       admhc_vdbg(ahcd,
                                "urb %p iso td %p (%d) len %d cc %d\n",
                                urb, td, 1 + td->index, dlen, cc);
-#endif
+
        /* BULK, INT, CONTROL ... drivers see aggregate length/status,
         * except that "setup" bytes aren't counted and "short" transfers
         * might not be reported as errors.
         */
        } else {
-               u32     bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
-               u32     tdDBP = hc32_to_cpup(ahcd, &td->hwDBP);
-
                /* update packet status if needed (short is normally ok) */
                if (cc == TD_CC_DATAUNDERRUN
                                && !(urb->transfer_flags & URB_SHORT_NOT_OK))
                        cc = TD_CC_NOERROR;
 
-               if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
-                       spin_lock(&urb->lock);
-                       if (urb->status == -EINPROGRESS)
-                               urb->status = cc_to_error[cc];
-                       spin_unlock(&urb->lock);
-               }
+               if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
+                       status = cc_to_error[cc];
+
 
                /* count all non-empty packets except control SETUP packet */
-               if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0) {
+               if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0)
                        urb->actual_length += tdDBP - td->data_dma + bl;
-               }
 
                if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
                        admhc_vdbg(ahcd,
@@ -633,34 +629,34 @@ static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
        list_del(&td->td_list);
        urb_priv->td_idx++;
 
-       return cc;
+       return status;
 }
 
 /*-------------------------------------------------------------------------*/
 
-static inline struct td *
+static inline void
 ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev)
 {
        struct urb              *urb = td->urb;
+       struct urb_priv         *urb_priv = urb->hcpriv;
        struct ed               *ed = td->ed;
        struct list_head        *tmp = td->td_list.next;
-       __hc32                  toggle = ed->hwHeadP & cpu_to_hc32 (ahcd, ED_C);
+       __hc32                  toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);
 
        admhc_dump_ed(ahcd, "ed halted", td->ed, 1);
        /* clear ed halt; this is the td that caused it, but keep it inactive
         * until its urb->complete() has a chance to clean up.
         */
-       ed->hwINFO |= cpu_to_hc32 (ahcd, ED_SKIP);
+       ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
        wmb();
-       ed->hwHeadP &= ~cpu_to_hc32 (ahcd, ED_H);
+       ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
 
-       /* put any later tds from this urb onto the donelist, after 'td',
-        * order won't matter here: no errors, and nothing was transferred.
-        * also patch the ed so it looks as if those tds completed normally.
+       /* Get rid of all later tds from this urb. We don't have
+        * to be careful: no errors and nothing was transferred.
+        * Also patch the ed so it looks as if those tds completed normally.
         */
        while (tmp != &ed->td_list) {
                struct td       *next;
-               __hc32          info;
 
                next = list_entry(tmp, struct td, td_list);
                tmp = next->td_list.next;
@@ -675,16 +671,8 @@ ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev)
                 * then we need to leave the control STATUS packet queued
                 * and clear ED_SKIP.
                 */
-               info = next->hwINFO;
-#if 0          /* FIXME */
-               info |= cpu_to_hc32 (ahcd, TD_DONE);
-#endif
-               info &= ~cpu_to_hc32 (ahcd, TD_CC);
-               next->hwINFO = info;
-
-               next->next_dl_td = rev;
-               rev = next;
-
+               list_del(&next->td_list);
+               urb_priv->td_cnt++;
                ed->hwHeadP = next->hwNextTD | toggle;
        }
 
@@ -702,16 +690,14 @@ ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev)
                        break;
                /* fallthrough */
        default:
-               admhc_dbg (ahcd,
+               admhc_dbg(ahcd,
                        "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
                        urb, urb->dev->devpath,
                        usb_pipeendpoint (urb->pipe),
-                       usb_pipein (urb->pipe) ? "in" : "out",
+                       usb_pipein(urb->pipe) ? "in" : "out",
                        hc32_to_cpu(ahcd, td->hwINFO),
-                       cc, cc_to_error [cc]);
+                       cc, cc_to_error[cc]);
        }
-
-       return rev;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -732,13 +718,13 @@ rescan_all:
                 * frame counter wraps and EDs with partially retired TDs
                 */
                if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) {
-                       if (tick_before (tick, ed->tick)) {
+                       if (tick_before(tick, ed->tick)) {
 skip_ed:
                                last = &ed->ed_rm_next;
                                continue;
                        }
-
-                       if (!list_empty (&ed->td_list)) {
+#if 0
+                       if (!list_empty(&ed->td_list)) {
                                struct td       *td;
                                u32             head;
 
@@ -751,6 +737,7 @@ skip_ed:
                                if (td->td_dma != head)
                                        goto skip_ed;
                        }
+#endif
                }
 
                /* reentrancy:  if we drop the schedule lock, someone might
@@ -772,17 +759,18 @@ skip_ed:
 rescan_this:
                completed = 0;
                prev = &ed->hwHeadP;
-               list_for_each_safe (entry, tmp, &ed->td_list) {
+               list_for_each_safe(entry, tmp, &ed->td_list) {
                        struct td       *td;
                        struct urb      *urb;
                        struct urb_priv *urb_priv;
                        __hc32          savebits;
+                       int             status;
 
                        td = list_entry(entry, struct td, td_list);
                        urb = td->urb;
                        urb_priv = td->urb->hcpriv;
 
-                       if (urb->status == -EINPROGRESS) {
+                       if (!urb->unlinked) {
                                prev = &td->hwNextTD;
                                continue;
                        }
@@ -795,27 +783,29 @@ rescan_this:
                        *prev = td->hwNextTD | savebits;
 
                        /* HC may have partly processed this TD */
-                       urb_print(urb, "PARTIAL", 1);
-                       td_done(ahcd, urb, td);
+#ifdef ADMHC_VERBOSE_DEBUG
+                       urb_print(ahcd, urb, "PARTIAL", 0);
+#endif
+                       status = td_done(ahcd, urb, td);
 
                        /* if URB is done, clean up */
                        if (urb_priv->td_idx == urb_priv->td_cnt) {
                                modified = completed = 1;
-                               finish_urb(ahcd, urb);
+                               finish_urb(ahcd, urb, status);
                        }
                }
-               if (completed && !list_empty (&ed->td_list))
+               if (completed && !list_empty(&ed->td_list))
                        goto rescan_this;
 
                /* ED's now officially unlinked, hc doesn't see */
                ed->state = ED_IDLE;
                ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
                ed->hwNextED = 0;
-               wmb ();
-               ed->hwINFO &= ~cpu_to_hc32 (ahcd, ED_SKIP | ED_DEQUEUE);
+               wmb();
+               ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP | ED_DEQUEUE);
 
                /* but if there's work queued, reschedule */
-               if (!list_empty (&ed->td_list)) {
+               if (!list_empty(&ed->td_list)) {
                        if (HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))
                                ed_schedule(ahcd, ed);
                }
@@ -837,10 +827,8 @@ rescan_this:
 
 static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
 {
-       struct list_head *entry,*tmp;
-       struct urb_priv *urb_priv = urb->hcpriv;
-       __hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ahcd, ED_C);
-
+       struct list_head *entry, *tmp;
+       __hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);
 
 #ifdef ADMHC_VERBOSE_DEBUG
        admhc_dump_ed(ahcd, "UNHALT", ed, 0);
@@ -848,9 +836,9 @@ static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
        /* clear ed halt; this is the td that caused it, but keep it inactive
         * until its urb->complete() has a chance to clean up.
         */
-       ed->hwINFO |= cpu_to_hc32 (ahcd, ED_SKIP);
+       ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
        wmb();
-       ed->hwHeadP &= ~cpu_to_hc32 (ahcd, ED_H);
+       ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
 
        list_for_each_safe(entry, tmp, &ed->td_list) {
                struct td *td = list_entry(entry, struct td, td_list);
@@ -869,6 +857,14 @@ static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
 
 }
 
+static void ed_intr_refill(struct admhcd *ahcd, struct ed *ed)
+{
+       __hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);
+
+       ed->hwHeadP = ed->hwTailP | toggle;
+}
+
+
 static inline int is_ed_halted(struct admhcd *ahcd, struct ed *ed)
 {
        return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) == ED_H);
@@ -883,29 +879,32 @@ static inline int is_td_halted(struct admhcd *ahcd, struct ed *ed,
 
 static void ed_update(struct admhcd *ahcd, struct ed *ed)
 {
-       struct list_head *entry,*tmp;
+       struct list_head *entry, *tmp;
 
 #ifdef ADMHC_VERBOSE_DEBUG
-       admhc_dump_ed(ahcd, "UPDATE", ed, 0);
+       admhc_dump_ed(ahcd, "UPDATE", ed, 1);
 #endif
 
        list_for_each_safe(entry, tmp, &ed->td_list) {
                struct td *td = list_entry(entry, struct td, td_list);
                struct urb *urb = td->urb;
                struct urb_priv *urb_priv = urb->hcpriv;
-               int cc;
+               int status;
 
                if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
                        break;
 
                /* update URB's length and status from TD */
-               cc = td_done(ahcd, urb, td);
+               status = td_done(ahcd, urb, td);
                if (is_ed_halted(ahcd, ed) && is_td_halted(ahcd, ed, td))
                        ed_unhalt(ahcd, ed, urb);
 
+               if (ed->type == PIPE_INTERRUPT)
+                       ed_intr_refill(ahcd, ed);
+
                /* If all this urb's TDs are done, call complete() */
                if (urb_priv->td_idx == urb_priv->td_cnt)
-                       finish_urb(ahcd, urb);
+                       finish_urb(ahcd, urb, status);
 
                /* clean schedule:  unlink EDs that are no longer busy */
                if (list_empty(&ed->td_list)) {
@@ -913,21 +912,21 @@ static void ed_update(struct admhcd *ahcd, struct ed *ed)
                                start_ed_unlink(ahcd, ed);
 
                /* ... reenabling halted EDs only after fault cleanup */
-               } else if ((ed->hwINFO & cpu_to_hc32 (ahcd,
+               } else if ((ed->hwINFO & cpu_to_hc32(ahcd,
                                                ED_SKIP | ED_DEQUEUE))
-                                       == cpu_to_hc32 (ahcd, ED_SKIP)) {
+                                       == cpu_to_hc32(ahcd, ED_SKIP)) {
                        td = list_entry(ed->td_list.next, struct td, td_list);
 #if 0
-                       if (!(td->hwINFO & cpu_to_hc32 (ahcd, TD_DONE))) {
-                               ed->hwINFO &= ~cpu_to_hc32 (ahcd, ED_SKIP);
+                       if (!(td->hwINFO & cpu_to_hc32(ahcd, TD_DONE))) {
+                               ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
                                /* ... hc may need waking-up */
                                switch (ed->type) {
                                case PIPE_CONTROL:
-                                       admhc_writel (ahcd, OHCI_CLF,
+                                       admhc_writel(ahcd, OHCI_CLF,
                                                &ahcd->regs->cmdstatus);
                                        break;
                                case PIPE_BULK:
-                                       admhc_writel (ahcd, OHCI_BLF,
+                                       admhc_writel(ahcd, OHCI_BLF,
                                                &ahcd->regs->cmdstatus);
                                        break;
                                }