        .urb_enqueue            = ehci_urb_enqueue,
        .urb_dequeue            = ehci_urb_dequeue,
        .endpoint_disable       = ehci_endpoint_disable,
+       .endpoint_reset         = ehci_endpoint_reset,
 
        /*
         * scheduling support
 
        .urb_enqueue = ehci_urb_enqueue,
        .urb_dequeue = ehci_urb_dequeue,
        .endpoint_disable = ehci_endpoint_disable,
+       .endpoint_reset = ehci_endpoint_reset,
 
        /*
         * scheduling support
 
        return;
 }
 
+static void
+ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+       struct ehci_hcd         *ehci = hcd_to_ehci(hcd);
+       struct ehci_qh          *qh;
+       int                     eptype = usb_endpoint_type(&ep->desc);
+
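+       /* Control endpoints carry their toggle in each qTD and isochronous
+        * endpoints have no toggle or halt state, so only Bulk and
+        * Interrupt endpoints need any work here.
+        */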
+       if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+               return;
+
+ rescan:
+       spin_lock_irq(&ehci->lock);
+       qh = ep->hcpriv;
+
+       /* For Bulk and Interrupt endpoints we maintain the toggle state
+        * in the hardware; the toggle bits in udev aren't used at all.
+        * When an endpoint is reset by usb_clear_halt() we must reset
+        * the toggle bit in the QH.
+        */
+       if (qh) {
+               if (!list_empty(&qh->qtd_list)) {
+                       WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+               } else if (qh->qh_state == QH_STATE_IDLE) {
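+                       /* An idle QH isn't linked into either schedule, so
+                        * the toggle can be cleared directly in the overlay.
+                        */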
+                       qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
+               } else {
+                       /* It's not safe to write into the overlay area
+                        * while the QH is active.  Unlink it first and
+                        * wait for the unlink to complete.
+                        */
+                       if (qh->qh_state == QH_STATE_LINKED) {
+                               if (eptype == USB_ENDPOINT_XFER_BULK) {
+                                       unlink_async(ehci, qh);
+                               } else {
+                                       intr_deschedule(ehci, qh);
+                                       (void) qh_schedule(ehci, qh);
+                               }
+                       }
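+                       /* Drop the lock and give the unlink (or reschedule)
+                        * time to finish before looking at the QH again.
+                        */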
+                       spin_unlock_irq(&ehci->lock);
+                       schedule_timeout_uninterruptible(1);
+                       goto rescan;
+               }
+       }
+       spin_unlock_irq(&ehci->lock);
+}
+
 static int ehci_get_frame (struct usb_hcd *hcd)
 {
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
 
        .urb_enqueue            = ehci_urb_enqueue,
        .urb_dequeue            = ehci_urb_dequeue,
        .endpoint_disable       = ehci_endpoint_disable,
+       .endpoint_reset         = ehci_endpoint_reset,
        .get_frame_number       = ehci_get_frame,
        .hub_status_data        = ehci_hub_status_data,
        .hub_control            = ehci_hub_control,
 
        .urb_enqueue = ehci_urb_enqueue,
        .urb_dequeue = ehci_urb_dequeue,
        .endpoint_disable = ehci_endpoint_disable,
+       .endpoint_reset = ehci_endpoint_reset,
 
        /*
         * scheduling support
 
        .urb_enqueue =          ehci_urb_enqueue,
        .urb_dequeue =          ehci_urb_dequeue,
        .endpoint_disable =     ehci_endpoint_disable,
+       .endpoint_reset =       ehci_endpoint_reset,
 
        /*
         * scheduling support
 
        .urb_enqueue            = ehci_urb_enqueue,
        .urb_dequeue            = ehci_urb_dequeue,
        .endpoint_disable       = ehci_endpoint_disable,
+       .endpoint_reset         = ehci_endpoint_reset,
 
        /*
         * scheduling support
 
        .urb_enqueue            = ehci_urb_enqueue,
        .urb_dequeue            = ehci_urb_dequeue,
        .endpoint_disable       = ehci_endpoint_disable,
+       .endpoint_reset         = ehci_endpoint_reset,
        .get_frame_number       = ehci_get_frame,
        .hub_status_data        = ehci_hub_status_data,
        .hub_control            = ehci_hub_control,
 
        qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
        qh->hw_alt_next = EHCI_LIST_END(ehci);
 
-       /* Except for control endpoints, we make hardware maintain data
-        * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
-        * and set the pseudo-toggle in udev. Only usb_clear_halt() will
-        * ever clear it.
-        */
-       if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
-               unsigned        is_out, epnum;
-
-               is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
-               epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
-               if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
-                       qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
-                       usb_settoggle (qh->dev, epnum, is_out, 1);
-               }
-       }
-
        /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
        wmb ();
        qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
        qh->qh_state = QH_STATE_IDLE;
        qh->hw_info1 = cpu_to_hc32(ehci, info1);
        qh->hw_info2 = cpu_to_hc32(ehci, info2);
-       usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
        qh_refresh (ehci, qh);
        return qh;
 }
                }
        }
 
-       /* clear halt and/or toggle; and maybe recover from silicon quirk */
+       /* clear halt and maybe recover from silicon quirk */
        if (qh->qh_state == QH_STATE_IDLE)
                qh_refresh (ehci, qh);