        seg->bounce_offs = 0;
 }
 
-static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
-                          struct xhci_ring *ep_ring, int status)
+static void xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
+                           struct xhci_ring *ep_ring, int status)
 {
        struct urb *urb = NULL;
 
                        status = 0;
                xhci_giveback_urb_in_irq(xhci, td, status);
        }
-
-       return 0;
 }
 
 
        return 0;
 }
 
-static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
-                    struct xhci_ring *ep_ring, struct xhci_td *td,
-                    u32 trb_comp_code)
+static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+                     struct xhci_ring *ep_ring, struct xhci_td *td,
+                     u32 trb_comp_code)
 {
        struct xhci_ep_ctx *ep_ctx;
 
                 * stopped TDs. A stopped TD may be restarted, so don't update
                 * the ring dequeue pointer or take this TD off any lists yet.
                 */
-               return 0;
+               return;
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_SPLIT_TRANSACTION_ERROR:
                                xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
                                         (unsigned long long)xhci_trb_virt_to_dma(
                                                 td->start_seg, td->start_trb));
-                               return 0;
+                               return;
                        }
                        /* endpoint not halted, don't reset it */
                        break;
                /* Almost same procedure as for STALL_ERROR below */
                xhci_clear_hub_tt_buffer(xhci, td, ep);
                xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
-               return 0;
+               return;
        case COMP_STALL_ERROR:
                /*
                 * xhci internal endpoint state will go to a "halt" state for
 
                xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
 
-               return 0; /* xhci_handle_halted_endpoint marked td cancelled */
+               return; /* xhci_handle_halted_endpoint marked td cancelled */
        default:
                break;
        }
        ep_ring->deq_seg = td->end_seg;
        inc_deq(xhci, ep_ring);
 
-       return xhci_td_cleanup(xhci, td, ep_ring, td->status);
+       xhci_td_cleanup(xhci, td, ep_ring, td->status);
 }
 
 /* sum trb lengths from the first trb up to stop_trb, _excluding_ stop_trb */
 /*
  * Process control tds, update urb status and actual_length.
  */
-static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
-               struct xhci_ring *ep_ring,  struct xhci_td *td,
-                          union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+                           struct xhci_ring *ep_ring,  struct xhci_td *td,
+                           union xhci_trb *ep_trb, struct xhci_transfer_event *event)
 {
        struct xhci_ep_ctx *ep_ctx;
        u32 trb_comp_code;
                td->urb_length_set = true;
                td->urb->actual_length = requested - remaining;
                xhci_dbg(xhci, "Waiting for status stage event\n");
-               return 0;
+               return;
        }
 
        /* at status stage */
                td->urb->actual_length = requested;
 
 finish_td:
-       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+       finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }
 
 /*
  * Process isochronous tds, update urb packet status and actual_length.
  */
-static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
-               struct xhci_ring *ep_ring, struct xhci_td *td,
-               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+                           struct xhci_ring *ep_ring, struct xhci_td *td,
+                           union xhci_trb *ep_trb, struct xhci_transfer_event *event)
 {
        struct urb_priv *urb_priv;
        int idx;
        if (td->error_mid_td && ep_trb != td->end_trb) {
                xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
                td->urb_length_set = true;
-               return 0;
+               return;
        }
-
-       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+       finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }
 
-static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
-                       struct xhci_virt_ep *ep, int status)
+static void skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+                        struct xhci_virt_ep *ep, int status)
 {
        struct urb_priv *urb_priv;
        struct usb_iso_packet_descriptor *frame;
        ep->ring->deq_seg = td->end_seg;
        inc_deq(xhci, ep->ring);
 
-       return xhci_td_cleanup(xhci, td, ep->ring, status);
+       xhci_td_cleanup(xhci, td, ep->ring, status);
 }
 
 /*
  * Process bulk and interrupt tds, update urb status and actual_length.
  */
-static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
-               struct xhci_ring *ep_ring, struct xhci_td *td,
-               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+                                struct xhci_ring *ep_ring, struct xhci_td *td,
+                                union xhci_trb *ep_trb, struct xhci_transfer_event *event)
 {
        struct xhci_slot_ctx *slot_ctx;
        u32 trb_comp_code;
                td->status = 0;
 
                xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
-               return 0;
+               return;
        default:
                /* do nothing */
                break;
                td->urb->actual_length = 0;
        }
 
-       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+       finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }
 
 /* Transfer events which don't point to a transfer TRB, see xhci 4.17.4 */