/*
         * We want to find the pointer, segment and cycle state of the new trb
-        * (the one after current TD's last_trb). We know the cycle state at
-        * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+        * (the one after current TD's end_trb). We know the cycle state at
+        * hw_dequeue, so walk the ring until both hw_dequeue and end_trb are
         * found.
         */
        do {
                        if (td_last_trb_found)
                                break;
                }
-               if (new_deq == td->last_trb)
+               if (new_deq == td->end_trb)
                        td_last_trb_found = true;
 
                if (cycle_found && trb_is_link(new_deq) &&
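
The walk above, in miniature: a compilable sketch that tracks the cycle state from hw_dequeue and stops once both hw_dequeue and the TD's end TRB have been seen. A flat array stands in for the segment list; walk_past_td(), struct toy_trb and LINK_TOGGLES are illustrative stand-ins, not kernel API, and the wrap-around error check is omitted.

    #include <stdbool.h>
    #include <stdio.h>

    #define LINK_TOGGLES 1          /* stand-in for link_trb_toggles_cycle() */

    struct toy_trb { bool is_link; int flags; };

    /* Walk from 'deq' until both the hardware dequeue index and the TD's
     * end TRB index have been seen; flip *cycle whenever a toggling link
     * TRB is crossed after hw_dequeue was found. */
    static int walk_past_td(struct toy_trb *ring, int len, int deq,
                            int hw_dequeue, int end_trb, int *cycle)
    {
        bool cycle_found = false, end_found = false;

        do {
            if (!cycle_found && deq == hw_dequeue) {
                cycle_found = true;
                if (end_found)
                    break;
            }
            if (deq == end_trb)
                end_found = true;

            if (cycle_found && ring[deq].is_link &&
                (ring[deq].flags & LINK_TOGGLES))
                *cycle ^= 1;

            deq = (deq + 1) % len;  /* next_trb(), flattened */
        } while (!cycle_found || !end_found);

        return deq;
    }

    int main(void)
    {
        struct toy_trb ring[8] = { [7] = { .is_link = true, .flags = LINK_TOGGLES } };
        int cycle = 1;
        int new_deq = walk_past_td(ring, 8, 0, 2, 7, &cycle);

        printf("new dequeue %d, cycle %d\n", new_deq, cycle);
        return 0;
    }
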
                       struct xhci_td *td, bool flip_cycle)
 {
        struct xhci_segment *seg        = td->start_seg;
-       union xhci_trb *trb             = td->first_trb;
+       union xhci_trb *trb             = td->start_trb;
 
        while (1) {
                trb_to_noop(trb, TRB_TR_NOOP);
 
                /* flip cycle if asked to */
-               if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+               if (flip_cycle && trb != td->start_trb && trb != td->end_trb)
                        trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
 
-               if (trb == td->last_trb)
+               if (trb == td->end_trb)
                        break;
 
                next_trb(&seg, &trb);
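
The same no-op pass as a self-contained sketch, assuming a flat array of 32-bit TRB control words; TOY_TRB_CYCLE, toy_to_noop() and td_to_noop_sketch() are illustrative, though the type-8 No-Op encoding in bits 15:10 matches the xHCI spec.

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_TRB_CYCLE (1u << 0)

    /* Keep the cycle bit, rewrite the TRB type field (bits 15:10) to
     * No-Op, type 8 in the xHCI spec. */
    static void toy_to_noop(uint32_t *ctrl)
    {
        *ctrl = (*ctrl & TOY_TRB_CYCLE) | (8u << 10);
    }

    /* Turn TRBs [start, end] into no-ops.  With 'flip' set, every TRB
     * except the first and the last also gets its cycle bit inverted,
     * marking it software-owned. */
    static void td_to_noop_sketch(uint32_t *trbs, int start, int end, int flip)
    {
        for (int i = start; ; i++) {
            toy_to_noop(&trbs[i]);
            if (flip && i != start && i != end)
                trbs[i] ^= TOY_TRB_CYCLE;
            if (i == end)
                break;
        }
    }

    int main(void)
    {
        uint32_t trbs[4] = { 1, 1, 1, 1 };  /* all cycle bits set */

        td_to_noop_sketch(trbs, 0, 3, 1);
        for (int i = 0; i < 4; i++)
            printf("trb[%d] = %#x\n", i, (unsigned)trbs[i]);
        return 0;
    }
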
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                               "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
                               (unsigned long long)xhci_trb_virt_to_dma(
-                                      td->start_seg, td->first_trb),
+                                      td->start_seg, td->start_trb),
                               td->urb->stream_id, td->urb);
                list_del_init(&td->td_list);
                ring = xhci_urb_to_transfer_ring(xhci, td->urb);
        dma_addr_t end_trb_dma;
        struct xhci_segment *cur_seg;
 
-       start_dma = xhci_trb_virt_to_dma(td->start_seg, td->first_trb);
+       start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
        cur_seg = td->start_seg;
 
        do {
                end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
                                &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
                /* If the end TRB isn't in this segment, this is set to 0 */
-               end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->last_trb);
+               end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
 
                if (debug)
                        xhci_warn(xhci,
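
The segment-by-segment range check that trb_in_td() performs can be sketched with plain integers for DMA addresses. seg_base(), SEG_TRBS, TRB_SIZE and trb_in_td_sketch() are illustrative assumptions; the kernel's segment-list wrap-around and debug output are left out.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TRB_SIZE 16u
    #define SEG_TRBS 256u

    /* one linear "DMA" range per segment */
    static uint64_t seg_base(int seg)
    {
        return 0x1000u + (uint64_t)seg * SEG_TRBS * TRB_SIZE;
    }

    /* Does 'suspect' fall between the TD's first TRB and its end TRB,
     * walking segment by segment the way trb_in_td() does?  If the end
     * TRB is not in this segment, the TD covers it to its last TRB. */
    static bool trb_in_td_sketch(int start_seg, uint64_t start_dma,
                                 int end_seg, uint64_t end_trb_dma,
                                 uint64_t suspect)
    {
        for (int seg = start_seg; ; seg++) {
            uint64_t seg_end = seg_base(seg) + (SEG_TRBS - 1) * TRB_SIZE;
            uint64_t lower = (seg == start_seg) ? start_dma : seg_base(seg);
            uint64_t upper = (seg == end_seg) ? end_trb_dma : seg_end;

            if (suspect >= lower && suspect <= upper)
                return true;
            if (seg == end_seg)
                return false;
        }
    }

    int main(void)
    {
        /* TD spans TRB 10 of segment 0 through TRB 3 of segment 1 */
        uint64_t start = seg_base(0) + 10 * TRB_SIZE;
        uint64_t end = seg_base(1) + 3 * TRB_SIZE;

        printf("%d\n", trb_in_td_sketch(0, start, 1, end, seg_base(1))); /* 1 */
        printf("%d\n", trb_in_td_sketch(0, start, 1, end, seg_base(0))); /* 0 */
        return 0;
    }
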
                            !list_empty(&td->cancelled_td_list)) {
                                xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
                                         (unsigned long long)xhci_trb_virt_to_dma(
-                                                td->start_seg, td->first_trb));
+                                                td->start_seg, td->start_trb));
                                return 0;
                        }
                        /* endpoint not halted, don't reset it */
        }
 
        /* Update ring dequeue pointer */
-       ep_ring->dequeue = td->last_trb;
-       ep_ring->deq_seg = td->last_trb_seg;
+       ep_ring->dequeue = td->end_trb;
+       ep_ring->deq_seg = td->end_seg;
        inc_deq(xhci, ep_ring);
 
        return xhci_td_cleanup(xhci, td, ep_ring, td->status);
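
The dequeue update above is the whole trick: point software's dequeue at the TD's final TRB, then advance one step so the ring agrees with what the controller consumed. A sketch with index arithmetic in place of inc_deq() and the segment bookkeeping; struct toy_ring is illustrative.

    #include <stdio.h>

    struct toy_ring { int dequeue; int num_trbs; };

    /* one-step advance, standing in for inc_deq() */
    static void inc_deq_sketch(struct toy_ring *ring)
    {
        ring->dequeue = (ring->dequeue + 1) % ring->num_trbs;
    }

    int main(void)
    {
        struct toy_ring ring = { .dequeue = 2, .num_trbs = 8 };
        int end_trb = 5;            /* index of the TD's last TRB */

        ring.dequeue = end_trb;     /* ep_ring->dequeue = td->end_trb; */
        inc_deq_sketch(&ring);      /* first TRB of the next TD */

        printf("dequeue now %d\n", ring.dequeue);  /* 6 */
        return 0;
    }
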
 static u32 sum_trb_lengths(struct xhci_td *td, union xhci_trb *stop_trb)
 {
        u32 sum;
-       union xhci_trb *trb = td->first_trb;
+       union xhci_trb *trb = td->start_trb;
        struct xhci_segment *seg = td->start_seg;
 
        for (sum = 0; trb != stop_trb; next_trb(&seg, &trb)) {
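
sum_trb_lengths() adds up the bytes queued in every TRB from the TD's start up to (not including) stop_trb. A sketch over a plain array of lengths; the kernel version also skips no-op and link TRBs, which this glosses over, and sum_trb_lengths_sketch() is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Sum the TRB lengths from 'start' up to (not including) 'stop'. */
    static uint32_t sum_trb_lengths_sketch(const uint32_t *len, int start, int stop)
    {
        uint32_t sum = 0;

        for (int i = start; i != stop; i++)
            sum += len[i];
        return sum;
    }

    int main(void)
    {
        uint32_t len[] = { 1024, 1024, 512, 256 };

        /* bytes queued before the TRB the event pointed at */
        printf("%u\n", (unsigned)sum_trb_lengths_sketch(len, 0, 2));  /* 2048 */
        return 0;
    }
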
                fallthrough;
        case COMP_ISOCH_BUFFER_OVERRUN:
                frame->status = -EOVERFLOW;
-               if (ep_trb != td->last_trb)
+               if (ep_trb != td->end_trb)
                        td->error_mid_td = true;
                break;
        case COMP_INCOMPATIBLE_DEVICE_ERROR:
        case COMP_USB_TRANSACTION_ERROR:
                frame->status = -EPROTO;
                sum_trbs_for_length = true;
-               if (ep_trb != td->last_trb)
+               if (ep_trb != td->end_trb)
                        td->error_mid_td = true;
                break;
        case COMP_STOPPED:
 
 finish_td:
        /* Don't give back TD yet if we encountered an error mid TD */
-       if (td->error_mid_td && ep_trb != td->last_trb) {
+       if (td->error_mid_td && ep_trb != td->end_trb) {
                xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
                td->urb_length_set = true;
                return 0;
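
The deferral above in miniature: an error event whose TRB pointer is not the TD's end TRB marks the TD and returns without giving it back; only the event for the final TRB completes it. struct toy_td and handle_isoc_event() are illustrative stand-ins.

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_td { int end_trb; bool error_mid_td; };

    /* Returns true once the TD may be given back to the class driver. */
    static bool handle_isoc_event(struct toy_td *td, int ep_trb, bool error)
    {
        if (error && ep_trb != td->end_trb)
            td->error_mid_td = true;

        if (td->error_mid_td && ep_trb != td->end_trb)
            return false;   /* wait for the final completion event */

        return true;
    }

    int main(void)
    {
        struct toy_td td = { .end_trb = 3 };

        printf("%d\n", handle_isoc_event(&td, 1, true));   /* 0: deferred */
        printf("%d\n", handle_isoc_event(&td, 3, false));  /* 1: complete */
        return 0;
    }
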
        frame->actual_length = 0;
 
        /* Update ring dequeue pointer */
-       ep->ring->dequeue = td->last_trb;
-       ep->ring->deq_seg = td->last_trb_seg;
+       ep->ring->dequeue = td->end_trb;
+       ep->ring->deq_seg = td->end_seg;
        inc_deq(xhci, ep->ring);
 
        return xhci_td_cleanup(xhci, td, ep->ring, status);
        case COMP_SUCCESS:
                ep->err_count = 0;
                /* handle success with untransferred data as short packet */
-               if (ep_trb != td->last_trb || remaining) {
+               if (ep_trb != td->end_trb || remaining) {
                        xhci_warn(xhci, "WARN Successful completion on short TX\n");
                        xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
                                 td->urb->ep->desc.bEndpointAddress,
                break;
        }
 
-       if (ep_trb == td->last_trb)
+       if (ep_trb == td->end_trb)
                td->urb->actual_length = requested - remaining;
        else
                td->urb->actual_length =
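
The actual_length arithmetic above, isolated: if the event points at the TD's final TRB, the request completed up to 'remaining'; otherwise the earlier TRBs completed in full and only the event TRB is partial. All names below are illustrative stand-ins.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t actual_length(uint32_t requested, uint32_t remaining,
                                  int ep_trb, int end_trb,
                                  uint32_t sum_before_ep_trb,
                                  uint32_t ep_trb_len)
    {
        if (ep_trb == end_trb)
            return requested - remaining;
        /* short transfer mid-TD: earlier TRBs completed in full */
        return sum_before_ep_trb + ep_trb_len - remaining;
    }

    int main(void)
    {
        /* three 1024-byte TRBs; the transfer stopped 100 bytes into TRB 1 */
        printf("%u\n", (unsigned)actual_length(3072, 924, 1, 2, 1024, 1024)); /* 1124 */
        return 0;
    }
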
 
        if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) {
                xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
-               ep_ring->dequeue = td->last_trb;
-               ep_ring->deq_seg = td->last_trb_seg;
+               ep_ring->dequeue = td->end_trb;
+               ep_ring->deq_seg = td->end_seg;
                inc_deq(xhci, ep_ring);
                xhci_td_cleanup(xhci, td, ep_ring, td->status);
        }
        /* Add this TD to the tail of the endpoint ring's TD list */
        list_add_tail(&td->td_list, &ep_ring->td_list);
        td->start_seg = ep_ring->enq_seg;
-       td->first_trb = ep_ring->enqueue;
+       td->start_trb = ep_ring->enqueue;
 
        return 0;
 }
                        field &= ~TRB_CHAIN;
                        field |= TRB_IOC;
                        more_trbs_coming = false;
-                       td->last_trb = ring->enqueue;
-                       td->last_trb_seg = ring->enq_seg;
+                       td->end_trb = ring->enqueue;
+                       td->end_seg = ring->enq_seg;
                        if (xhci_urb_suitable_for_idt(urb)) {
                                memcpy(&send_addr, urb->transfer_buffer,
                                       trb_buff_len);
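
The memcpy above is the Immediate Data Transfer branch: when an OUT payload fits the TRB's 64-bit parameter field, it is copied into the TRB itself instead of being DMA-mapped. A sketch of that decision; idt_suitable() and TRB_IDT_MAX are illustrative, and the kernel's check has a few more conditions (endpoint type among them).

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TRB_IDT_MAX 8u  /* payload must fit the TRB's 64-bit parameter */

    static bool idt_suitable(unsigned int len, bool is_out)
    {
        return is_out && len <= TRB_IDT_MAX;
    }

    int main(void)
    {
        uint64_t trb_param = 0;
        const unsigned char payload[] = { 0x01, 0x02, 0x03, 0x04 };

        if (idt_suitable(sizeof(payload), true))
            memcpy(&trb_param, payload, sizeof(payload)); /* data lives in-TRB */

        printf("param = %#llx\n", (unsigned long long)trb_param);
        return 0;
    }
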
                ret = prepare_transfer(xhci, xhci->devs[slot_id],
                                       ep_index, urb->stream_id,
                                       1, urb, 1, mem_flags);
-               urb_priv->td[1].last_trb = ring->enqueue;
-               urb_priv->td[1].last_trb_seg = ring->enq_seg;
+               urb_priv->td[1].end_trb = ring->enqueue;
+               urb_priv->td[1].end_seg = ring->enq_seg;
                field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
                queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
        }
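
The second TD above exists to queue a trailing zero-length packet. The decision that leads to it can be sketched as a predicate; this is a simplification of the URB_ZERO_PACKET / max-packet check (the kernel also consults how many TDs were allocated for the URB), and need_zero_length_packet() is illustrative.

    #include <stdbool.h>
    #include <stdio.h>

    static bool need_zero_length_packet(bool urb_zero_flag, unsigned int length,
                                        unsigned int maxpacket)
    {
        return urb_zero_flag && length > 0 && length % maxpacket == 0;
    }

    int main(void)
    {
        printf("%d\n", need_zero_length_packet(true, 1024, 512));  /* 1 */
        printf("%d\n", need_zero_length_packet(true, 1000, 512));  /* 0 */
        return 0;
    }
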
        }
 
-       /* Save the DMA address of the last TRB in the TD */
-       td->last_trb = ep_ring->enqueue;
-       td->last_trb_seg = ep_ring->enq_seg;
+       /* Save the pointer to the last TRB and its segment in the TD */
+       td->end_trb = ep_ring->enqueue;
+       td->end_seg = ep_ring->enq_seg;
 
        /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
        /* If the device sent data, the status stage is an OUT transfer */
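
The rule in the comment above ("If the device sent data, the status stage is an OUT transfer") is easy to state as code: the status stage always runs opposite to the data stage, and is IN when there is no data stage. status_stage_is_in() is an illustrative stand-in for that selection.

    #include <stdbool.h>
    #include <stdio.h>

    static bool status_stage_is_in(bool has_data_stage, bool data_stage_in)
    {
        if (!has_data_stage)
            return true;        /* no data stage: status is always IN */
        return !data_stage_in;  /* data IN -> status OUT, and vice versa */
    }

    int main(void)
    {
        printf("data IN -> status IN? %d\n", status_stage_is_in(true, true));   /* 0 */
        printf("no data -> status IN? %d\n", status_stage_is_in(false, false)); /* 1 */
        return 0;
    }
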
                                field |= TRB_CHAIN;
                        } else {
                                more_trbs_coming = false;
-                               td->last_trb = ep_ring->enqueue;
-                               td->last_trb_seg = ep_ring->enq_seg;
+                               td->end_trb = ep_ring->enqueue;
+                               td->end_seg = ep_ring->enq_seg;
                                field |= TRB_IOC;
                                if (trb_block_event_intr(xhci, num_tds, i, ir))
                                        field |= TRB_BEI;
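
The CHAIN/IOC/BEI selection above, isolated: interior TRBs chain to the next one, the final TRB raises IOC, and isoc TDs whose interrupt can be batched also set BEI. The bit positions match the xHCI control-word layout; last_trb_flags() and its parameters are illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TRB_CHAIN_BIT (1u << 4)
    #define TRB_IOC_BIT   (1u << 5)
    #define TRB_BEI_BIT   (1u << 9)

    static uint32_t last_trb_flags(bool is_last, bool block_event_intr)
    {
        uint32_t field = 0;

        if (!is_last) {
            field |= TRB_CHAIN_BIT;     /* more TRBs in this TD */
        } else {
            field |= TRB_IOC_BIT;       /* interrupt on completion */
            if (block_event_intr)
                field |= TRB_BEI_BIT;   /* defer the actual interrupt */
        }
        return field;
    }

    int main(void)
    {
        printf("%#x\n", (unsigned)last_trb_flags(false, false)); /* 0x10 */
        printf("%#x\n", (unsigned)last_trb_flags(true, true));   /* 0x220 */
        return 0;
    }
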
        /* Use the first TD as a temporary variable to turn the TDs we've queued
         * into No-ops with a software-owned cycle bit. That way the hardware
         * won't accidentally start executing bogus TDs when we partially
-        * overwrite them.  td->first_trb and td->start_seg are already set.
+        * overwrite them.  td->start_trb and td->start_seg are already set.
         */
-       urb_priv->td[0].last_trb = ep_ring->enqueue;
+       urb_priv->td[0].end_trb = ep_ring->enqueue;
        /* Every TRB except the first & last will have its cycle bit flipped. */
        td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
 
        /* Reset the ring enqueue back to the first TRB and its cycle bit. */
-       ep_ring->enqueue = urb_priv->td[0].first_trb;
+       ep_ring->enqueue = urb_priv->td[0].start_trb;
        ep_ring->enq_seg = urb_priv->td[0].start_seg;
        ep_ring->cycle_state = start_cycle;
        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
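
The cleanup path above, compressed into one sketch: flip the interior cycle bits back to software-owned (what td_to_noop() does with flip_cycle set) and rewind the enqueue pointer and cycle state to where the first TD started. Index arithmetic replaces the segment/pointer bookkeeping; rollback() and struct toy_ring are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define CYCLE_BIT (1u << 0)

    struct toy_ring { uint32_t trb[8]; int enqueue; int cycle; };

    /* Flip interior cycle bits back to software-owned and rewind the
     * enqueue pointer and cycle state to where the first TD started. */
    static void rollback(struct toy_ring *ring, int start_trb, int start_cycle)
    {
        for (int i = start_trb + 1; i < ring->enqueue - 1; i++)
            ring->trb[i] ^= CYCLE_BIT;

        ring->enqueue = start_trb;   /* ep_ring->enqueue = td[0].start_trb */
        ring->cycle = start_cycle;   /* ep_ring->cycle_state = start_cycle */
    }

    int main(void)
    {
        struct toy_ring ring = { .trb = { 1, 1, 1, 1 }, .enqueue = 4, .cycle = 1 };

        rollback(&ring, 0, 1);
        printf("enqueue %d cycle %d trb[1] %#x\n",
               ring.enqueue, ring.cycle, (unsigned)ring.trb[1]); /* 0 1 0 */
        return 0;
    }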