static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
                                               unsigned int cycle_state,
                                               unsigned int max_packet,
+                                              unsigned int num,
                                               gfp_t flags)
 {
        struct xhci_segment *seg;
                for (i = 0; i < TRBS_PER_SEGMENT; i++)
                        seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
        }
+       seg->num = num;
        seg->dma = dma;
        seg->next = NULL;
 
                enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
        struct xhci_segment *prev;
+       unsigned int num = 0;
        bool chain_links;
 
        /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
                         (type == TYPE_ISOC &&
                          (xhci->quirks & XHCI_AMD_0x96_HOST)));
 
-       prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
+       prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
        if (!prev)
                return -ENOMEM;
-       num_segs--;
+       num++;
 
        *first = prev;
-       while (num_segs > 0) {
+       while (num < num_segs) {
                struct xhci_segment     *next;
 
-               next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
+               next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
+                                         flags);
                if (!next) {
                        prev = *first;
                        while (prev) {
                xhci_link_segments(prev, next, type, chain_links);
 
                prev = next;
-               num_segs--;
+               num++;
        }
        xhci_link_segments(prev, *first, type, chain_links);
        *last = prev;
 {
        struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
        size_t erst_size;
-       u64 tmp64;
        u32 tmp;
 
        if (!ir)
                tmp &= ERST_SIZE_MASK;
                writel(tmp, &ir->ir_set->erst_size);
 
-               tmp64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
-               tmp64 &= (u64) ERST_PTR_MASK;
-               xhci_write_64(xhci, tmp64, &ir->ir_set->erst_dequeue);
+               xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue);
        }
 
        /* free interrupter event ring */
 
 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
 {
-       u64 temp;
        dma_addr_t deq;
 
        deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
        if (!deq)
                xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
        /* Update HC event ring dequeue pointer */
-       temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
-       temp &= ERST_PTR_MASK;
        /* Don't clear the EHB bit (which is RW1C) because
         * there might be more events to service.
         */
-       temp &= ~ERST_EHB;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                       "// Write event ring dequeue pointer, preserving EHB bit");
-       xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+       xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK),
                        &ir->ir_set->erst_dequeue);
 }