spin_lock_irqsave (&ehci->lock, flags);
        for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
                qh_lines (ehci, qh, &next, &size);
-       if (ehci->async_unlink && size > 0) {
+       if (!list_empty(&ehci->async_unlink) && size > 0) {
                temp = scnprintf(next, size, "\nunlink =\n");
                size -= temp;
                next += temp;
 
-               for (qh = ehci->async_unlink; size > 0 && qh;
-                               qh = qh->unlink_next)
-                       qh_lines (ehci, qh, &next, &size);
+               list_for_each_entry(qh, &ehci->async_unlink, unlink_node) {
+                       if (size <= 0)
+                               break;
+                       qh_lines(ehci, qh, &next, &size);
+               }
        }
        spin_unlock_irqrestore (&ehci->lock, flags);
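
The hunk above trades the hand-rolled unlink_next walk for list_for_each_entry(). Kernel lists embed the node inside the payload structure and recover the payload pointer with container_of(); the following is a minimal userspace model of that pattern (my_qh and list_add_tail_demo are illustrative names, not the driver's):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* GNU typeof, as used by the real kernel headers */
#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct my_qh {
	int id;
	struct list_head unlink_node;	/* node lives inside the payload */
};

static void list_add_tail_demo(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;		/* head->prev is the current tail */
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head unlink = { &unlink, &unlink };	/* empty list */
	struct my_qh a = { .id = 1 }, b = { .id = 2 };
	struct my_qh *qh;

	list_add_tail_demo(&a.unlink_node, &unlink);
	list_add_tail_demo(&b.unlink_node, &unlink);
	list_for_each_entry(qh, &unlink, unlink_node)
		printf("qh %d\n", qh->id);	/* prints 1, then 2 */
	return 0;
}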
 
                }
        }
 
-       if (ehci->async_unlink) {
+       if (!list_empty(&ehci->async_unlink)) {
                temp = scnprintf(next, size, "async unlink qh %p\n",
-                               ehci->async_unlink);
+                               list_first_entry(&ehci->async_unlink,
+                                               struct ehci_qh, unlink_node));
                size -= temp;
                next += temp;
        }
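
Note that list_first_entry() performs no empty-list check of its own; it is defined in <linux/list.h> roughly as below, so on an empty list it would compute a bogus ehci_qh pointer from the list head embedded in ehci_hcd. The !list_empty() guard above is what makes the call safe:

#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/* valid only when the list is known to be non-empty */
#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)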
 
         * periodic_size can shrink by USBCMD update if hcc_params allows.
         */
        ehci->periodic_size = DEFAULT_I_TDPS;
+       INIT_LIST_HEAD(&ehci->async_unlink);
+       INIT_LIST_HEAD(&ehci->async_iaa);
+       INIT_LIST_HEAD(&ehci->intr_unlink);
        INIT_LIST_HEAD(&ehci->intr_qh_list);
        INIT_LIST_HEAD(&ehci->cached_itd_list);
        INIT_LIST_HEAD(&ehci->cached_sitd_list);
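
The three new INIT_LIST_HEAD() calls are required, not cosmetic: an all-zero list_head is not an empty list. list_empty() tests for the self-pointing state that initialization establishes, simplified here from <linux/list.h>:

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}
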
                /* guard against (alleged) silicon errata */
                if (cmd & CMD_IAAD)
                        ehci_dbg(ehci, "IAA with IAAD still set?\n");
-               if (ehci->async_iaa)
+               if (!list_empty(&ehci->async_iaa))
                        COUNT(ehci->stats.iaa);
                end_unlink_async(ehci);
        }
 
        if (--ehci->async_count)
                return;
 
-       /* The async schedule and async_unlink list are supposed to be empty */
-       WARN_ON(ehci->async->qh_next.qh || ehci->async_unlink);
+       /* The async schedule and unlink lists are supposed to be empty */
+       WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
+                       !list_empty(&ehci->async_iaa));
 
        /* Don't turn off the schedule until ASS is 1 */
        ehci_poll_ASS(ehci);
 
        /* Add to the end of the list of QHs waiting for the next IAAD */
        qh->qh_state = QH_STATE_UNLINK_WAIT;
-       if (ehci->async_unlink)
-               ehci->async_unlink_last->unlink_next = qh;
-       else
-               ehci->async_unlink = qh;
-       ehci->async_unlink_last = qh;
+       list_add_tail(&qh->unlink_node, &ehci->async_unlink);
 
        /* Unlink it from the schedule */
        prev = ehci->async;
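
list_add_tail() absorbs the old head/tail bookkeeping because in a circular doubly-linked list head->prev always is the tail; that is why the async_unlink_last field can be deleted outright. Simplified from <linux/list.h>:

static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);	/* insert before the head */
}
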
         * Do nothing if an IAA cycle is already running or
         * if one will be started shortly.
         */
-       if (ehci->async_iaa || ehci->async_unlinking)
+       if (!list_empty(&ehci->async_iaa) || ehci->async_unlinking)
                return;
 
        /* If the controller isn't running, we don't have to wait for it */
        if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
 
                /* Do all the waiting QHs */
-               ehci->async_iaa = ehci->async_unlink;
-               ehci->async_unlink = NULL;
+               list_splice_tail_init(&ehci->async_unlink, &ehci->async_iaa);
 
                if (!nested)            /* Avoid recursion */
                        end_unlink_async(ehci);
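
list_splice_tail_init() replaces the two pointer assignments with a constant-time whole-list transfer, and the _init suffix matters: it re-empties async_unlink afterwards, so the list_empty(&ehci->async_unlink) tests elsewhere in this patch keep working. Simplified from <linux/list.h>:

static inline void __list_splice(const struct list_head *list,
				 struct list_head *prev,
				 struct list_head *next)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	first->prev = prev;
	prev->next = first;
	last->next = next;
	next->prev = last;
}

static inline void list_splice_tail_init(struct list_head *list,
					 struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head->prev, head);
		INIT_LIST_HEAD(list);	/* leave the source list empty */
	}
}
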
                struct ehci_qh          *qh;
 
                /* Do only the first waiting QH (nVidia bug?) */
-               qh = ehci->async_unlink;
+               qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
+                               unlink_node);
 
                /*
                 * Intel (?) bug: The HC can write back the overlay region
                 * even after the IAA interrupt occurs.  In self-defense,
                 * always go through two IAA cycles for each QH.
                 */
-               if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
+               if (qh->qh_state == QH_STATE_UNLINK_WAIT)
                        qh->qh_state = QH_STATE_UNLINK;
-               } else {
-                       ehci->async_iaa = qh;
-                       ehci->async_unlink = qh->unlink_next;
-                       qh->unlink_next = NULL;
-               }
+               else
+                       list_move_tail(&qh->unlink_node, &ehci->async_iaa);
 
                /* Make sure the unlinks are all visible to the hardware */
                wmb();
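
The deleted else-branch was a hand-written "pop qh off async_unlink, push it onto async_iaa"; list_move_tail() is exactly that composition. A simplified rendering of the <linux/list.h> helper, using list_add_tail() from the earlier sketch:

static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	/* unlink the node from its current list (async_unlink above)... */
	list->prev->next = list->next;
	list->next->prev = list->prev;
	/* ...and append it to the destination (async_iaa) */
	list_add_tail(list, head);
}
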
        /* Process the idle QHs */
  restart:
        ehci->async_unlinking = true;
-       while (ehci->async_iaa) {
-               qh = ehci->async_iaa;
-               ehci->async_iaa = qh->unlink_next;
-               qh->unlink_next = NULL;
+       while (!list_empty(&ehci->async_iaa)) {
+               qh = list_first_entry(&ehci->async_iaa, struct ehci_qh,
+                               unlink_node);
+               list_del(&qh->unlink_node);
 
                qh->qh_state = QH_STATE_IDLE;
                qh->qh_next.qh = NULL;
        ehci->async_unlinking = false;
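
The loop above is the standard pop-front idiom: list_first_entry() plus list_del(). In the kernel, list_del() also poisons the removed node's pointers (LIST_POISON1/2 come from <linux/poison.h>), which supersedes the old code's defensive qh->unlink_next = NULL and turns any later use of a stale node into an immediate fault. Simplified:

static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;	/* trap use-after-delete */
	entry->prev = LIST_POISON2;
}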
 
        /* Start a new IAA cycle if any QHs are waiting for it */
-       if (ehci->async_unlink) {
+       if (!list_empty(&ehci->async_unlink)) {
                start_iaa_cycle(ehci, true);
                if (unlikely(ehci->rh_state < EHCI_RH_RUNNING))
                        goto restart;
        }
 
        /* If nothing else is being unlinked, unlink the last empty QH */
-       if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) {
+       if (list_empty(&ehci->async_iaa) && list_empty(&ehci->async_unlink) &&
+                       qh_to_unlink) {
                start_unlink_async(ehci, qh_to_unlink);
                --count;
        }
 
        qh->unlink_cycle = ehci->intr_unlink_cycle;
 
        /* New entries go at the end of the intr_unlink list */
-       if (ehci->intr_unlink)
-               ehci->intr_unlink_last->unlink_next = qh;
-       else
-               ehci->intr_unlink = qh;
-       ehci->intr_unlink_last = qh;
+       list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
 
        if (ehci->intr_unlinking)
                ;       /* Avoid recursive calls */
        else if (ehci->rh_state < EHCI_RH_RUNNING)
                ehci_handle_intr_unlinks(ehci);
-       else if (ehci->intr_unlink == qh) {
+       else if (ehci->intr_unlink.next == &qh->unlink_node) {
                ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
                ++ehci->intr_unlink_cycle;
        }
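
The converted test, ehci->intr_unlink.next == &qh->unlink_node, asks whether qh just became the first entry; since qh was appended with list_add_tail(), that is the same as asking whether it is the only entry. If the raw pointer comparison reads as too low-level, an equivalent spelling using the stock list_is_singular() helper would be:

	else if (list_is_singular(&ehci->intr_unlink)) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}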
 
         * process all the QHs on the list.
         */
        ehci->intr_unlinking = true;
-       while (ehci->intr_unlink) {
-               struct ehci_qh  *qh = ehci->intr_unlink;
+       while (!list_empty(&ehci->intr_unlink)) {
+               struct ehci_qh  *qh;
 
+               qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
+                               unlink_node);
                if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
                        break;
-               ehci->intr_unlink = qh->unlink_next;
-               qh->unlink_next = NULL;
+               list_del(&qh->unlink_node);
                end_unlink_intr(ehci, qh);
        }
 
        /* Handle remaining entries later */
-       if (ehci->intr_unlink) {
+       if (!list_empty(&ehci->intr_unlink)) {
                ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
                ++ehci->intr_unlink_cycle;
        }
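
The unlink_cycle comparison in the loop works because new entries always go on the tail, so the list stays ordered by arrival: the first QH whose recorded cycle matches the current one marks the start of the "too recent" suffix, and everything from there on is left for the next timer pass. Schematically:

/*
 * start_unlink_intr():  qh->unlink_cycle = intr_unlink_cycle;
 *                       if qh is first: arm timer, ++intr_unlink_cycle
 * timer fires:          pop QHs while unlink_cycle != intr_unlink_cycle
 *                       (i.e. those queued before the last re-arm); if
 *                       QHs remain, re-arm and ++intr_unlink_cycle again
 */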
 
        /* async schedule support */
        struct ehci_qh          *async;
        struct ehci_qh          *dummy;         /* For AMD quirk use */
-       struct ehci_qh          *async_unlink;
-       struct ehci_qh          *async_unlink_last;
-       struct ehci_qh          *async_iaa;
+       struct list_head        async_unlink;
+       struct list_head        async_iaa;
        unsigned                async_unlink_cycle;
        unsigned                async_count;    /* async activity count */
 
        unsigned                i_thresh;       /* uframes HC might cache */
 
        union ehci_shadow       *pshadow;       /* mirror hw periodic table */
-       struct ehci_qh          *intr_unlink;
-       struct ehci_qh          *intr_unlink_last;
+       struct list_head        intr_unlink;
        unsigned                intr_unlink_cycle;
        unsigned                now_frame;      /* frame from HC hardware */
        unsigned                last_iso_frame; /* last frame scanned for iso */
        struct list_head        qtd_list;       /* sw qtd list */
        struct list_head        intr_node;      /* list of intr QHs */
        struct ehci_qtd         *dummy;
-       struct ehci_qh          *unlink_next;   /* next on unlink list */
+       struct list_head        unlink_node;
 
        unsigned                unlink_cycle;
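
Net effect on the data structures, as a sketch: per list the old scheme kept a head pointer (plus a tail pointer for the two FIFO lists) in ehci_hcd and a one-way unlink_next in every QH; the new scheme keeps one list_head per list in ehci_hcd and a single embedded unlink_node per QH, which can serve async_unlink, async_iaa, and intr_unlink alike because a QH is only ever on one of those lists at a time.

/*
 * ehci_hcd:  async_unlink + async_unlink_last \
 *            async_iaa                          >  3 x struct list_head
 *            intr_unlink + intr_unlink_last   /
 * ehci_qh:   unlink_next (one-way)            ->  unlink_node (list_head)
 */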