 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
        struct usb_request      *req;
+       struct usb_request      *tmp;
        unsigned long           flags;
 
        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
-       while (!list_empty(&dev->rx_reqs)) {
-               req = container_of(dev->rx_reqs.next,
-                               struct usb_request, list);
+       list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);
 
@@ ... @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
-       req = container_of(dev->tx_reqs.next, struct usb_request, list);
+       req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
        list_del(&req->list);
 
        /* temporarily stop TX queue when the freelist empties */
@@ ... @@ void gether_disconnect(struct gether *link)
 {
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;
+       struct usb_request      *tmp;
 
        WARN_ON(!dev);
        if (!dev)
@@ ... @@ void gether_disconnect(struct gether *link)
         */
        usb_ep_disable(link->in_ep);
        spin_lock(&dev->req_lock);
-       while (!list_empty(&dev->tx_reqs)) {
-               req = container_of(dev->tx_reqs.next,
-                                       struct usb_request, list);
+       list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) {
                list_del(&req->list);
 
                spin_unlock(&dev->req_lock);
@@ ... @@ void gether_disconnect(struct gether *link)
 
        usb_ep_disable(link->out_ep);
        spin_lock(&dev->req_lock);
-       while (!list_empty(&dev->rx_reqs)) {
-               req = container_of(dev->rx_reqs.next,
-                                       struct usb_request, list);
+       list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
                list_del(&req->list);
 
                spin_unlock(&dev->req_lock);
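
A note on the converted loops, for reference: the unchanged context lines show dev->req_lock being dropped and re-taken inside each loop body, and list_for_each_entry_safe() carries a cached pointer to the next node (tmp) across that window, so it stays correct only if nothing else can touch the list while the lock is released. The sketch below shows the lock-safe drain shape the file already relies on, built on list_first_entry() as in the eth_start_xmit hunk above. It is illustrative only; drain_reqs() and free_one() are stand-ins for this note and are not functions in u_ether.c.

        /*
         * Minimal sketch, not part of the patch: drain a request list when
         * the lock must be dropped around each per-request call.
         */
        static void drain_reqs(struct eth_dev *dev, struct list_head *head)
        {
                struct usb_request *req;

                spin_lock(&dev->req_lock);
                while (!list_empty(head)) {
                        /* re-read the head each pass; no cached next pointer */
                        req = list_first_entry(head, struct usb_request, list);
                        list_del(&req->list);

                        spin_unlock(&dev->req_lock);
                        free_one(req);          /* hypothetical per-request work */
                        spin_lock(&dev->req_lock);
                }
                spin_unlock(&dev->req_lock);
        }

Re-reading the list head on every pass keeps the loop independent of whatever happened to the list while the lock was dropped, which is what the original while (!list_empty()) form relies on wherever the body unlocks.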