}
        }
 
-       if (list_empty(&tp->rx_done))
+       if (list_empty(&tp->rx_done) || work_done >= budget)
                goto out1;
 
        clear_bit(RX_EPROTO, &tp->flags);
                struct urb *urb;
                u8 *rx_data;
 
+               /* A single USB bulk transfer may contain many packets, so the
+                * total number of packets may exceed the budget. Handle all
+                * packets in the current bulk transfer, and if the budget is
+                * exhausted, stop and leave the next bulk transfer to be
+                * handled on the next schedule.
+                */
+               if (work_done >= budget)
+                       break;
+
                list_del_init(cursor);
 
                agg = list_entry(cursor, struct rx_agg, list);
                        unsigned int pkt_len, rx_frag_head_sz;
                        struct sk_buff *skb;
 
-                       /* limit the skb numbers for rx_queue */
-                       if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
-                               break;
+                       WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);
 
                        pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
                        if (pkt_len < ETH_ZLEN)
                }
        }
 
+       /* Splice the remaining list back to rx_done for the next schedule */
        if (!list_empty(&rx_queue)) {
                spin_lock_irqsave(&tp->rx_lock, flags);
-               list_splice_tail(&rx_queue, &tp->rx_done);
+               list_splice(&rx_queue, &tp->rx_done);
                spin_unlock_irqrestore(&tp->rx_lock, flags);
        }