while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
                dma_addr_t phys;
 
-               tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
-
                /* Ensure we see complete descriptor update */
                dma_rmb();
-               phys = desc_get_phys_addr(lp, cur_p);
-               dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
-                                DMA_FROM_DEVICE);
 
                skb = cur_p->skb;
                cur_p->skb = NULL;
-               length = cur_p->app4 & 0x0000FFFF;
-
-               skb_put(skb, length);
-               skb->protocol = eth_type_trans(skb, ndev);
-               /*skb_checksum_none_assert(skb);*/
-               skb->ip_summed = CHECKSUM_NONE;
-
-               /* if we're doing Rx csum offload, set it up */
-               if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
-                       csumstatus = (cur_p->app2 &
-                                     XAE_FULL_CSUM_STATUS_MASK) >> 3;
-                       if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
-                           (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
-                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               /* skb could be NULL if a previous pass already received the
+                * packet for this slot in the ring, but failed to refill it
+                * with a newly allocated buffer. In this case, don't try to
+                * receive it again.
+                */
+               if (likely(skb)) {
+                       length = cur_p->app4 & 0x0000FFFF;
+
+                       phys = desc_get_phys_addr(lp, cur_p);
+                       dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
+                                        DMA_FROM_DEVICE);
+
+                       skb_put(skb, length);
+                       skb->protocol = eth_type_trans(skb, ndev);
+                       /*skb_checksum_none_assert(skb);*/
+                       skb->ip_summed = CHECKSUM_NONE;
+
+                       /* if we're doing Rx csum offload, set it up */
+                       if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+                               csumstatus = (cur_p->app2 &
+                                             XAE_FULL_CSUM_STATUS_MASK) >> 3;
+                               if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
+                                   csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
+                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               }
+                       } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
+                                  skb->protocol == htons(ETH_P_IP) &&
+                                  skb->len > 64) {
+                               skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
+                               skb->ip_summed = CHECKSUM_COMPLETE;
                        }
-               } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
-                          skb->protocol == htons(ETH_P_IP) &&
-                          skb->len > 64) {
-                       skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
-                       skb->ip_summed = CHECKSUM_COMPLETE;
-               }
 
-               netif_rx(skb);
+                       netif_rx(skb);
 
-               size += length;
-               packets++;
+                       size += length;
+                       packets++;
+               }
 
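+               /* If refilling this slot fails below, break out of the loop
+                * rather than returning, so that the slots refilled so far
+                * (recorded in tail_p) are still handed back to the DMA
+                * engine after the loop.
+                */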
                new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
                if (!new_skb)
-                       return;
+                       break;
 
                phys = dma_map_single(ndev->dev.parent, new_skb->data,
                                      lp->max_frm_size,
                                      DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
                        if (net_ratelimit())
                                netdev_err(ndev, "RX DMA mapping error\n");
                        dev_kfree_skb(new_skb);
-                       return;
+                       break;
                }
                desc_set_phys_addr(lp, phys, cur_p);
 
                cur_p->status = 0;
                cur_p->skb = new_skb;
 
+               /* Only update tail_p to mark this slot as usable after it has
+                * been successfully refilled.
+                */
+               tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+
                if (++lp->rx_bd_ci >= lp->rx_bd_num)
                        lp->rx_bd_ci = 0;
                cur_p = &lp->rx_bd_v[lp->rx_bd_ci];