* i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors occurred during allocation
  **/
-void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
        u16 i = rx_ring->next_to_use;
        union i40e_rx_desc *rx_desc;
 
        /* do nothing if no valid netdev defined */
        if (!rx_ring->netdev || !cleaned_count)
-               return;
+               return false;
 
        while (cleaned_count--) {
                rx_desc = I40E_RX_DESC(rx_ring, i);
                        i = 0;
        }
 
+       if (rx_ring->next_to_use != i)
+               i40e_release_rx_desc(rx_ring, i);
+
+       return false;
+
 no_buffers:
        if (rx_ring->next_to_use != i)
                i40e_release_rx_desc(rx_ring, i);
+
+       /* make sure to come back via polling to try again after
+        * allocation failure
+        */
+       return true;
 }
 
 /**
  * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors occurred during allocation
  **/
-void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
        u16 i = rx_ring->next_to_use;
        union i40e_rx_desc *rx_desc;
 
        /* do nothing if no valid netdev defined */
        if (!rx_ring->netdev || !cleaned_count)
-               return;
+               return false;
 
        while (cleaned_count--) {
                rx_desc = I40E_RX_DESC(rx_ring, i);
                        if (dma_mapping_error(rx_ring->dev, bi->dma)) {
                                rx_ring->rx_stats.alloc_buff_failed++;
                                bi->dma = 0;
+                               dev_kfree_skb(bi->skb);
+                               bi->skb = NULL;
                                goto no_buffers;
                        }
                }
                        i = 0;
        }
 
+       if (rx_ring->next_to_use != i)
+               i40e_release_rx_desc(rx_ring, i);
+
+       return false;
+
 no_buffers:
        if (rx_ring->next_to_use != i)
                i40e_release_rx_desc(rx_ring, i);
+
+       /* make sure to come back via polling to try again after
+        * allocation failure
+        */
+       return true;
 }
 
 /**
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
        u16 i = rx_ring->next_to_clean;
        union i40e_rx_desc *rx_desc;
        u32 rx_error, rx_status;
+       bool failure = false;
        u8 rx_ptype;
        u64 qword;
 
                u16 vlan_tag;
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-                       i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+                       failure = failure ||
+                                 i40e_alloc_rx_buffers_ps(rx_ring,
+                                                          cleaned_count);
                        cleaned_count = 0;
                }
 
                                                        rx_ring->rx_hdr_len);
                        if (!skb) {
                                rx_ring->rx_stats.alloc_buff_failed++;
+                               failure = true;
                                break;
                        }
 
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-       return total_rx_packets;
+       return failure ? budget : total_rx_packets;
 }
 
 /**
        union i40e_rx_desc *rx_desc;
        u32 rx_error, rx_status;
        u16 rx_packet_len;
+       bool failure = false;
        u8 rx_ptype;
        u64 qword;
        u16 i;
                u16 vlan_tag;
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-                       i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
+                       failure = failure ||
+                                 i40e_alloc_rx_buffers_1buf(rx_ring,
+                                                            cleaned_count);
                        cleaned_count = 0;
                }
 
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-       return total_rx_packets;
+       return failure ? budget : total_rx_packets;
 }
 
 static u32 i40e_buildreg_itr(const int type, const u16 itr)
 
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 #define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
 void i40e_alloc_rx_headers(struct i40e_ring *rxr);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
 
  * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors occurred during allocation
  **/
-void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
        u16 i = rx_ring->next_to_use;
        union i40e_rx_desc *rx_desc;
 
        /* do nothing if no valid netdev defined */
        if (!rx_ring->netdev || !cleaned_count)
-               return;
+               return false;
 
        while (cleaned_count--) {
                rx_desc = I40E_RX_DESC(rx_ring, i);
                        i = 0;
        }
 
+       if (rx_ring->next_to_use != i)
+               i40e_release_rx_desc(rx_ring, i);
+
+       return false;
+
 no_buffers:
        if (rx_ring->next_to_use != i)
                i40e_release_rx_desc(rx_ring, i);
+
+       /* make sure to come back via polling to try again after
+        * allocation failure
+        */
+       return true;
 }
 
 /**
  * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors occurred during allocation
  **/
-void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
        u16 i = rx_ring->next_to_use;
        union i40e_rx_desc *rx_desc;
 
        /* do nothing if no valid netdev defined */
        if (!rx_ring->netdev || !cleaned_count)
-               return;
+               return false;
 
        while (cleaned_count--) {
                rx_desc = I40E_RX_DESC(rx_ring, i);
                        if (dma_mapping_error(rx_ring->dev, bi->dma)) {
                                rx_ring->rx_stats.alloc_buff_failed++;
                                bi->dma = 0;
+                               dev_kfree_skb(bi->skb);
+                               bi->skb = NULL;
                                goto no_buffers;
                        }
                }
                        i = 0;
        }
 
+       if (rx_ring->next_to_use != i)
+               i40e_release_rx_desc(rx_ring, i);
+
+       return false;
+
 no_buffers:
        if (rx_ring->next_to_use != i)
                i40e_release_rx_desc(rx_ring, i);
+
+       /* make sure to come back via polling to try again after
+        * allocation failure
+        */
+       return true;
 }
 
 /**
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
        u16 i = rx_ring->next_to_clean;
        union i40e_rx_desc *rx_desc;
        u32 rx_error, rx_status;
+       bool failure = false;
        u8 rx_ptype;
        u64 qword;
 
                u16 vlan_tag;
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-                       i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+                       failure = failure ||
+                                 i40evf_alloc_rx_buffers_ps(rx_ring,
+                                                            cleaned_count);
                        cleaned_count = 0;
                }
 
                                                        rx_ring->rx_hdr_len);
                        if (!skb) {
                                rx_ring->rx_stats.alloc_buff_failed++;
+                               failure = true;
                                break;
                        }
 
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-       return total_rx_packets;
+       return failure ? budget : total_rx_packets;
 }
 
 /**
        union i40e_rx_desc *rx_desc;
        u32 rx_error, rx_status;
        u16 rx_packet_len;
+       bool failure = false;
        u8 rx_ptype;
        u64 qword;
        u16 i;
                u16 vlan_tag;
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-                       i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
+                       failure = failure ||
+                                 i40evf_alloc_rx_buffers_1buf(rx_ring,
+                                                              cleaned_count);
                        cleaned_count = 0;
                }
 
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-       return total_rx_packets;
+       return failure ? budget : total_rx_packets;
 }
 
 static u32 i40e_buildreg_itr(const int type, const u16 itr)
 
 #define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
 void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);