        return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+                                   int rx_buffer_pgcnt)
 {
        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
        struct page *page = rx_buffer->page;
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
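+       /* compare against the refcount snapshot taken in
+        * ixgbe_get_rx_buffer(); re-reading page_ref_count() here could
+        * race with the fragment being freed inside xdp_do_redirect()
+        */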
+       if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
                return false;
 #else
        /* The last offset is a bit aggressive in that we assume the
 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
                                                   union ixgbe_adv_rx_desc *rx_desc,
                                                   struct sk_buff **skb,
-                                                  const unsigned int size)
+                                                  const unsigned int size,
+                                                  int *rx_buffer_pgcnt)
 {
        struct ixgbe_rx_buffer *rx_buffer;
 
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
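+       /* snapshot the page refcount before the buffer is handed to the
+        * XDP program or the stack; a count read after xdp_do_redirect()
+        * may miss a concurrent free and lead to premature page reuse
+        */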
+       *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+               page_count(rx_buffer->page);
+#else
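+               /* the refcount is not used by the reuse check when
+                * PAGE_SIZE >= 8192; the check is offset-based instead
+                */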
+               0;
+#endif
        prefetchw(rx_buffer->page);
        *skb = rx_buffer->skb;
 
 
 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
                                struct ixgbe_rx_buffer *rx_buffer,
-                               struct sk_buff *skb)
+                               struct sk_buff *skb,
+                               int rx_buffer_pgcnt)
 {
-       if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+       if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
                /* hand second half of page back to the ring */
                ixgbe_reuse_rx_page(rx_ring, rx_buffer);
        } else {
                union ixgbe_adv_rx_desc *rx_desc;
                struct ixgbe_rx_buffer *rx_buffer;
                struct sk_buff *skb;
+               int rx_buffer_pgcnt;
                unsigned int size;
 
                /* return some buffers to hardware, one at a time is too slow */
                 */
                dma_rmb();
 
-               rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+               rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb,
+                                               size, &rx_buffer_pgcnt);
 
                /* retrieve a buffer from the ring */
                if (!skb) {
                        break;
                }
 
-               ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+               ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
                cleaned_count++;
 
                /* place incomplete frames back on ring for completion */