/**
  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
  * @rx_buf: buffer containing the page
+ * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
  *
  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
  * which will assign the current buffer to the buffer that next_to_alloc is
  * pointing to; otherwise, the DMA mapping needs to be destroyed and
  * page freed
  */
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+static bool
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
 {
        unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
        struct page *page = rx_buf->page;
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely((page_count(page) - pagecnt_bias) > 1))
+       if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
                return false;
 #else
 #define ICE_LAST_OFFSET \
  * @rx_ring: Rx descriptor ring to transact packets on
  * @skb: skb to be used
  * @size: size of buffer to add to skb
+ * @rx_buf_pgcnt: rx_buf page refcount, sampled before xdp_do_redirect()
  *
  * This function will pull an Rx buffer from the ring and synchronize it
  * for use by the CPU.
  */
 static struct ice_rx_buf *
 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
-              const unsigned int size)
+              const unsigned int size, int *rx_buf_pgcnt)
 {
        struct ice_rx_buf *rx_buf;
 
        rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
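+       /* snapshot the page refcount before XDP can hand the frame to
+        * xdp_do_redirect(); with PAGE_SIZE >= 8192 the reuse check is
+        * offset based, so the snapshot is unused and 0 is stored
+        */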
+       *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+               page_count(rx_buf->page);
+#else
+               0;
+#endif
        prefetchw(rx_buf->page);
        *skb = rx_buf->skb;
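
For readers new to the recycling scheme, a note that is commentary on the patch
rather than part of it: the reuse check in ice_can_reuse_rx_page() compares the
page refcount against pagecnt_bias to count how many halves of the page are
still checked out. If page_count() were read after xdp_do_redirect(), a
redirect target running on another CPU could free the current half first; the
resulting drop in the refcount can hide the fact that the other half is still
owned by the stack or an earlier redirect, the check passes, and the NIC is
handed a half-page that is still in use. Sampling the refcount in
ice_get_rx_buf(), before any XDP processing, keeps the current frame's release
from tipping that decision. A minimal, hypothetical model of the effect (all
names below are invented for illustration; this is not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    /* reusable only if at most the half just handed out is outstanding */
    static bool can_reuse(int pgcnt, int bias)
    {
            return (pgcnt - bias) <= 1;
    }

    int main(void)
    {
            int bias = 3;            /* driver's retained references           */
            int pgcnt = bias + 2;    /* current half + the other half in use   */
            int snapshot = pgcnt;    /* taken before xdp_do_redirect()         */

            pgcnt--;                 /* redirect target frees the current half */

            /* the late read reuses a page whose other half is still owned */
            printf("late read: reuse=%d\n", can_reuse(pgcnt, bias));    /* 1 */
            printf("snapshot:  reuse=%d\n", can_reuse(snapshot, bias)); /* 0 */
            return 0;
    }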
 
  * ice_put_rx_buf - Clean up used buffer and either recycle or free
  * @rx_ring: Rx descriptor ring to transact packets on
  * @rx_buf: Rx buffer to pull data from
+ * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
  *
  * This function will update next_to_clean and then clean up the contents
  * of the rx_buf. It will either recycle the buffer or unmap it and free
  * the associated resources.
  */
-static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+static void
+ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+              int rx_buf_pgcnt)
 {
        u16 ntc = rx_ring->next_to_clean + 1;
 
        if (!rx_buf)
                return;
 
-       if (ice_can_reuse_rx_page(rx_buf)) {
+       if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
                /* hand second half of page back to the ring */
                ice_reuse_rx_page(rx_ring, rx_buf);
        } else {
                struct sk_buff *skb;
                unsigned int size;
                u16 stat_err_bits;
+               int rx_buf_pgcnt;
                u16 vlan_tag = 0;
                u8 rx_ptype;
 
                dma_rmb();
 
                if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
-                       ice_put_rx_buf(rx_ring, NULL);
+                       ice_put_rx_buf(rx_ring, NULL, 0);
                        cleaned_count++;
                        continue;
                }
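
A side note on the hunk above (commentary, not part of the patch): on the flow
director / no-netdev path there is no data buffer to recycle, so ice_put_rx_buf()
is called with a NULL rx_buf and bails out at the !rx_buf guard shown earlier,
before the page count is ever examined; the 0 passed for rx_buf_pgcnt is only a
placeholder.
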
                        ICE_RX_FLX_DESC_PKT_LEN_M;
 
                /* retrieve a buffer from the ring */
-               rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+               rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
 
                if (!size) {
                        xdp.data = NULL;
                total_rx_pkts++;
 
                cleaned_count++;
-               ice_put_rx_buf(rx_ring, rx_buf);
+               ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
                continue;
 construct_skb:
                if (skb) {
                        break;
                }
 
-               ice_put_rx_buf(rx_ring, rx_buf);
+               ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
                cleaned_count++;
 
                /* skip if it is NOP desc */
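
Taken together (again commentary rather than patch content), the per-descriptor
ordering after this change is: ice_get_rx_buf() records the refcount snapshot
into the loop-local rx_buf_pgcnt, the XDP program runs and may call
xdp_do_redirect(), and only then does ice_put_rx_buf() feed the snapshot to
ice_can_reuse_rx_page(), so a release of the current frame in between cannot
flip the reuse decision.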