 #define IGC_XDP_PASS           0
 #define IGC_XDP_CONSUMED       BIT(0)
 #define IGC_XDP_TX             BIT(1)
+#define IGC_XDP_REDIRECT       BIT(2)
 
 static int debug = -1;
 
 }
 
 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
-                                              const unsigned int size)
+                                              const unsigned int size,
+                                              int *rx_buffer_pgcnt)
 {
        struct igc_rx_buffer *rx_buffer;
 
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+               page_count(rx_buffer->page);
+#else
+               0;
+#endif
        prefetchw(rx_buffer->page);
 
        /* we are reusing so sync this buffer for CPU use */
        new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
 }
 
-static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
+static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
+                                 int rx_buffer_pgcnt)
 {
        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
        struct page *page = rx_buffer->page;
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+       if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
                return false;
 #else
 #define IGC_LAST_OFFSET \
         * the pagecnt_bias and page count so that we fully restock the
         * number of references the driver holds.
         */
-       if (unlikely(!pagecnt_bias)) {
-               page_ref_add(page, USHRT_MAX);
+       if (unlikely(pagecnt_bias == 1)) {
+               page_ref_add(page, USHRT_MAX - 1);
                rx_buffer->pagecnt_bias = USHRT_MAX;
        }
 
 }
 
 static void igc_put_rx_buffer(struct igc_ring *rx_ring,
-                             struct igc_rx_buffer *rx_buffer)
+                             struct igc_rx_buffer *rx_buffer,
+                             int rx_buffer_pgcnt)
 {
-       if (igc_can_reuse_rx_page(rx_buffer)) {
+       if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
                /* hand second half of page back to the ring */
                igc_reuse_rx_page(rx_ring, rx_buffer);
        } else {
        bi->dma = dma;
        bi->page = page;
        bi->page_offset = igc_rx_offset(rx_ring);
-       bi->pagecnt_bias = 1;
+       page_ref_add(page, USHRT_MAX - 1);
+       bi->pagecnt_bias = USHRT_MAX;
 
        return true;
 }
                else
                        res = IGC_XDP_TX;
                break;
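+       /* Queue the frame on the map/device chosen by the BPF program;
+        * transmission is deferred until xdp_do_flush() at the end of the
+        * NAPI poll.
+        */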
+       case XDP_REDIRECT:
+               if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
+                       res = IGC_XDP_CONSUMED;
+               else
+                       res = IGC_XDP_REDIRECT;
+               break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
                igc_flush_tx_descriptors(ring);
                __netif_tx_unlock(nq);
        }
+
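+       /* xdp_do_redirect() only queues frames; flush them once per NAPI
+        * poll so the destination driver actually transmits them.
+        */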
+       if (status & IGC_XDP_REDIRECT)
+               xdp_do_flush();
 }
 
 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
        struct igc_ring *rx_ring = q_vector->rx.ring;
        struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = igc_desc_unused(rx_ring);
-       int xdp_status = 0;
+       int xdp_status = 0, rx_buffer_pgcnt;
 
        while (likely(total_packets < budget)) {
                union igc_adv_rx_desc *rx_desc;
                 */
                dma_rmb();
 
-               rx_buffer = igc_get_rx_buffer(rx_ring, size);
+               rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
                truesize = igc_get_rx_frame_truesize(rx_ring, size);
 
                pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
                                rx_buffer->pagecnt_bias++;
                                break;
                        case IGC_XDP_TX:
+                       case IGC_XDP_REDIRECT:
                                igc_rx_buffer_flip(rx_buffer, truesize);
                                xdp_status |= xdp_res;
                                break;
                        break;
                }
 
-               igc_put_rx_buffer(rx_ring, rx_buffer);
+               igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
                cleaned_count++;
 
                /* fetch next buffer in frame if non-eop */
        }
 }
 
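+/* .ndo_xdp_xmit handler: transmit XDP frames redirected to this device.
+ * Frames that cannot be placed on the per-CPU XDP Tx ring are returned to
+ * their originator via xdp_return_frame_rx_napi().
+ */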
+static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+                       struct xdp_frame **frames, u32 flags)
+{
+       struct igc_adapter *adapter = netdev_priv(dev);
+       int cpu = smp_processor_id();
+       struct netdev_queue *nq;
+       struct igc_ring *ring;
+       int i, drops;
+
+       if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
+               return -ENETDOWN;
+
+       if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+               return -EINVAL;
+
+       ring = igc_xdp_get_tx_ring(adapter, cpu);
+       nq = txring_txq(ring);
+
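+       /* The XDP Tx ring is shared with the regular Tx path, so serialize
+        * access through the netdev queue lock.
+        */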
+       __netif_tx_lock(nq, cpu);
+
+       drops = 0;
+       for (i = 0; i < num_frames; i++) {
+               int err;
+               struct xdp_frame *xdpf = frames[i];
+
+               err = igc_xdp_init_tx_descriptor(ring, xdpf);
+               if (err) {
+                       xdp_return_frame_rx_napi(xdpf);
+                       drops++;
+               }
+       }
+
+       if (flags & XDP_XMIT_FLUSH)
+               igc_flush_tx_descriptors(ring);
+
+       __netif_tx_unlock(nq);
+
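+       /* Per the ndo_xdp_xmit contract, report how many frames were queued */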
+       return num_frames - drops;
+}
+
 static const struct net_device_ops igc_netdev_ops = {
        .ndo_open               = igc_open,
        .ndo_stop               = igc_close,
        .ndo_do_ioctl           = igc_ioctl,
        .ndo_setup_tc           = igc_setup_tc,
        .ndo_bpf                = igc_bpf,
+       .ndo_xdp_xmit           = igc_xdp_xmit,
 };
 
 /* PCIe configuration access */