 #include <linux/ip.h>
 #include <linux/pm_runtime.h>
 #include <net/pkt_sched.h>
+#include <linux/bpf_trace.h>
 
 #include <net/ipv6.h>
 
 #include "igc.h"
 #include "igc_hw.h"
 #include "igc_tsn.h"
+#include "igc_xdp.h"
 
 #define DRV_SUMMARY    "Intel(R) 2.5G Ethernet Linux Driver"
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 
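+/* Verdict from igc_xdp_run_prog(): the frame either continues down the
+ * normal Rx path or was consumed by the XDP program. The result is passed
+ * back encoded in the skb pointer via ERR_PTR().
+ */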
+#define IGC_XDP_PASS           0
+#define IGC_XDP_CONSUMED       BIT(0)
+
 static int debug = -1;
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
                        i = 0;
        }
 
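+       /* The flag is set again in igc_configure_rx_ring() when an XDP
+        * program is attached, so clear it here.
+        */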
+       clear_ring_uses_large_buffer(rx_ring);
+
        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        return err;
 }
 
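+/* Returns true when an XDP program is attached to the interface. */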
+static bool igc_xdp_is_enabled(struct igc_adapter *adapter)
+{
+       return !!adapter->xdp_prog;
+}
+
 /**
  * igc_configure_rx_ring - Configure a receive ring after Reset
  * @adapter: board private structure
        u32 srrctl = 0, rxdctl = 0;
        u64 rdba = ring->dma;
 
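+       /* XDP needs headroom (XDP_PACKET_HEADROOM) in front of each frame,
+        * so switch to the larger Rx buffer size when a program is attached.
+        */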
+       if (igc_xdp_is_enabled(adapter))
+               set_ring_uses_large_buffer(ring);
+
        /* disable the queue */
        wr32(IGC_RXDCTL(reg_idx), 0);
 
 
 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
                                         struct igc_rx_buffer *rx_buffer,
-                                        unsigned int size, int pkt_offset,
+                                        struct xdp_buff *xdp,
                                         ktime_t timestamp)
 {
-       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset +
-                  pkt_offset;
+       unsigned int size = xdp->data_end - xdp->data;
        unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
+       void *va = xdp->data;
        unsigned int headlen;
        struct sk_buff *skb;
 
                                union igc_adv_rx_desc *rx_desc,
                                struct sk_buff *skb)
 {
+       /* XDP packets use an error pointer, so abort at this point */
+       if (IS_ERR(skb))
+               return true;
+
        if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
                struct net_device *netdev = rx_ring->netdev;
 
 
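+/* Headroom reserved in front of the received frame: IGC_SKB_PAD for
+ * build_skb rings, XDP_PACKET_HEADROOM when an XDP program is attached,
+ * none otherwise.
+ */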
 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
 {
-       return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
+       struct igc_adapter *adapter = rx_ring->q_vector->adapter;
+
+       if (ring_uses_build_skb(rx_ring))
+               return IGC_SKB_PAD;
+       if (igc_xdp_is_enabled(adapter))
+               return XDP_PACKET_HEADROOM;
+
+       return 0;
 }
 
 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
        }
 }
 
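+/* Run the attached XDP program on a received frame. Returns NULL when the
+ * frame should follow the normal Rx path (XDP_PASS or no program loaded),
+ * or an ERR_PTR() when it was consumed (XDP_DROP/XDP_ABORTED).
+ */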
+static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
+                                       struct xdp_buff *xdp)
+{
+       struct bpf_prog *prog;
+       int res;
+       u32 act;
+
+       rcu_read_lock();
+
+       prog = READ_ONCE(adapter->xdp_prog);
+       if (!prog) {
+               res = IGC_XDP_PASS;
+               goto unlock;
+       }
+
+       act = bpf_prog_run_xdp(prog, xdp);
+       switch (act) {
+       case XDP_PASS:
+               res = IGC_XDP_PASS;
+               break;
+       default:
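+               /* Unknown verdict: warn and fall through to the
+                * aborted/drop handling below.
+                */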
+               bpf_warn_invalid_xdp_action(act);
+               fallthrough;
+       case XDP_ABORTED:
+               trace_xdp_exception(adapter->netdev, prog, act);
+               fallthrough;
+       case XDP_DROP:
+               res = IGC_XDP_CONSUMED;
+               break;
+       }
+
+unlock:
+       rcu_read_unlock();
+       return ERR_PTR(-res);
+}
+
 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 {
        unsigned int total_bytes = 0, total_packets = 0;
                union igc_adv_rx_desc *rx_desc;
                struct igc_rx_buffer *rx_buffer;
                ktime_t timestamp = 0;
+               struct xdp_buff xdp;
                int pkt_offset = 0;
                unsigned int size;
+               void *pktbuf;
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
 
                rx_buffer = igc_get_rx_buffer(rx_ring, size);
 
-               if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
-                       void *pktbuf = page_address(rx_buffer->page) +
-                                      rx_buffer->page_offset;
+               pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
 
+               if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
                        timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
                                                        pktbuf);
                        pkt_offset = IGC_TS_HDR_LEN;
                        size -= IGC_TS_HDR_LEN;
                }
 
-               /* retrieve a buffer from the ring */
-               if (skb)
+               if (!skb) {
+                       struct igc_adapter *adapter = q_vector->adapter;
+
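+                       /* Build an xdp_buff around the Rx buffer so the
+                        * BPF program can inspect and adjust the frame.
+                        */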
+                       xdp.data = pktbuf + pkt_offset;
+                       xdp.data_end = xdp.data + size;
+                       xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring);
+                       xdp_set_data_meta_invalid(&xdp);
+                       xdp.frame_sz = igc_get_rx_frame_truesize(rx_ring, size);
+
+                       skb = igc_xdp_run_prog(adapter, &xdp);
+               }
+
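+               /* The XDP program consumed the frame (drop or abort): hand
+                * the page reference back for reuse and update the counters.
+                */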
+               if (IS_ERR(skb)) {
+                       rx_buffer->pagecnt_bias++;
+                       total_packets++;
+                       total_bytes += size;
+               } else if (skb)
                        igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
                else if (ring_uses_build_skb(rx_ring))
                        skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
                else
-                       skb = igc_construct_skb(rx_ring, rx_buffer, size,
-                                               pkt_offset, timestamp);
+                       skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
+                                               timestamp);
 
                /* exit if we failed to retrieve a buffer */
                if (!skb) {
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        struct igc_adapter *adapter = netdev_priv(netdev);
 
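+       /* The XDP data path requires the whole frame to fit in a single Rx
+        * buffer, so jumbo frames cannot be used while a program is attached.
+        */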
+       if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
+               netdev_dbg(netdev, "Jumbo frames not supported with XDP\n");
+               return -EINVAL;
+       }
+
        /* adjust max frame to be at least the size of a standard frame */
        if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
                max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
        }
 }
 
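+/* ndo_bpf handler: dispatch XDP setup commands coming from the stack. */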
+static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+       struct igc_adapter *adapter = netdev_priv(dev);
+
+       switch (bpf->command) {
+       case XDP_SETUP_PROG:
+               return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static const struct net_device_ops igc_netdev_ops = {
        .ndo_open               = igc_open,
        .ndo_stop               = igc_close,
        .ndo_features_check     = igc_features_check,
        .ndo_do_ioctl           = igc_ioctl,
        .ndo_setup_tc           = igc_setup_tc,
+       .ndo_bpf                = igc_bpf,
 };
 
 /* PCIe configuration access */