#include <net/ll_poll.h>
 
+#ifdef CONFIG_NET_LL_RX_POLL
+#define LL_EXTENDED_STATS
+#endif
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 struct ixgbe_queue_stats {
        u64 packets;
        u64 bytes;
+#ifdef LL_EXTENDED_STATS
+       u64 yields;
+       u64 misses;
+       u64 cleaned;
+#endif /* LL_EXTENDED_STATS */
 };
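
The three new counters are placed inside struct ixgbe_queue_stats on
purpose: any stats-length macro derived from sizeof() then picks them up
with no further change.  A simplified sketch of that convention (the
real IXGBE_QUEUE_STATS_LEN in ixgbe_ethtool.c pulls the queue counts out
of the netdev; the bare num_tx_queues/num_rx_queues names here are
illustrative only):

	/* The per-queue stride grows from 2 to 5 u64s when
	 * LL_EXTENDED_STATS is defined, keeping get_sset_count in step
	 * with the extra data[] slots and strings added further down.
	 */
	#define IXGBE_QUEUE_STATS_LEN \
		((num_tx_queues + num_rx_queues) * \
		 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))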
 
 struct ixgbe_tx_queue_stats {
                WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
                q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
                rc = false;
+#ifdef LL_EXTENDED_STATS
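+               /* busy poll owns this vector; count NAPI's yield on the Tx ring */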
+               q_vector->tx.ring->stats.yields++;
+#endif
        } else
                /* we don't care if someone yielded */
                q_vector->state = IXGBE_QV_STATE_NAPI;
        if ((q_vector->state & IXGBE_QV_LOCKED)) {
                q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
                rc = false;
+#ifdef LL_EXTENDED_STATS
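+               /* another context owns this vector; count the busy-poll yield on the Rx ring */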
+               q_vector->rx.ring->stats.yields++;
+#endif
        } else
                /* preserve yield marks */
                q_vector->state |= IXGBE_QV_STATE_POLL;
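
Both fragments above sit inside the q_vector lock helpers introduced by
the busy-poll series; only the yields++ lines are new in this patch.
For context, a sketch of the NAPI-side helper with its locking shown,
assuming the spinlock-based ixgbe_qv_lock_napi() from that series
(everything outside the LL_EXTENDED_STATS block is pre-existing
context, not part of this patch):

	static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
	{
		int rc = true;

		spin_lock(&q_vector->lock);
		if (q_vector->state & IXGBE_QV_LOCKED) {
			WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
			q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
			rc = false;
	#ifdef LL_EXTENDED_STATS
			q_vector->tx.ring->stats.yields++;
	#endif
		} else
			/* we don't care if someone yielded */
			q_vector->state = IXGBE_QV_STATE_NAPI;
		spin_unlock(&q_vector->lock);
		return rc;
	}

ixgbe_qv_lock_poll() mirrors this from the socket-poll side and records
its yields on the Rx ring instead.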
 
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
+#ifdef LL_EXTENDED_STATS
+                       data[i] = 0;
+                       data[i+1] = 0;
+                       data[i+2] = 0;
+                       i += 3;
+#endif
                        continue;
                }
 
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
+#ifdef LL_EXTENDED_STATS
+               data[i] = ring->stats.yields;
+               data[i+1] = ring->stats.misses;
+               data[i+2] = ring->stats.cleaned;
+               i += 3;
+#endif
        }
        for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
                ring = adapter->rx_ring[j];
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
+#ifdef LL_EXTENDED_STATS
+                       data[i] = 0;
+                       data[i+1] = 0;
+                       data[i+2] = 0;
+                       i += 3;
+#endif
                        continue;
                }
 
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
+#ifdef LL_EXTENDED_STATS
+               data[i] = ring->stats.yields;
+               data[i+1] = ring->stats.misses;
+               data[i+2] = ring->stats.cleaned;
+               i += 3;
+#endif
        }
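
Note the asymmetry: yields is advanced in the lock helpers above, but
misses and cleaned are only read in these two loops; they are advanced
in the busy-poll receive path.  The extended reads also sit outside the
u64_stats_fetch_begin_bh()/retry loop, i.e. they are plain u64 loads,
which the patch evidently accepts for these debug-oriented counters.  A
sketch of where the two counters move, assuming the
ixgbe_low_latency_recv() loop from the companion busy-poll patch (the
helper names and the budget of 4 are that patch's convention, shown for
illustration):

	ixgbe_for_each_ring(ring, q_vector->rx) {
		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
	#ifdef LL_EXTENDED_STATS
		if (found)
			/* busy poll reaped packets directly */
			ring->stats.cleaned += found;
		else
			/* busy poll ran but the ring was empty */
			ring->stats.misses++;
	#endif
		if (found)
			break;
	}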
 
        for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
+#ifdef LL_EXTENDED_STATS
+                       sprintf(p, "tx_q_%u_napi_yield", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_q_%u_misses", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_q_%u_cleaned", i);
+                       p += ETH_GSTRING_LEN;
+#endif /* LL_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
+#ifdef LL_EXTENDED_STATS
+                       sprintf(p, "rx_q_%u_ll_poll_yield", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_q_%u_misses", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_q_%u_cleaned", i);
+                       p += ETH_GSTRING_LEN;
+#endif /* LL_EXTENDED_STATS */
                }
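
ethtool pairs each label emitted in get_strings positionally with the
value written at the same index in get_ethtool_stats, so the three
extra strings per queue must keep the same order (yield, misses,
cleaned) and the same #ifdef guard as the three extra data[] slots
filled above; a mismatch shifts every stat that follows.  With the
patch applied the counters appear in the normal stats dump (interface
name illustrative, zero values shown only as placeholders):

	# ethtool -S eth0
	...
	rx_q_0_ll_poll_yield: 0
	rx_q_0_misses: 0
	rx_q_0_cleaned: 0
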
                for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
                        sprintf(p, "tx_pb_%u_pxon", i);