else
                ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;
 
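+       /* report whether DIM currently drives the moderation settings */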
+       if (wx->adaptive_itr) {
+               ec->use_adaptive_rx_coalesce = 1;
+               ec->use_adaptive_tx_coalesce = 1;
+       }
+
        /* if in mixed tx/rx queues per vector mode, report only rx settings */
        if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
                return 0;
            (ec->tx_coalesce_usecs > (max_eitr >> 2)))
                return -EINVAL;
 
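+       /* hand moderation over to DIM; an ITR setting of 1 means "dynamic" */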
+       if (ec->use_adaptive_rx_coalesce) {
+               wx->adaptive_itr = true;
+               wx->rx_itr_setting = 1;
+               wx->tx_itr_setting = 1;
+               return 0;
+       }
+
        if (ec->rx_coalesce_usecs > 1)
                wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
        else
                wx->rx_itr_setting = ec->rx_coalesce_usecs;
 
-       if (wx->rx_itr_setting != 1)
-               rx_itr_param = wx->rx_itr_setting;
-
        if (ec->tx_coalesce_usecs > 1)
                wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
        else
                wx->tx_itr_setting = ec->tx_coalesce_usecs;
 
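+       /* when leaving adaptive mode, fall back to the base ITR values;
+        * an explicit setting of 1 turns adaptive mode back on
+        */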
+       if (wx->adaptive_itr) {
+               wx->adaptive_itr = false;
+               wx->rx_itr_setting = rx_itr_param;
+               wx->tx_itr_setting = tx_itr_param;
+       } else if (wx->rx_itr_setting == 1 || wx->tx_itr_setting == 1) {
+               wx->adaptive_itr = true;
+       }
+
+       if (wx->rx_itr_setting != 1)
+               rx_itr_param = wx->rx_itr_setting;
+
        if (wx->tx_itr_setting != 1)
                tx_itr_param = wx->tx_itr_setting;
 
 
 #include "wx_lib.h"
 #include "wx_ptp.h"
 #include "wx_hw.h"
+#include "wx_vf_lib.h"
 
 /* Lookup table mapping the HW PTYPE to the bit field for decoding */
 static struct wx_dec_ptype wx_ptype_lookup[256] = {
        return !!budget;
 }
 
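+/* feed the vector's event and rx traffic counters to the rx DIM instance */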
+static void wx_update_rx_dim_sample(struct wx_q_vector *q_vector)
+{
+       struct dim_sample sample = {};
+
+       dim_update_sample(q_vector->total_events,
+                         q_vector->rx.total_packets,
+                         q_vector->rx.total_bytes,
+                         &sample);
+
+       net_dim(&q_vector->rx.dim, &sample);
+}
+
+static void wx_update_tx_dim_sample(struct wx_q_vector *q_vector)
+{
+       struct dim_sample sample = {};
+
+       dim_update_sample(q_vector->total_events,
+                         q_vector->tx.total_packets,
+                         q_vector->tx.total_bytes,
+                         &sample);
+
+       net_dim(&q_vector->tx.dim, &sample);
+}
+
+static void wx_update_dim_sample(struct wx_q_vector *q_vector)
+{
+       wx_update_rx_dim_sample(q_vector);
+       wx_update_tx_dim_sample(q_vector);
+}
+
 /**
  * wx_poll - NAPI polling RX/TX cleanup routine
  * @napi: napi struct with our devices info in it
 
        /* all work done, exit the polling mode */
        if (likely(napi_complete_done(napi, work_done))) {
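+               /* let DIM see this poll's counters before the irq is re-armed */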
+               if (wx->adaptive_itr)
+                       wx_update_dim_sample(q_vector);
                if (netif_running(wx->netdev))
                        wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
        }
 }
 EXPORT_SYMBOL(wx_xmit_frame);
 
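+/* program the vector's EITR register with the latest DIM-selected delay */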
+static void wx_set_itr(struct wx_q_vector *q_vector)
+{
+       struct wx *wx = q_vector->wx;
+       u32 new_itr;
+
+       if (!wx->adaptive_itr)
+               return;
+
+       /* use the smaller of the rx and tx ITR delays calculated by DIM */
+       new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
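+       /* stored ITR values encode usecs shifted left by 2 */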
+       new_itr <<= 2;
+
+       if (new_itr != q_vector->itr) {
+               /* save the new value before programming the hardware */
+               q_vector->itr = new_itr;
+
+               if (wx->pdev->is_virtfn)
+                       wx_write_eitr_vf(q_vector);
+               else
+                       wx_write_eitr(q_vector);
+       }
+}
+
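+/* apply the rx moderation profile selected by DIM and restart measuring */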
+static void wx_rx_dim_work(struct work_struct *work)
+{
+       struct dim *dim = container_of(work, struct dim, work);
+       struct dim_cq_moder rx_moder;
+       struct wx_ring_container *rx;
+       struct wx_q_vector *q_vector;
+
+       rx = container_of(dim, struct wx_ring_container, dim);
+
+       rx_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+       rx->itr = rx_moder.usec;
+
+       q_vector = container_of(rx, struct wx_q_vector, rx);
+       wx_set_itr(q_vector);
+
+       dim->state = DIM_START_MEASURE;
+}
+
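+/* tx counterpart of wx_rx_dim_work */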
+static void wx_tx_dim_work(struct work_struct *work)
+{
+       struct dim *dim = container_of(work, struct dim, work);
+       struct dim_cq_moder tx_moder;
+       struct wx_ring_container *tx;
+       struct wx_q_vector *q_vector;
+
+       tx = container_of(dim, struct wx_ring_container, dim);
+
+       tx_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+       tx->itr = tx_moder.usec;
+
+       q_vector = container_of(tx, struct wx_q_vector, tx);
+       wx_set_itr(q_vector);
+
+       dim->state = DIM_START_MEASURE;
+}
+
 void wx_napi_enable_all(struct wx *wx)
 {
        struct wx_q_vector *q_vector;
 
        for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
                q_vector = wx->q_vector[q_idx];
+
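+               /* set up DIM state before this vector can take interrupts */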
+               INIT_WORK(&q_vector->rx.dim.work, wx_rx_dim_work);
+               INIT_WORK(&q_vector->tx.dim.work, wx_tx_dim_work);
+               q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+               q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
                napi_enable(&q_vector->napi);
        }
 }
        for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
                q_vector = wx->q_vector[q_idx];
                napi_disable(&q_vector->napi);
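+               /* make sure no DIM work runs once NAPI is stopped */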
+               disable_work_sync(&q_vector->rx.dim.work);
+               disable_work_sync(&q_vector->tx.dim.work);
        }
 }
 EXPORT_SYMBOL(wx_napi_disable_all);
        struct wx_q_vector *q_vector = data;
 
        /* EIAM disabled interrupts (on this vector) for us */
-       if (q_vector->rx.ring || q_vector->tx.ring)
+       if (q_vector->rx.ring || q_vector->tx.ring) {
                napi_schedule_irqoff(&q_vector->napi);
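+               /* count this interrupt for DIM's event-rate sample */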
+               q_vector->total_events++;
+       }
 
        return IRQ_HANDLED;
 }