                intr->dyn_ctl = idpf_get_reg_addr(adapter,
                                                  reg_vals[vec_id].dyn_ctl_reg);
                intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
+               intr->dyn_ctl_intena_msk_m = PF_GLINT_DYN_CTL_INTENA_MSK_M;
                intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
                intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
+               intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
 
                spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
                                               IDPF_PF_ITR_IDX_SPACING);
 
        /* net_dim() updates ITR out-of-band using a work item */
        idpf_net_dim(q_vector);
 
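+       /* The IRQ is re-enabled below, so exit WB-on-ITR mode */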
+       q_vector->wb_on_itr = false;
        intval = idpf_vport_intr_buildreg_itr(q_vector,
                                              IDPF_NO_ITR_UPDATE_IDX, 0);
 
        clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
 
        /* If work not completed, return budget and polling will return */
-       if (!clean_complete)
+       if (!clean_complete) {
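+               /* Still polling: let HW write back completions without an IRQ */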
+               idpf_vport_intr_set_wb_on_itr(q_vector);
                return budget;
+       }
 
        work_done = min_t(int, work_done, budget - 1);
 
         */
        if (likely(napi_complete_done(napi, work_done)))
                idpf_vport_intr_update_itr_ena_irq(q_vector);
+       else
+               idpf_vport_intr_set_wb_on_itr(q_vector);
 
        /* Switch to poll mode in the tear-down path after sending disable
         * queues virtchnl message, as the interrupts will be disabled after
 
  * struct idpf_intr_reg
  * @dyn_ctl: Dynamic control interrupt register
  * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
+ * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask bit (INTENA_MSK)
  * @dyn_ctl_itridx_s: Register bit offset for ITR index
  * @dyn_ctl_itridx_m: Mask for ITR index
  * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
+ * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
  * @rx_itr: RX ITR register
  * @tx_itr: TX ITR register
  * @icr_ena: Interrupt cause register offset
 struct idpf_intr_reg {
        void __iomem *dyn_ctl;
        u32 dyn_ctl_intena_m;
+       u32 dyn_ctl_intena_msk_m;
        u32 dyn_ctl_itridx_s;
        u32 dyn_ctl_itridx_m;
        u32 dyn_ctl_intrvl_s;
+       u32 dyn_ctl_wb_on_itr_m;
        void __iomem *rx_itr;
        void __iomem *tx_itr;
        void __iomem *icr_ena;
  * @intr_reg: See struct idpf_intr_reg
  * @napi: napi handler
  * @total_events: Number of interrupts processed
+ * @wb_on_itr: whether WB on ITR is enabled
  * @tx_dim: Data for TX net_dim algorithm
  * @tx_itr_value: TX interrupt throttling rate
  * @tx_intr_mode: Dynamic ITR or not
        __cacheline_group_begin_aligned(read_write);
        struct napi_struct napi;
        u16 total_events;
+       bool wb_on_itr;
 
        struct dim tx_dim;
        u16 tx_itr_value;
        cpumask_var_t affinity_mask;
        __cacheline_group_end_aligned(cold);
 };
-libeth_cacheline_set_assert(struct idpf_q_vector, 104,
+libeth_cacheline_set_assert(struct idpf_q_vector, 112,
                            424 + 2 * sizeof(struct dim),
                            8 + sizeof(cpumask_var_t));
 
                idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
 }
 
+/**
+ * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
+ * @q_vector: pointer to queue vector struct
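+ *
+ * Tell the HW to write back completed descriptors on ITR expiration even
+ * while the vector's interrupt remains disabled.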
+ */
+static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
+{
+       struct idpf_intr_reg *reg;
+
+       if (q_vector->wb_on_itr)
+               return;
+
+       q_vector->wb_on_itr = true;
+       reg = &q_vector->intr_reg;
+
+       writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
+              (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
+              reg->dyn_ctl);
+}
+
 int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
 void idpf_vport_init_num_qs(struct idpf_vport *vport,
                            struct virtchnl2_create_vport *vport_msg);
 
                intr->dyn_ctl = idpf_get_reg_addr(adapter,
                                                  reg_vals[vec_id].dyn_ctl_reg);
                intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M;
+               intr->dyn_ctl_intena_msk_m = VF_INT_DYN_CTLN_INTENA_MSK_M;
                intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
+               intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
 
                spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
                                               IDPF_VF_ITR_IDX_SPACING);