}
        q_vector = vsi->q_vectors[v_idx];
 
-       ice_for_each_tx_ring(tx_ring, q_vector->tx)
+       ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+               if (vsi->netdev)
+                       netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
+                                            NETDEV_QUEUE_TYPE_TX, NULL);
                tx_ring->q_vector = NULL;
-       ice_for_each_rx_ring(rx_ring, q_vector->rx)
+       }
+       ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+               if (vsi->netdev)
+                       netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
+                                            NETDEV_QUEUE_TYPE_RX, NULL);
                rx_ring->q_vector = NULL;
+       }
 
        /* only VSI with an associated netdev is set up with NAPI */
        if (vsi->netdev)
 
                        goto unroll_vector_base;
 
                ice_vsi_map_rings_to_vectors(vsi);
+
+               /* Associate q_vector rings to napi */
+               ice_vsi_set_napi_queues(vsi, true);
+
                vsi->stat_offsets_loaded = false;
 
                if (ice_is_xdp_ena_vsi(vsi)) {
                synchronize_irq(vsi->q_vectors[i]->irq.virq);
 }
 
+/**
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @dev: device to which NAPI and queue belong
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ * @locked: is the rtnl_lock already held
+ *
+ * Wrapper around netif_queue_set_napi() that takes rtnl_lock for the
+ * duration of the call when the caller does not already hold it
+ * (@locked == false). Pass a NULL @napi to clear the association.
+ */
+static void
+ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+                  enum netdev_queue_type type, struct napi_struct *napi,
+                  bool locked)
+{
+       if (!locked)
+               rtnl_lock();
+       netif_queue_set_napi(dev, queue_index, type, napi);
+       if (!locked)
+               rtnl_unlock();
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector:
+ * every Rx ring and every Tx ring attached to @q_vector is pointed at
+ * the vector's napi instance on the VSI's netdev. @locked is forwarded
+ * to ice_queue_set_napi() so rtnl_lock is only taken when needed.
+ * NOTE(review): assumes q_vector->vsi->netdev is non-NULL — callers
+ * such as ice_vsi_set_napi_queues() check this before calling.
+ */
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+       struct ice_rx_ring *rx_ring;
+       struct ice_tx_ring *tx_ring;
+
+       ice_for_each_rx_ring(rx_ring, q_vector->rx)
+               ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+                                  NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+                                  locked);
+
+       ice_for_each_tx_ring(tx_ring, q_vector->tx)
+               ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+                                  NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+                                  locked);
+}
+
+/**
+ * ice_vsi_set_napi_queues - associate netdev queues with napi
+ * @vsi: VSI pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Associate queue[s] with napi for all vectors. No-op for a VSI
+ * without a netdev, since only a VSI with an associated netdev is
+ * set up with NAPI.
+ */
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+{
+       int i;
+
+       /* Only a VSI with a netdev has NAPI instances to map */
+       if (!vsi->netdev)
+               return;
+
+       ice_for_each_q_vector(vsi, i)
+               ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+}
+
 /**
  * ice_vsi_release - Delete a VSI and free its resources
  * @vsi: the VSI being removed