struct virtnet_sq_stats stats;
 
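+       /* TX interrupt coalescing parameters last programmed on this queue
+        * via the per-VQ coalescing command; reported back through ethtool's
+        * per-queue get_coalesce.
+        */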
+       struct virtnet_interrupt_coalesce intr_coal;
+
        struct napi_struct napi;
 
        /* Record whether sq is in reset state. */
 
        struct virtnet_rq_stats stats;
 
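+       /* RX interrupt coalescing parameters last programmed on this queue
+        * via the per-VQ coalescing command; reported back through ethtool's
+        * per-queue get_coalesce.
+        */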
+       struct virtnet_interrupt_coalesce intr_coal;
+
        /* Chain pages by the private ptr. */
        struct page *pages;
 
        struct virtio_net_ctrl_rss rss;
        struct virtio_net_ctrl_coal_tx coal_tx;
        struct virtio_net_ctrl_coal_rx coal_rx;
+       struct virtio_net_ctrl_coal_vq coal_vq;
 };
 
 struct virtnet_info {
        return 0;
 }
 
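+/* Send VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET to configure the interrupt
+ * coalescing parameters (max_usecs, max_packets) of a single virtqueue @vqn.
+ */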
+static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
+                                        u16 vqn, u32 max_usecs, u32 max_packets)
+{
+       struct scatterlist sgs;
+
+       vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
+       vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
+       vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
+       sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+
+       if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+                                 VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
+                                 &sgs))
+               return -EINVAL;
+
+       return 0;
+}
+
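+/* Apply the per-queue coalescing parameters in @ec to the RX and TX
+ * virtqueues of queue pair @queue, caching the accepted values in
+ * intr_coal so they can later be reported back to ethtool.
+ */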
+static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
+                                         struct ethtool_coalesce *ec,
+                                         u16 queue)
+{
+       int err;
+
+       if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) {
+               err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
+                                                   ec->rx_coalesce_usecs,
+                                                   ec->rx_max_coalesced_frames);
+               if (err)
+                       return err;
+               /* Save parameters */
+               vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+               vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+       }
+
+       if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) {
+               err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
+                                                   ec->tx_coalesce_usecs,
+                                                   ec->tx_max_coalesced_frames);
+               if (err)
+                       return err;
+               /* Save parameters */
+               vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
+               vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
+       }
+
+       return 0;
+}
+
 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
 {
        /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
        return 0;
 }
 
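+/* A queue's NAPI weight may only change while the interface is down:
+ * return -EBUSY if the requested weight differs from the current one
+ * while the device is up, otherwise flag that an update is needed.
+ */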
+static int virtnet_should_update_vq_weight(int dev_flags, int weight,
+                                          int vq_weight, bool *should_update)
+{
+       if (weight ^ vq_weight) {
+               if (dev_flags & IFF_UP)
+                       return -EBUSY;
+               *should_update = true;
+       }
+
+       return 0;
+}
+
 static int virtnet_set_coalesce(struct net_device *dev,
                                struct ethtool_coalesce *ec,
                                struct kernel_ethtool_coalesce *kernel_coal,
                                struct netlink_ext_ack *extack)
 {
        struct virtnet_info *vi = netdev_priv(dev);
-       int ret, i, napi_weight;
+       int ret, queue_number, napi_weight;
        bool update_napi = false;
 
        /* Can't change NAPI weight if the link is up */
        napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
-       if (napi_weight ^ vi->sq[0].napi.weight) {
-               if (dev->flags & IFF_UP)
-                       return -EBUSY;
-               else
-                       update_napi = true;
+       for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
+               ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
+                                                     vi->sq[queue_number].napi.weight,
+                                                     &update_napi);
+               if (ret)
+                       return ret;
+
+               if (update_napi) {
+                       /* For simplicity, all queues in [queue_number, vi->max_queue_pairs)
+                        * will be updated, which might not be strictly necessary.
+                        */
+                       break;
+               }
        }
 
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
                return ret;
 
        if (update_napi) {
-               for (i = 0; i < vi->max_queue_pairs; i++)
-                       vi->sq[i].napi.weight = napi_weight;
+               for (; queue_number < vi->max_queue_pairs; queue_number++)
+                       vi->sq[queue_number].napi.weight = napi_weight;
        }
 
        return ret;
        return 0;
 }
 
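+/* ethtool per-queue coalesce set handler, reachable from userspace with,
+ * for example (device name, queue mask and values are illustrative only):
+ *   ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 10 rx-frames 32
+ * Falls back to virtnet_coal_params_supported() when the device did not
+ * negotiate VIRTIO_NET_F_VQ_NOTF_COAL.
+ */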
+static int virtnet_set_per_queue_coalesce(struct net_device *dev,
+                                         u32 queue,
+                                         struct ethtool_coalesce *ec)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int ret, napi_weight;
+       bool update_napi = false;
+
+       if (queue >= vi->max_queue_pairs)
+               return -EINVAL;
+
+       /* Can't change NAPI weight if the link is up */
+       napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
+       ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
+                                             vi->sq[queue].napi.weight,
+                                             &update_napi);
+       if (ret)
+               return ret;
+
+       if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+               ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
+       else
+               ret = virtnet_coal_params_supported(ec);
+
+       if (ret)
+               return ret;
+
+       if (update_napi)
+               vi->sq[queue].napi.weight = napi_weight;
+
+       return 0;
+}
+
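+/* ethtool per-queue coalesce get handler: report the parameters cached in
+ * intr_coal, or fixed 1-frame values (tx only when tx napi is enabled) if
+ * VIRTIO_NET_F_VQ_NOTF_COAL was not negotiated.
+ */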
+static int virtnet_get_per_queue_coalesce(struct net_device *dev,
+                                         u32 queue,
+                                         struct ethtool_coalesce *ec)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+
+       if (queue >= vi->max_queue_pairs)
+               return -EINVAL;
+
+       if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+               ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
+               ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
+               ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
+               ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
+       } else {
+               ec->rx_max_coalesced_frames = 1;
+
+               if (vi->sq[0].napi.weight)
+                       ec->tx_max_coalesced_frames = 1;
+       }
+
+       return 0;
+}
+
 static void virtnet_init_settings(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        .set_link_ksettings = virtnet_set_link_ksettings,
        .set_coalesce = virtnet_set_coalesce,
        .get_coalesce = virtnet_get_coalesce,
+       .set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
+       .get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
        .get_rxfh_key_size = virtnet_get_rxfh_key_size,
        .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
        .get_rxfh = virtnet_get_rxfh,