return err;
 }
 
+static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
+                              struct genl_info *info, u32 index)
+{
+       struct virtio_net_config config = {};
+       u64 features;
+       u16 max_vqp;
+       u8 status;
+       int err;
+
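+       /*
+        * max_virtqueue_pairs and the driver features read below are only
+        * meaningful once feature negotiation has completed.
+        */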
+       status = vdev->config->get_status(vdev);
+       if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+               NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
+               return -EAGAIN;
+       }
+       vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
+
+       max_vqp = le16_to_cpu(config.max_virtqueue_pairs);
+       if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
+               return -EMSGSIZE;
+
+       features = vdev->config->get_driver_features(vdev);
+       if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
+                             features, VDPA_ATTR_PAD))
+               return -EMSGSIZE;
+
+       if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
+               return -EMSGSIZE;
+
+       err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
+                            struct genl_info *info, u32 index)
+{
+       int err;
+
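+       /* Hold cf_mutex across the config reads done by vdpa_fill_stats_rec(). */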
+       mutex_lock(&vdev->cf_mutex);
+       if (!vdev->config->get_vendor_vq_stats) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       err = vdpa_fill_stats_rec(vdev, msg, info, index);
+out:
+       mutex_unlock(&vdev->cf_mutex);
+       return err;
+}
+
+static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
+                                     struct sk_buff *msg,
+                                     struct genl_info *info, u32 index)
+{
+       u32 device_id;
+       void *hdr;
+       int err;
+       u32 portid = info->snd_portid;
+       u32 seq = info->snd_seq;
+       u32 flags = 0;
+
+       hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
+                         VDPA_CMD_DEV_VSTATS_GET);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
+               err = -EMSGSIZE;
+               goto undo_msg;
+       }
+
+       device_id = vdev->config->get_device_id(vdev);
+       if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
+               err = -EMSGSIZE;
+               goto undo_msg;
+       }
+
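+       /* Vendor queue stats are only reported for virtio-net devices for now. */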
+       switch (device_id) {
+       case VIRTIO_ID_NET:
+               if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
+                       NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value");
+                       err = -ERANGE;
+                       break;
+               }
+
+               err = vendor_stats_fill(vdev, msg, info, index);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+       genlmsg_end(msg, hdr);
+
+       return err;
+
+undo_msg:
+       genlmsg_cancel(msg, hdr);
+       return err;
+}
+
 static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
 {
        struct vdpa_device *vdev;
        return msg->len;
 }
 
+static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
+                                         struct genl_info *info)
+{
+       struct vdpa_device *vdev;
+       struct sk_buff *msg;
+       const char *devname;
+       struct device *dev;
+       u32 index;
+       int err;
+
+       if (!info->attrs[VDPA_ATTR_DEV_NAME])
+               return -EINVAL;
+
+       if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
+               return -EINVAL;
+
+       devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
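+       /*
+        * Hold vdpa_dev_mutex so the device cannot be unregistered while
+        * the reply is being built.
+        */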
+       mutex_lock(&vdpa_dev_mutex);
+       dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
+       if (!dev) {
+               NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+               err = -ENODEV;
+               goto dev_err;
+       }
+       vdev = container_of(dev, struct vdpa_device, dev);
+       if (!vdev->mdev) {
+               NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
+               err = -EINVAL;
+               goto mdev_err;
+       }
+       err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
+       if (err)
+               goto mdev_err;
+
+       err = genlmsg_reply(msg, info);
+
+       put_device(dev);
+       mutex_unlock(&vdpa_dev_mutex);
+
+       return err;
+
+mdev_err:
+       put_device(dev);
+dev_err:
+       nlmsg_free(msg);
+       mutex_unlock(&vdpa_dev_mutex);
+       return err;
+}
+
 static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
        [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
        [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
                .doit = vdpa_nl_cmd_dev_config_get_doit,
                .dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
        },
+       {
+               .cmd = VDPA_CMD_DEV_VSTATS_GET,
+               .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+               .doit = vdpa_nl_cmd_dev_stats_get_doit,
+               .flags = GENL_ADMIN_PERM,
+       },
 };
 
 static struct genl_family vdpa_nl_family __ro_after_init = {