&cfg->queue_used_hi);
}
+/*
+ * vp_modern_set_queue_enable - enable a virtqueue
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ * @enable: true to enable the virtqueue, false to disable it
+ */
+static void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 index, bool enable)
+{
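+ /* Select the queue so the common cfg fields below refer to it */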
+ vp_iowrite16(index, &mdev->common->queue_select);
+ vp_iowrite16(enable, &mdev->common->queue_enable);
+}
+
+/*
+ * vp_modern_get_queue_enable - check whether a virtqueue is enabled
+ * @mdev: the modern virtio-pci device
+ * @index: the queue index
+ *
+ * Returns whether a virtqueue is enabled or not
+ */
+static bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 index)
+{
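+ /* Select the queue, then read back its enable state */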
+ vp_iowrite16(index, &mdev->common->queue_select);
+
+ return vp_ioread16(&mdev->common->queue_enable);
+}
+
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
/* Check if queue is either not available or already active. */
num = vp_ioread16(&cfg->queue_size);
- if (!num || vp_ioread16(&cfg->queue_enable))
+ if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
if (num & (num - 1)) {
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_common_cfg __iomem *cfg = vp_dev->mdev.common;
struct virtqueue *vq;
int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
/* Select and activate all queues. Has to be done last: once we do
* this, there's no way to go back except reset.
*/
- list_for_each_entry(vq, &vdev->vqs, list) {
- vp_iowrite16(vq->index, &cfg->queue_select);
- vp_iowrite16(1, &cfg->queue_enable);
- }
+ list_for_each_entry(vq, &vdev->vqs, list)
+ vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);
return 0;
}