www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 27 Nov 2024 21:11:58 +0000 (13:11 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 27 Nov 2024 21:11:58 +0000 (13:11 -0800)
Pull virtio updates from Michael Tsirkin:
 "A small number of improvements all over the place"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio_vdpa: remove redundant check on desc
  virtio_fs: store actual queue index in mq_map
  virtio_fs: add informative log for new tag discovery
  virtio: Make vring_new_virtqueue support packed vring
  virtio_pmem: Add freeze/restore callbacks
  vdpa/mlx5: Fix suboptimal range on iotlb iteration
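
One of the items above, "virtio_pmem: Add freeze/restore callbacks", follows the usual virtio power-management pattern: quiesce the device and tear down its virtqueue on freeze, then re-create the virtqueue and mark the device ready on restore. The snippet below is only a hedged sketch of that pattern for a single-queue device; virtio_pmem_init_vq() is an assumed helper and this is not the literal upstream patch.

/*
 * Hedged sketch of the common virtio freeze/restore pattern as it would
 * apply to a single-queue device such as virtio_pmem. Not the verbatim
 * upstream patch; virtio_pmem_init_vq() is an assumed helper.
 */
static int virtio_pmem_freeze(struct virtio_device *vdev)
{
	/* Quiesce the device and tear down its virtqueue before suspend. */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
	return 0;
}

static int virtio_pmem_restore(struct virtio_device *vdev)
{
	int err;

	/* Re-create the request virtqueue on resume. */
	err = virtio_pmem_init_vq(vdev->priv);	/* assumed helper */
	if (err)
		return err;

	/* Tell the device it may start processing requests again. */
	virtio_device_ready(vdev);
	return 0;
}

static struct virtio_driver virtio_pmem_driver = {
	/* ... probe, remove, id_table as before ... */
#ifdef CONFIG_PM_SLEEP
	.freeze		= virtio_pmem_freeze,
	.restore	= virtio_pmem_restore,
#endif
};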

drivers/virtio/virtio_ring.c
fs/fuse/virtio_fs.c

index 8167be01b400159c649f0b48a2f81934c4c0326c,48b297f88abac2aaa6fa3403ac3fff01a8ac0acd..82a7d2cbc70450e4fc0ce5c38bdc01d649ff336a
@@@ -1135,6 -1129,66 +1126,64 @@@ static int vring_alloc_queue_split(stru
        return 0;
  }
  
 -      vq->premapped = false;
 -      vq->do_unmap = vq->use_dma_api;
+ static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
+                                              struct vring_virtqueue_split *vring_split,
+                                              struct virtio_device *vdev,
+                                              bool weak_barriers,
+                                              bool context,
+                                              bool (*notify)(struct virtqueue *),
+                                              void (*callback)(struct virtqueue *),
+                                              const char *name,
+                                              struct device *dma_dev)
+ {
+       struct vring_virtqueue *vq;
+       int err;
+       vq = kmalloc(sizeof(*vq), GFP_KERNEL);
+       if (!vq)
+               return NULL;
+       vq->packed_ring = false;
+       vq->vq.callback = callback;
+       vq->vq.vdev = vdev;
+       vq->vq.name = name;
+       vq->vq.index = index;
+       vq->vq.reset = false;
+       vq->we_own_ring = false;
+       vq->notify = notify;
+       vq->weak_barriers = weak_barriers;
+ #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+       vq->broken = true;
+ #else
+       vq->broken = false;
+ #endif
+       vq->dma_dev = dma_dev;
+       vq->use_dma_api = vring_use_dma_api(vdev);
+       vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
+               !context;
+       vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+       if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
+               vq->weak_barriers = false;
+       err = vring_alloc_state_extra_split(vring_split);
+       if (err) {
+               kfree(vq);
+               return NULL;
+       }
+       virtqueue_vring_init_split(vring_split, vq);
+       virtqueue_init(vq, vring_split->vring.num);
+       virtqueue_vring_attach_split(vq, vring_split);
+       spin_lock(&vdev->vqs_list_lock);
+       list_add_tail(&vq->vq.list, &vdev->vqs);
+       spin_unlock(&vdev->vqs_list_lock);
+       return &vq->vq;
+ }
  static struct virtqueue *vring_create_virtqueue_split(
        unsigned int index,
        unsigned int num,
@@@ -2650,68 -2668,8 +2718,6 @@@ irqreturn_t vring_interrupt(int irq, vo
  }
  EXPORT_SYMBOL_GPL(vring_interrupt);
  
- /* Only available for split ring */
- static struct virtqueue *__vring_new_virtqueue(unsigned int index,
-                                              struct vring_virtqueue_split *vring_split,
-                                              struct virtio_device *vdev,
-                                              bool weak_barriers,
-                                              bool context,
-                                              bool (*notify)(struct virtqueue *),
-                                              void (*callback)(struct virtqueue *),
-                                              const char *name,
-                                              struct device *dma_dev)
- {
-       struct vring_virtqueue *vq;
-       int err;
-       if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
-               return NULL;
-       vq = kmalloc(sizeof(*vq), GFP_KERNEL);
-       if (!vq)
-               return NULL;
-       vq->packed_ring = false;
-       vq->vq.callback = callback;
-       vq->vq.vdev = vdev;
-       vq->vq.name = name;
-       vq->vq.index = index;
-       vq->vq.reset = false;
-       vq->we_own_ring = false;
-       vq->notify = notify;
-       vq->weak_barriers = weak_barriers;
- #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
-       vq->broken = true;
- #else
-       vq->broken = false;
- #endif
-       vq->dma_dev = dma_dev;
-       vq->use_dma_api = vring_use_dma_api(vdev);
-       vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
-               !context;
-       vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
-       if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
-               vq->weak_barriers = false;
-       err = vring_alloc_state_extra_split(vring_split);
-       if (err) {
-               kfree(vq);
-               return NULL;
-       }
-       virtqueue_vring_init_split(vring_split, vq);
-       virtqueue_init(vq, vring_split->vring.num);
-       virtqueue_vring_attach_split(vq, vring_split);
--
-       spin_lock(&vdev->vqs_list_lock);
-       list_add_tail(&vq->vq.list, &vdev->vqs);
-       spin_unlock(&vdev->vqs_list_lock);
-       return &vq->vq;
- }
--
  struct virtqueue *vring_create_virtqueue(
        unsigned int index,
        unsigned int num,
Simple merge
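
The virtio_ring.c hunks above factor the split-ring body of the old __vring_new_virtqueue() (which simply returned NULL when VIRTIO_F_RING_PACKED was negotiated) out into __vring_new_virtqueue_split(). Below is a plausible sketch of how the exported vring_new_virtqueue() wrapper can then pick the ring layout at run time; the packed-side helper __vring_new_virtqueue_packed() and its field setup are assumptions inferred from the shortlog entry "virtio: Make vring_new_virtqueue support packed vring", not the literal patch.

/*
 * Hedged sketch, not the verbatim upstream code: with the split-ring body
 * in __vring_new_virtqueue_split(), the exported wrapper no longer has to
 * reject packed rings. __vring_new_virtqueue_packed() is an assumed
 * counterpart helper for the packed layout.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring_virtqueue_split vring_split = {};

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
		struct vring_virtqueue_packed vring_packed = {};

		/* Caller-provided memory holds the packed descriptor ring. */
		vring_packed.vring.num = num;
		vring_packed.vring.desc = pages;
		return __vring_new_virtqueue_packed(index, &vring_packed,
						    vdev, weak_barriers,
						    context, notify, callback,
						    name, vdev->dev.parent);
	}

	/* Split layout: desc/avail/used are laid out inside 'pages'. */
	vring_init(&vring_split.vring, num, pages, vring_align);
	return __vring_new_virtqueue_split(index, &vring_split, vdev,
					   weak_barriers, context, notify,
					   callback, name, vdev->dev.parent);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

Splitting the body out this way presumably also lets the allocate-and-create path (vring_create_virtqueue_split(), visible at the end of the first hunk) share the same initialization helper instead of duplicating it.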