struct list_head iommu_drivers_list;
struct mutex iommu_drivers_lock;
struct list_head group_list;
- struct idr group_idr;
+ struct xarray groups;
struct mutex group_lock;
struct cdev group_cdev;
dev_t group_devt;
} vfio;

/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
- return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
+ return xa_alloc(&vfio.groups, &group->minor, group,
+ XA_LIMIT(0, MINORMASK), GFP_KERNEL);
}
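Two semantic shifts are worth noting here. idr_alloc()'s end argument is exclusive, so the old call allocated from 0 up to but not including MINORMASK + 1, while XA_LIMIT() bounds are inclusive; XA_LIMIT(0, MINORMASK) therefore covers the same range. Also, xa_alloc() writes the allocated index through its u32 * argument, which is what lets the patch drop the separate group->minor assignment below; this assumes group->minor is declared u32 (or compatible), since that is the pointer type xa_alloc() takes. A minimal sketch of the pattern, with hypothetical foo names that are not part of the patch:

#include <linux/kdev_t.h>	/* MINORMASK */
#include <linux/xarray.h>

struct foo_registry {
	struct xarray ids;	/* must be initialized with XA_FLAGS_ALLOC */
};

struct foo {
	u32 id;			/* xa_alloc() writes through a u32 * */
};

static int foo_register(struct foo_registry *reg, struct foo *foo)
{
	/*
	 * Stores @foo in the xarray and records the chosen index in
	 * foo->id; XA_LIMIT() bounds are inclusive: 0..MINORMASK.
	 */
	return xa_alloc(&reg->ids, &foo->id, foo,
			XA_LIMIT(0, MINORMASK), GFP_KERNEL);
}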
static void vfio_free_group_minor(int minor)
{
- idr_remove(&vfio.group_idr, minor);
+ xa_erase(&vfio.groups, minor);
}
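xa_erase() returns the entry that was stored at the index, so a debug-oriented variant (hypothetical, not part of this patch) could assert that the minor really mapped to the group being freed:

static void vfio_free_group_minor_checked(struct vfio_group *group)
{
	/* xa_erase() hands back the removed entry; verify the mapping. */
	WARN_ON(xa_erase(&vfio.groups, group->minor) != group);
}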
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
struct vfio_group *group, *tmp;
struct device *dev;
- int ret, minor;
+ int ret;
group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group)
	return ERR_PTR(-ENOMEM);
}
}
- minor = vfio_alloc_group_minor(group);
- if (minor < 0) {
+ ret = vfio_alloc_group_minor(group);
+ if (ret < 0) {
vfio_group_unlock_and_free(group);
- return ERR_PTR(minor);
+ return ERR_PTR(ret);
}
dev = device_create(vfio.class, NULL,
- MKDEV(MAJOR(vfio.group_devt), minor),
+ MKDEV(MAJOR(vfio.group_devt), group->minor),
group, "%s%d", group->noiommu ? "noiommu-" : "",
iommu_group_id(iommu_group));
if (IS_ERR(dev)) {
- vfio_free_group_minor(minor);
+ vfio_free_group_minor(group->minor);
vfio_group_unlock_and_free(group);
return ERR_CAST(dev);
}
- group->minor = minor;
group->dev = dev;
list_add(&group->vfio_next, &vfio.group_list);
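With every live group now reachable through vfio.groups, xarray iteration is available where the IDR would have required idr_for_each(). A hypothetical debug helper (not in the patch) could walk the in-use minors; vfio.group_lock is held so the set stays stable while iterating:

static void vfio_dump_group_minors(void)
{
	struct vfio_group *group;
	unsigned long minor;

	mutex_lock(&vfio.group_lock);
	xa_for_each(&vfio.groups, minor, group)
		pr_info("vfio: group minor %lu in use\n", minor);
	mutex_unlock(&vfio.group_lock);
}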
static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;
mutex_lock(&vfio.group_lock);
- group = idr_find(&vfio.group_idr, minor);
+ group = xa_load(&vfio.groups, minor);
if (!group) {
mutex_unlock(&vfio.group_lock);
return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

static int __init vfio_init(void)
{
int ret;
- idr_init(&vfio.group_idr);
+ xa_init_flags(&vfio.groups, XA_FLAGS_ALLOC);
mutex_init(&vfio.group_lock);
mutex_init(&vfio.iommu_drivers_lock);
INIT_LIST_HEAD(&vfio.group_list);
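xa_alloc() WARNs and returns -EINVAL unless the xarray was initialized for allocation, hence xa_init_flags(..., XA_FLAGS_ALLOC) rather than plain xa_init(). Had the xarray been at file scope instead of a member of the vfio struct, the static initializer would do the same (hypothetical sketch):

#include <linux/xarray.h>

/* Static equivalent of xa_init_flags(&xa, XA_FLAGS_ALLOC). */
static DEFINE_XARRAY_ALLOC(groups);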
#ifdef CONFIG_VFIO_NOIOMMU
vfio_unregister_iommu_driver(&vfio_noiommu_ops);
#endif
- idr_destroy(&vfio.group_idr);
cdev_del(&vfio.group_cdev);
unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
class_destroy(vfio.class);
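The dropped idr_destroy() has no xarray counterpart on purpose: idr_destroy() frees the IDR's internal bookkeeping, whereas an xarray from which every entry has been erased holds no memory and needs no xa_destroy(). An optional sanity check (an assumption, not part of the patch) could assert that at module exit:

	/* Every group minor should have been released by now. */
	WARN_ON(!xa_empty(&vfio.groups));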