struct dma_fence_cb cb;
 };
 
+/**
+ * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: amdgpu_vm pointer
+ * @pasid: the pasid the VM is using on this GPU
+ *
+ * Set the pasid this VM is using on this GPU; it can also be used to remove
+ * the pasid by passing in zero.
+ *
+ * Returns:
+ * 0 for success, error for failure.
+ */
+int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                       u32 pasid)
+{
+       int r;
+
+       if (vm->pasid == pasid)
+               return 0;
+
+       if (vm->pasid) {
+               r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
+               if (r < 0)
+                       return r;
+
+               vm->pasid = 0;
+       }
+
+       if (pasid) {
+               r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
+                                       GFP_KERNEL));
+               if (r < 0)
+                       return r;
+
+               vm->pasid = pasid;
+       }
+
+       return 0;
+}
+
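For readers new to the XArray API: xa_store_irq() returns the entry previously
stored at the index, encoding failures as an error pointer, and xa_err()
reduces that to 0 or a negative errno, which is why both calls in the helper
are wrapped in xa_err(). A minimal standalone sketch of the store/erase
pattern the helper is built on (illustrative only, not part of this patch;
the example_* names are hypothetical):

	#include <linux/xarray.h>

	static DEFINE_XARRAY_FLAGS(example_pasids, XA_FLAGS_LOCK_IRQ);

	/* Map @pasid to @vm, mirroring the store in amdgpu_vm_set_pasid(). */
	static int example_map(unsigned long pasid, void *vm)
	{
		/* Returns the old entry or an error-encoded pointer;
		 * xa_err() turns that into 0 or a negative errno.
		 */
		return xa_err(xa_store_irq(&example_pasids, pasid, vm,
					   GFP_KERNEL));
	}

	/* Unmap: xa_erase_irq() returns the removed entry, or NULL. */
	static void example_unmap(unsigned long pasid)
	{
		xa_erase_irq(&example_pasids, pasid);
	}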
 /*
  * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
  * happens while holding this lock anywhere to prevent deadlocks when
 
        amdgpu_bo_unreserve(vm->root.bo);
 
-       if (pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
-                             GFP_ATOMIC);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-               if (r < 0)
-                       goto error_free_root;
-
-               vm->pasid = pasid;
-       }
+       r = amdgpu_vm_set_pasid(adev, vm, pasid);
+       if (r)
+               goto error_free_root;
 
        INIT_KFIFO(vm->faults);
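A side benefit of the helper is visible in this hunk: the old open-coded path
was forced into GFP_ATOMIC because idr_alloc() ran inside the pasid_lock
critical section with IRQs disabled, while amdgpu_vm_set_pasid() can pass
GFP_KERNEL, since xa_store_irq() takes the XArray's internal IRQ-safe lock
itself and may drop it when it needs to allocate. Roughly, the before/after
shapes:

	/* before: allocation constrained by the external spinlock */
	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
	r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
		      GFP_ATOMIC);
	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

	/* after: the XArray manages its own locking */
	r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
				GFP_KERNEL));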
 
        if (r)
                goto unreserve_bo;
 
-       if (pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
-                             GFP_ATOMIC);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+       /* Free the original amdgpu-allocated pasid; it will be replaced
+        * with a kfd-allocated pasid.
+        */
+       if (vm->pasid)
+               amdgpu_pasid_free(vm->pasid);
 
-               if (r == -ENOSPC)
-                       goto unreserve_bo;
-               r = 0;
-       }
+       r = amdgpu_vm_set_pasid(adev, vm, pasid);
+       if (r)
+               goto unreserve_bo;
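Note the division of labor in this hunk of (presumably)
amdgpu_vm_make_compute(): amdgpu_pasid_free() only returns the number to the
pasid allocator, while the single amdgpu_vm_set_pasid() call covers both
halves of the mapping update, erasing the stale entry for vm->pasid (still
set at that point) and then storing the kfd-provided pasid. Condensed, with
names taken from the diff above:

	/* 1) give the amdgpu-allocated number back to the allocator */
	if (vm->pasid)
		amdgpu_pasid_free(vm->pasid);

	/* 2) drop the old pasid->vm entry and install the kfd pasid */
	r = amdgpu_vm_set_pasid(adev, vm, pasid);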
 
        /* Check if PD needs to be reinitialized and do it before
         * changing any other state, in case it fails.
                                       to_amdgpu_bo_vm(vm->root.bo),
                                       false);
                if (r)
-                       goto free_idr;
+                       goto free_pasid_entry;
        }
 
        /* Update VM state */
                r = amdgpu_bo_sync_wait(vm->root.bo,
                                        AMDGPU_FENCE_OWNER_UNDEFINED, true);
                if (r)
-                       goto free_idr;
+                       goto free_pasid_entry;
 
                vm->update_funcs = &amdgpu_vm_cpu_funcs;
        } else {
        vm->last_update = NULL;
        vm->is_compute_context = true;
 
-       if (vm->pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-
-               /* Free the original amdgpu allocated pasid
-                * Will be replaced with kfd allocated pasid
-                */
-               amdgpu_pasid_free(vm->pasid);
-               vm->pasid = 0;
-       }
-
        /* Free the shadow bo for compute VM */
        amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
 
-       if (pasid)
-               vm->pasid = pasid;
-
        goto unreserve_bo;
 
-free_idr:
-       if (pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               idr_remove(&adev->vm_manager.pasid_idr, pasid);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-       }
+free_pasid_entry:
+       amdgpu_vm_set_pasid(adev, vm, 0);
 unreserve_bo:
        amdgpu_bo_unreserve(vm->root.bo);
        return r;
  */
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-       if (vm->pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-       }
-       vm->pasid = 0;
+       amdgpu_vm_set_pasid(adev, vm, 0);
        vm->is_compute_context = false;
 }
 
 
        root = amdgpu_bo_ref(vm->root.bo);
        amdgpu_bo_reserve(root, true);
-       if (vm->pasid) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-               idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-               vm->pasid = 0;
-       }
-
+       amdgpu_vm_set_pasid(adev, vm, 0);
        dma_fence_wait(vm->last_unlocked, false);
        dma_fence_put(vm->last_unlocked);
 
        adev->vm_manager.vm_update_mode = 0;
 #endif
 
-       idr_init(&adev->vm_manager.pasid_idr);
-       spin_lock_init(&adev->vm_manager.pasid_lock);
+       xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
 }
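For context, this relies on the amdgpu_vm.h side of the patch (not shown in
this excerpt), which presumably replaces the pasid_idr/pasid_lock pair in
struct amdgpu_vm_manager with a single XArray member, along these lines:

	struct amdgpu_vm_manager {
		...
		/* PASID-to-VM mapping; the XArray's internal lock,
		 * made IRQ-safe by XA_FLAGS_LOCK_IRQ, replaces the
		 * old pasid_lock spinlock.
		 */
		struct xarray	pasids;
		...
	};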
 
 /**
  */
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
-       WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
-       idr_destroy(&adev->vm_manager.pasid_idr);
+       WARN_ON(!xa_empty(&adev->vm_manager.pasids));
+       xa_destroy(&adev->vm_manager.pasids);
 
        amdgpu_vmid_mgr_fini(adev);
 }
        struct amdgpu_vm *vm;
        unsigned long flags;
 
-       spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+       xa_lock_irqsave(&adev->vm_manager.pasids, flags);
 
-       vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+       vm = xa_load(&adev->vm_manager.pasids, pasid);
        if (vm)
                *task_info = vm->task_info;
 
-       spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+       xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
 }
 
 /**
        struct amdgpu_vm *vm;
        int r;
 
-       spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
-       vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+       xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
+       vm = xa_load(&adev->vm_manager.pasids, pasid);
        if (vm) {
                root = amdgpu_bo_ref(vm->root.bo);
                is_compute_context = vm->is_compute_context;
        } else {
                root = NULL;
        }
-       spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
+       xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
 
        if (!root)
                return false;
                goto error_unref;
 
        /* Double check that the VM still exists */
-       spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
-       vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+       xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
+       vm = xa_load(&adev->vm_manager.pasids, pasid);
        if (vm && vm->root.bo != root)
                vm = NULL;
-       spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
+       xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
        if (!vm)
                goto error_unlock;
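The double check above is the usual lookup-revalidate pattern: the first
xa_load() ran under the IRQ-safe lock, but reserving the root BO can sleep,
so by this point the VM may have been destroyed and its pasid recycled for a
different VM. Repeating the lookup under the lock and comparing against the
previously referenced root BO catches that case; condensed (an illustrative
restatement, not new code):

	/*
	 * 1. Under xa_lock: look up the VM, take a reference on an
	 *    identity anchor (the root BO), drop the lock.
	 * 2. Sleep while reserving the BO.
	 * 3. Under xa_lock again: repeat the lookup; if the anchor no
	 *    longer matches, the pasid was reused, treat as not found.
	 */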