struct hl_vm_phys_pg_pack *phys_pg_pack;
u64 paddr = 0, total_size, num_pgs, i;
u32 num_curr_pgs, page_size, page_shift;
- int handle, rc;
+ int rc;
bool contiguous;
num_curr_pgs = 0;
}
}
- spin_lock(&vm->idr_lock);
- handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
- GFP_ATOMIC);
- spin_unlock(&vm->idr_lock);
-
- if (handle < 0) {
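+ /*
+  * xa_alloc() takes the array's internal lock itself, so GFP_KERNEL
+  * can be used here instead of the GFP_ATOMIC the idr path required;
+  * XA_FLAGS_ALLOC1 plus xa_limit_31b keeps handles in the old
+  * idr range of 1..INT_MAX
+  */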
+ rc = xa_alloc(&vm->phys_pg_pack_handles, &phys_pg_pack->handle,
+ phys_pg_pack, xa_limit_31b, GFP_KERNEL);
+ if (rc < 0) {
dev_err(hdev->dev, "Failed to get handle for page\n");
rc = -EFAULT;
- goto idr_err;
+ goto xa_err;
}
for (i = 0 ; i < num_pgs ; i++)
kref_get(&vm->dram_pg_pool_refcount);
- phys_pg_pack->handle = handle;
-
atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
- *ret_handle = handle;
+ *ret_handle = phys_pg_pack->handle;
return 0;
-idr_err:
+xa_err:
page_err:
if (!phys_pg_pack->contiguous)
for (i = 0 ; i < num_curr_pgs ; i++)
struct hl_vm *vm = container_of(ref, struct hl_vm,
dram_pg_pool_refcount);
- /*
- * free the idr here as only here we know for sure that there are no
- * allocated physical pages and hence there are no handles in use
- */
- idr_destroy(&vm->phys_pg_pack_handles);
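+ /*
+  * no xa_destroy() is needed here: reaching this point means every
+  * handle has already been erased, and an empty XArray holds no
+  * memory of its own
+  */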
gen_pool_destroy(vm->dram_pg_pool);
}
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
- spin_lock(&vm->idr_lock);
- phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
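+ /*
+  * hold the xa_lock so the pack cannot be erased and freed between
+  * the lookup and the mapping_cnt check
+  */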
+ xa_lock(&vm->phys_pg_pack_handles);
+ phys_pg_pack = xa_load(&vm->phys_pg_pack_handles, handle);
if (phys_pg_pack) {
if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
handle);
- spin_unlock(&vm->idr_lock);
+ xa_unlock(&vm->phys_pg_pack_handles);
return -EINVAL;
}
* pages as the refcount of the pool is also the trigger of the
- * idr destroy
+ * pool destroy
*/
- idr_remove(&vm->phys_pg_pack_handles, handle);
- spin_unlock(&vm->idr_lock);
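+ /* the xa_lock is already held, so use the __xa_erase() variant */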
+ __xa_erase(&vm->phys_pg_pack_handles, handle);
+ xa_unlock(&vm->phys_pg_pack_handles);
atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
free_phys_pg_pack(hdev, phys_pg_pack);
} else {
- spin_unlock(&vm->idr_lock);
+ xa_unlock(&vm->phys_pg_pack_handles);
dev_err(hdev->dev,
"free device memory failed, no match for handle %u\n",
handle);
u32 handle;
handle = lower_32_bits(args->map_device.handle);
- spin_lock(&vm->idr_lock);
- phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+ xa_lock(&vm->phys_pg_pack_handles);
+ phys_pg_pack = xa_load(&vm->phys_pg_pack_handles, handle);
if (!phys_pg_pack) {
- spin_unlock(&vm->idr_lock);
+ xa_unlock(&vm->phys_pg_pack_handles);
dev_err(hdev->dev, "no match for handle %u\n", handle);
return -EINVAL;
}
*paddr = phys_pg_pack->pages[0];
- spin_unlock(&vm->idr_lock);
+ xa_unlock(&vm->phys_pg_pack_handles);
return 0;
}
} else {
handle = lower_32_bits(args->map_device.handle);
- spin_lock(&vm->idr_lock);
- phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+ xa_lock(&vm->phys_pg_pack_handles);
+ phys_pg_pack = xa_load(&vm->phys_pg_pack_handles, handle);
if (!phys_pg_pack) {
- spin_unlock(&vm->idr_lock);
+ xa_unlock(&vm->phys_pg_pack_handles);
dev_err(hdev->dev,
"no match for handle %u\n", handle);
return -EINVAL;
/* increment now to avoid freeing device memory while mapping */
atomic_inc(&phys_pg_pack->mapping_cnt);
- spin_unlock(&vm->idr_lock);
+ xa_unlock(&vm->phys_pg_pack_handles);
vm_type = (enum vm_type_t *) phys_pg_pack;
struct hl_vm_phys_pg_pack *phys_pg_list;
struct hl_vm_hash_node *hnode;
struct hlist_node *tmp_node;
- int i;
+ unsigned long i;
hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
unmap_device_va(ctx, hnode->vaddr);
}
- spin_lock(&vm->idr_lock);
- idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
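+ /*
+  * xa_for_each() needs an unsigned long index and is safe against
+  * entries being erased while it walks
+  */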
+ xa_lock(&vm->phys_pg_pack_handles);
+ xa_for_each(&vm->phys_pg_pack_handles, i, phys_pg_list)
if (phys_pg_list->asid == ctx->asid) {
dev_dbg(hdev->dev,
"page list 0x%p of asid %d is still alive\n",
phys_pg_list, ctx->asid);
free_phys_pg_pack(hdev, phys_pg_list);
- idr_remove(&vm->phys_pg_pack_handles, i);
+ __xa_erase(&vm->phys_pg_pack_handles, i);
}
- spin_unlock(&vm->idr_lock);
+ xa_unlock(&vm->phys_pg_pack_handles);
hl_va_range_fini(hdev, &ctx->dram_va_range);
hl_va_range_fini(hdev, &ctx->host_va_range);
goto pool_add_err;
}
- spin_lock_init(&vm->idr_lock);
- idr_init(&vm->phys_pg_pack_handles);
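+ /*
+  * XA_FLAGS_ALLOC1 keeps index 0 busy so xa_alloc() hands out handles
+  * starting at 1, as idr_alloc() did; the XArray's internal lock
+  * replaces the old idr_lock
+  */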
+ xa_init_flags(&vm->phys_pg_pack_handles, XA_FLAGS_ALLOC1);
atomic64_set(&hdev->dram_used_mem, 0);