habanalabs: Convert phys_pg_pack_handles to XArray
author Matthew Wilcox <willy@infradead.org>
Fri, 15 Mar 2019 12:37:16 +0000 (08:37 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 9 Aug 2019 01:38:19 +0000 (21:38 -0400)
Signed-off-by: Matthew Wilcox <willy@infradead.org>
drivers/misc/habanalabs/debugfs.c
drivers/misc/habanalabs/habanalabs.h
drivers/misc/habanalabs/memory.c

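The conversion replaces the IDR and the external spinlock that guarded it (vm->idr_lock) with a single allocating XArray. xa_alloc() writes the chosen ID straight into phys_pg_pack->handle, takes the array's internal lock itself, and can therefore use GFP_KERNEL where the old idr_alloc() call had to use GFP_ATOMIC under the spinlock. A minimal sketch of that allocation pattern, using hypothetical names (example_handles, example_alloc_handle) rather than the driver's own:

#include <linux/xarray.h>

/* Allocating XArray; the ALLOC1 variant hands out IDs starting at 1,
 * matching the old idr_alloc(idr, ptr, 1, 0, gfp) call. */
static DEFINE_XARRAY_ALLOC1(example_handles);

/* Store @object and write the chosen handle into *@handle.
 * Returns 0 on success, -ENOMEM or -EBUSY on failure. */
static int example_alloc_handle(void *object, u32 *handle)
{
	/* xa_alloc() takes the array's internal spinlock itself, so a
	 * sleeping GFP_KERNEL allocation is allowed here. */
	return xa_alloc(&example_handles, handle, object,
			xa_limit_31b, GFP_KERNEL);
}
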
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 18e499c900c7fdfd0cb5d0b23a68189394a6ca2f..f83fea5e2e56f5cab1f50a61a1ca87de0d930f13 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -233,7 +233,7 @@ static int vm_show(struct seq_file *s, void *data)
        enum vm_type_t *vm_type;
        bool once = true;
        u64 j;
-       int i;
+       unsigned long i;
 
        if (!dev_entry->hdev->mmu_enable)
                return 0;
@@ -269,12 +269,12 @@ static int vm_show(struct seq_file *s, void *data)
                mutex_unlock(&ctx->mem_hash_lock);
 
                vm = &ctx->hdev->vm;
-               spin_lock(&vm->idr_lock);
+               xa_lock(&vm->phys_pg_pack_handles);
 
-               if (!idr_is_empty(&vm->phys_pg_pack_handles))
+               if (!xa_empty(&vm->phys_pg_pack_handles))
                        seq_puts(s, "\n\nallocations:\n");
 
-               idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
+               xa_for_each(&vm->phys_pg_pack_handles, i, phys_pg_pack) {
                        if (phys_pg_pack->asid != ctx->asid)
                                continue;
 
@@ -288,7 +288,7 @@ static int vm_show(struct seq_file *s, void *data)
                                                phys_pg_pack->pages[j]);
                        }
                }
-               spin_unlock(&vm->idr_lock);
+               xa_unlock(&vm->phys_pg_pack_handles);
 
        }
 
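The debugfs loop above is the mechanical part of the conversion: xa_for_each() takes an unsigned long index rather than the int used by idr_for_each_entry(), which is why the local variable changes type, and xa_empty()/xa_lock() replace the idr_is_empty()/spin_lock() pair. The same pattern in isolation, again with hypothetical names (example_dump; the entries are plain void pointers here):

#include <linux/seq_file.h>
#include <linux/xarray.h>

static void example_dump(struct seq_file *s, struct xarray *handles)
{
	unsigned long handle;	/* XArray indices are unsigned long */
	void *entry;

	xa_lock(handles);
	if (!xa_empty(handles))
		seq_puts(s, "allocations:\n");
	/* xa_for_each() visits only present entries, like idr_for_each_entry() */
	xa_for_each(handles, handle, entry)
		seq_printf(s, "handle %lu: %p\n", handle, entry);
	xa_unlock(handles);
}
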
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index d91332a9b876b0919690e7083aa90c3220504f9b..e215bdc9dee780c3aed424faabd13744bb5577ae 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -852,16 +852,14 @@ struct hl_vm_va_block {
  * struct hl_vm - virtual memory manager for MMU.
  * @dram_pg_pool: pool for DRAM physical pages of 2MB.
  * @dram_pg_pool_refcount: reference counter for the pool usage.
- * @idr_lock: protects the phys_pg_list_handles.
- * @phys_pg_pack_handles: idr to hold all device allocations handles.
+ * @phys_pg_pack_handles: xarray to hold all device allocation handles.
  * @init_done: whether initialization was done. We need this because VM
  *             initialization might be skipped during device initialization.
  */
 struct hl_vm {
        struct gen_pool         *dram_pg_pool;
        struct kref             dram_pg_pool_refcount;
-       spinlock_t              idr_lock;
-       struct idr              phys_pg_pack_handles;
+       struct xarray           phys_pg_pack_handles;
        u8                      init_done;
 };
 
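Because the XArray embeds its own spinlock, struct hl_vm loses the separate idr_lock field along with the struct idr. It also needs no teardown call: once every entry has been erased, an empty XArray holds no memory, which is why the idr_destroy() in dram_pg_pool_do_release() simply disappears in the hunks below. A sketch of the corresponding setup, with a hypothetical containing structure:

#include <linux/xarray.h>

struct example_vm {
	struct xarray handles;	/* replaces struct idr + spinlock_t */
};

static void example_vm_init(struct example_vm *vm)
{
	/* XA_FLAGS_ALLOC1: xa_alloc() hands out IDs starting at 1 */
	xa_init_flags(&vm->handles, XA_FLAGS_ALLOC1);
}
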
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 42d237cae1dc571419ee1a107960996b2b6f3d85..bc640255e130120eb5ccc4abcef88f128275a011 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -58,7 +58,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        u64 paddr = 0, total_size, num_pgs, i;
        u32 num_curr_pgs, page_size, page_shift;
-       int handle, rc;
+       int rc;
        bool contiguous;
 
        num_curr_pgs = 0;
@@ -118,30 +118,25 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                }
        }
 
-       spin_lock(&vm->idr_lock);
-       handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
-                               GFP_ATOMIC);
-       spin_unlock(&vm->idr_lock);
-
-       if (handle < 0) {
+       rc = xa_alloc(&vm->phys_pg_pack_handles, &phys_pg_pack->handle,
+                       phys_pg_pack, xa_limit_31b, GFP_KERNEL);
+       if (rc < 0) {
                dev_err(hdev->dev, "Failed to get handle for page\n");
                rc = -EFAULT;
-               goto idr_err;
+               goto xa_err;
        }
 
        for (i = 0 ; i < num_pgs ; i++)
                kref_get(&vm->dram_pg_pool_refcount);
 
-       phys_pg_pack->handle = handle;
-
        atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
        atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
 
-       *ret_handle = handle;
+       *ret_handle = phys_pg_pack->handle;
 
        return 0;
 
-idr_err:
+xa_err:
 page_err:
        if (!phys_pg_pack->contiguous)
                for (i = 0 ; i < num_curr_pgs ; i++)
@@ -244,11 +239,6 @@ static void dram_pg_pool_do_release(struct kref *ref)
        struct hl_vm *vm = container_of(ref, struct hl_vm,
                        dram_pg_pool_refcount);
 
-       /*
-        * free the idr here as only here we know for sure that there are no
-        * allocated physical pages and hence there are no handles in use
-        */
-       idr_destroy(&vm->phys_pg_pack_handles);
        gen_pool_destroy(vm->dram_pg_pool);
 }
 
@@ -307,13 +297,13 @@ static int free_device_memory(struct hl_ctx *ctx, u32 handle)
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
 
-       spin_lock(&vm->idr_lock);
-       phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+       xa_lock(&vm->phys_pg_pack_handles);
+       phys_pg_pack = xa_load(&vm->phys_pg_pack_handles, handle);
        if (phys_pg_pack) {
                if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
                        dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
                                handle);
-                       spin_unlock(&vm->idr_lock);
+                       xa_unlock(&vm->phys_pg_pack_handles);
                        return -EINVAL;
                }
 
@@ -322,15 +312,15 @@ static int free_device_memory(struct hl_ctx *ctx, u32 handle)
                 * pages as the refcount of the pool is also the trigger of the
-                * idr destroy
+                * pool destroy
                 */
-               idr_remove(&vm->phys_pg_pack_handles, handle);
-               spin_unlock(&vm->idr_lock);
+               __xa_erase(&vm->phys_pg_pack_handles, handle);
+               xa_unlock(&vm->phys_pg_pack_handles);
 
                atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
                atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
 
                free_phys_pg_pack(hdev, phys_pg_pack);
        } else {
-               spin_unlock(&vm->idr_lock);
+               xa_unlock(&vm->phys_pg_pack_handles);
                dev_err(hdev->dev,
                        "free device memory failed, no match for handle %u\n",
                        handle);
@@ -792,17 +782,17 @@ static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
        u32 handle;
 
        handle = lower_32_bits(args->map_device.handle);
-       spin_lock(&vm->idr_lock);
-       phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+       xa_lock(&vm->phys_pg_pack_handles);
+       phys_pg_pack = xa_load(&vm->phys_pg_pack_handles, handle);
        if (!phys_pg_pack) {
-               spin_unlock(&vm->idr_lock);
+               xa_unlock(&vm->phys_pg_pack_handles);
                dev_err(hdev->dev, "no match for handle %u\n", handle);
                return -EINVAL;
        }
 
        *paddr = phys_pg_pack->pages[0];
 
-       spin_unlock(&vm->idr_lock);
+       xa_unlock(&vm->phys_pg_pack_handles);
 
        return 0;
 }
@@ -859,10 +849,10 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
        } else {
                handle = lower_32_bits(args->map_device.handle);
 
-               spin_lock(&vm->idr_lock);
-               phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
+               xa_lock(&vm->phys_pg_pack_handles);
+               phys_pg_pack = xa_load(&vm->phys_pg_pack_handles, handle);
                if (!phys_pg_pack) {
-                       spin_unlock(&vm->idr_lock);
+                       xa_unlock(&vm->phys_pg_pack_handles);
                        dev_err(hdev->dev,
                                "no match for handle %u\n", handle);
                        return -EINVAL;
@@ -871,7 +861,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
                /* increment now to avoid freeing device memory while mapping */
                atomic_inc(&phys_pg_pack->mapping_cnt);
 
-               spin_unlock(&vm->idr_lock);
+               xa_unlock(&vm->phys_pg_pack_handles);
 
                vm_type = (enum vm_type_t *) phys_pg_pack;
 
@@ -1609,7 +1599,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
        struct hl_vm_phys_pg_pack *phys_pg_list;
        struct hl_vm_hash_node *hnode;
        struct hlist_node *tmp_node;
-       int i;
+       unsigned long i;
 
        hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
 
@@ -1623,16 +1613,16 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
                unmap_device_va(ctx, hnode->vaddr);
        }
 
-       spin_lock(&vm->idr_lock);
-       idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
+       xa_lock(&vm->phys_pg_pack_handles);
+       xa_for_each(&vm->phys_pg_pack_handles, i, phys_pg_list)
                if (phys_pg_list->asid == ctx->asid) {
                        dev_dbg(hdev->dev,
                                "page list 0x%p of asid %d is still alive\n",
                                phys_pg_list, ctx->asid);
                        free_phys_pg_pack(hdev, phys_pg_list);
-                       idr_remove(&vm->phys_pg_pack_handles, i);
+                       __xa_erase(&vm->phys_pg_pack_handles, i);
                }
-       spin_unlock(&vm->idr_lock);
+       xa_unlock(&vm->phys_pg_pack_handles);
 
        hl_va_range_fini(hdev, &ctx->dram_va_range);
        hl_va_range_fini(hdev, &ctx->host_va_range);
@@ -1675,8 +1665,7 @@ int hl_vm_init(struct hl_device *hdev)
                goto pool_add_err;
        }
 
-       spin_lock_init(&vm->idr_lock);
-       idr_init(&vm->phys_pg_pack_handles);
+       xa_init_flags(&vm->phys_pg_pack_handles, XA_FLAGS_ALLOC1);
 
        atomic64_set(&hdev->dram_used_mem, 0);
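
In the free path the lookup and the removal have to happen atomically, so free_device_memory() takes the array's spinlock explicitly with xa_lock() and removes the entry with __xa_erase(), the variant that expects the lock to be held, mirroring the old spin_lock()/idr_find()/idr_remove() sequence. Where nothing else needs to be done under the lock, plain xa_erase() would handle the locking by itself. A sketch of the locked lookup-and-erase, with hypothetical names:

#include <linux/xarray.h>

/* Atomically look up and remove @handle; on success the caller owns
 * the returned object. */
static void *example_take_handle(struct xarray *handles, u32 handle)
{
	void *object;

	xa_lock(handles);
	object = xa_load(handles, handle);
	if (object)
		__xa_erase(handles, handle);	/* xa_lock already held */
	xa_unlock(handles);

	return object;
}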