drm/amdkfd: remap unaligned svm ranges that have split
author    Alex Sierra <alex.sierra@amd.com>
          Tue, 17 Oct 2023 22:08:56 +0000 (17:08 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
          Fri, 20 Oct 2023 19:11:29 +0000 (15:11 -0400)
SVM ranges that have been mapped with 2MB page table entries
require a remap in case the split happens at a non-2MB-aligned VA.

[WHY]:
This condition causes the 2MB page table entries to be split into
4KB PTEs.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
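
For context, the IS_ALIGNED checks added in the diff below decide whether a
split point cuts through a mapping granule. A minimal user-space sketch of
that arithmetic, assuming 4KB pages and a granularity of 9 (512 pages = 2MB);
the variable names and values here are illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Same test as the kernel's IS_ALIGNED(): power-of-two alignment. */
#define IS_ALIGNED(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

int main(void)
{
	unsigned granularity = 9;              /* log2(pages); 2MB with 4KB pages */
	uint64_t granule = 1UL << granularity; /* 512 pages */

	/* A tail split keeps [start, new_last] and carves off the rest at
	 * new_last + 1. If that boundary is not granule-aligned, the huge
	 * PTE covering it must be fractured into 4KB PTEs, so the new
	 * piece has to be remapped. */
	uint64_t new_last = 0x2ff;             /* page index; 0x300 is mid-granule */
	printf("tail needs remap: %d\n", !IS_ALIGNED(new_last + 1, granule)); /* 1 */

	/* A head split checks new_start directly, since that is where the
	 * remaining range now begins. */
	uint64_t new_start = 0x300;
	printf("head needs remap: %d\n", !IS_ALIGNED(new_start, granule));   /* 1 */
	return 0;
}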
drivers/gpu/drm/amd/amdkfd/kfd_svm.c

index ad4e1387e0b091cc76f6d17bb7cb2d02bbfda800..bda88dc6e2fa4e379a9529883e6ad080b3c74a64 100644 (file)
@@ -1106,26 +1106,32 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
 }
 
 static int
-svm_range_split_tail(struct svm_range *prange,
-                    uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+                    struct list_head *insert_list, struct list_head *remap_list)
 {
        struct svm_range *tail;
        int r = svm_range_split(prange, prange->start, new_last, &tail);
 
-       if (!r)
+       if (!r) {
                list_add(&tail->list, insert_list);
+               if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
+                       list_add(&tail->update_list, remap_list);
+       }
        return r;
 }
 
 static int
-svm_range_split_head(struct svm_range *prange,
-                    uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+                    struct list_head *insert_list, struct list_head *remap_list)
 {
        struct svm_range *head;
        int r = svm_range_split(prange, new_start, prange->last, &head);
 
-       if (!r)
+       if (!r) {
                list_add(&head->list, insert_list);
+               if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
+                       list_add(&head->update_list, remap_list);
+       }
        return r;
 }
 
@@ -2052,7 +2058,7 @@ static int
 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
              uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
              struct list_head *update_list, struct list_head *insert_list,
-             struct list_head *remove_list)
+             struct list_head *remove_list, struct list_head *remap_list)
 {
        unsigned long last = start + size - 1UL;
        struct svm_range_list *svms = &p->svms;
@@ -2068,6 +2074,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
        INIT_LIST_HEAD(insert_list);
        INIT_LIST_HEAD(remove_list);
        INIT_LIST_HEAD(&new_list);
+       INIT_LIST_HEAD(remap_list);
 
        node = interval_tree_iter_first(&svms->objects, start, last);
        while (node) {
@@ -2104,14 +2111,14 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
                        if (node->start < start) {
                                pr_debug("change old range start\n");
                                r = svm_range_split_head(prange, start,
-                                                        insert_list);
+                                                        insert_list, remap_list);
                                if (r)
                                        goto out;
                        }
                        if (node->last > last) {
                                pr_debug("change old range last\n");
                                r = svm_range_split_tail(prange, last,
-                                                        insert_list);
+                                                        insert_list, remap_list);
                                if (r)
                                        goto out;
                        }
@@ -3501,6 +3508,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
        struct list_head update_list;
        struct list_head insert_list;
        struct list_head remove_list;
+       struct list_head remap_list;
        struct svm_range_list *svms;
        struct svm_range *prange;
        struct svm_range *next;
@@ -3532,7 +3540,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 
        /* Add new range and split existing ranges as needed */
        r = svm_range_add(p, start, size, nattr, attrs, &update_list,
-                         &insert_list, &remove_list);
+                         &insert_list, &remove_list, &remap_list);
        if (r) {
                mutex_unlock(&svms->lock);
                mmap_write_unlock(mm);
@@ -3597,6 +3605,19 @@ out_unlock_range:
                        ret = r;
        }
 
+       list_for_each_entry(prange, &remap_list, update_list) {
+               pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
+                        prange, prange->start, prange->last);
+               mutex_lock(&prange->migrate_mutex);
+               r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
+                                              true, true, prange->mapped_to_gpu);
+               if (r)
+                       pr_debug("failed %d on remap svm range\n", r);
+               mutex_unlock(&prange->migrate_mutex);
+               if (r)
+                       ret = r;
+       }
+
        dynamic_svm_range_dump(svms);
 
        mutex_unlock(&svms->lock);
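
The final hunk above is the consumer side: pieces queued on remap_list by the
split helpers are revalidated and remapped once the range updates are done. A
stand-alone sketch of that collect-then-process pattern, with a hand-rolled
singly linked list standing in for the kernel's list_head plumbing; all names
here are hypothetical stand-ins, not the driver's own:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct svm_range; the driver links ranges
 * through an embedded list_head rather than a next pointer. */
struct range {
	uint64_t start, last;	/* page indices, inclusive */
	struct range *next;
};

/* Queue a split-off piece for remapping, as the split helpers do when
 * the split point is unaligned. */
static void queue_remap(struct range *r, struct range **remap_list)
{
	r->next = *remap_list;
	*remap_list = r;
}

/* Placeholder for svm_range_validate_and_map(); here it only reports
 * what would be remapped. */
static int remap_range(struct range *r)
{
	printf("Remapping range [0x%llx 0x%llx]\n",
	       (unsigned long long)r->start, (unsigned long long)r->last);
	return 0;
}

int main(void)
{
	struct range tail = { .start = 0x300, .last = 0x5ff };
	struct range *remap_list = NULL;
	int ret = 0;

	queue_remap(&tail, &remap_list);

	/* Mirrors the list_for_each_entry() loop added at the end of
	 * svm_range_set_attr(): walk the queued pieces, remap each one,
	 * and record any error without aborting the walk. */
	for (struct range *r = remap_list; r; r = r->next) {
		int err = remap_range(r);
		if (err)
			ret = err;
	}
	return ret;
}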