svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
                                    const struct mmu_notifier_range *range,
                                    unsigned long cur_seq);
-
+static int
+svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
+                  uint64_t *bo_s, uint64_t *bo_l);
 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
        .invalidate = svm_range_cpu_invalidate_pagetables,
 };
 
        return -1;
 }
+
 static int
 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
                                unsigned long *start, unsigned long *last)
                  vma->vm_end >> PAGE_SHIFT, *last);
 
        return 0;
+}
+
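+/**
+ * svm_range_check_vm_userptr - check if the range overlaps a userptr mapping
+ * @p: current kfd_process
+ * @start: range start address, in pages
+ * @last: range last address, in pages
+ * @bo_s: userptr mapping start address in pages if the range is already mapped
+ * @bo_l: userptr mapping last address in pages if the range is already mapped
+ *
+ * Walk the entire vm->va interval tree of every GPU VM attached to the
+ * process and check whether any userptr BO overlaps the page range
+ * [start, last].
+ *
+ * Return:
+ * 0 if no userptr BO overlaps the range
+ * -EADDRINUSE if the range overlaps an existing userptr mapping
+ * a negative errno if reserving the root page directory BO fails
+ */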
+static int
+svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
+                          uint64_t *bo_s, uint64_t *bo_l)
+{
+       struct amdgpu_bo_va_mapping *mapping;
+       struct interval_tree_node *node;
+       struct amdgpu_bo *bo = NULL;
+       unsigned long userptr;
+       uint32_t i;
+       int r;
 
+       for (i = 0; i < p->n_pdds; i++) {
+               struct amdgpu_vm *vm;
+
+               if (!p->pdds[i]->drm_priv)
+                       continue;
+
+               vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
+               r = amdgpu_bo_reserve(vm->root.bo, false);
+               if (r)
+                       return r;
+
+               /* Check userptr by searching the entire vm->va interval tree */
+               node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
+               while (node) {
+                       mapping = container_of((struct rb_node *)node,
+                                              struct amdgpu_bo_va_mapping, rb);
+                       bo = mapping->bo_va->base.bo;
+
+                       if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
+                                                        start << PAGE_SHIFT,
+                                                        last << PAGE_SHIFT,
+                                                        &userptr)) {
+                               node = interval_tree_iter_next(node, 0, ~0ULL);
+                               continue;
+                       }
+
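+                       /* Overlapping userptr BO found, report its page range */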
+                       pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
+                                start, last);
+                       if (bo_s && bo_l) {
+                               *bo_s = userptr >> PAGE_SHIFT;
+                               *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
+                       }
+                       amdgpu_bo_unreserve(vm->root.bo);
+                       return -EADDRINUSE;
+               }
+               amdgpu_bo_unreserve(vm->root.bo);
+       }
+       return 0;
 }
+
 static struct
 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
                                                struct kfd_process *p,
        struct svm_range *prange = NULL;
        unsigned long start, last;
        uint32_t gpuid, gpuidx;
+       uint64_t bo_s = 0;
+       uint64_t bo_l = 0;
+       int r;
 
        if (svm_range_get_range_boundaries(p, addr, &start, &last))
                return NULL;
 
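+       /*
+        * If the expanded range collides with an existing GPU VM mapping or
+        * userptr BO, either fail (faulting address is inside the mapping) or
+        * shrink the new range to the single faulting page.
+        */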
+       r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
+       if (r != -EADDRINUSE)
+               r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
+
+       if (r == -EADDRINUSE) {
+               if (addr >= bo_s && addr <= bo_l)
+                       return NULL;
+
+               /* Create a one page svm range if the 2MB range only partially overlaps */
+               start = addr;
+               last = addr;
+       }
+
        prange = svm_range_new(&p->svms, start, last);
        if (!prange) {
                pr_debug("Failed to create prange in address [0x%llx]\n", addr);
  * @p: current kfd_process
  * @start: range start address, in pages
  * @last: range last address, in pages
+ * @bo_s: mapping start address in pages if the address range is already mapped
+ * @bo_l: mapping last address in pages if the address range is already mapped
  *
  * The purpose is to avoid virtual address ranges already allocated by
  * kfd_ioctl_alloc_memory_of_gpu ioctl.
  * a signal. Release all buffer reservations and return to user-space.
  */
 static int
-svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last)
+svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
+                  uint64_t *bo_s, uint64_t *bo_l)
 {
+       struct amdgpu_bo_va_mapping *mapping;
+       struct interval_tree_node *node;
        uint32_t i;
        int r;
 
                r = amdgpu_bo_reserve(vm->root.bo, false);
                if (r)
                        return r;
-               if (interval_tree_iter_first(&vm->va, start, last)) {
-                       pr_debug("Range [0x%llx 0x%llx] already mapped\n", start, last);
+
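+               /* Any mapping in [start, last] means the range is already in use */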
+               node = interval_tree_iter_first(&vm->va, start, last);
+               if (node) {
+                       pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
+                                start, last);
+                       mapping = container_of((struct rb_node *)node,
+                                              struct amdgpu_bo_va_mapping, rb);
+                       if (bo_s && bo_l) {
+                               *bo_s = mapping->start;
+                               *bo_l = mapping->last;
+                       }
                        amdgpu_bo_unreserve(vm->root.bo);
                        return -EADDRINUSE;
                }
                start = min(end, vma->vm_end);
        } while (start < end);
 
-       return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT);
+       return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
+                                 NULL);
 }
 
 /**