goto out_free;
        }
        if (cpages != npages)
-               pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+               pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
                         cpages, npages);
        else
-               pr_debug("0x%lx pages migrated\n", cpages);
+               pr_debug("0x%lx pages collected\n", cpages);
 
        r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
        migrate_vma_pages(&migrate);
  * svm_migrate_ram_to_vram - migrate svm range from system to device
  * @prange: range structure
  * @best_loc: the device to migrate to
+ * @start_mgr: start page to migrate
+ * @last_mgr: last page to migrate
  * @mm: the process mm structure
  * @trigger: reason of migration
  *
  */
 static int
 svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+                       unsigned long start_mgr, unsigned long last_mgr,
                        struct mm_struct *mm, uint32_t trigger)
 {
        unsigned long addr, start, end;
        unsigned long cpages = 0;
        long r = 0;
 
-       if (prange->actual_loc == best_loc) {
-               pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
-                        prange->svms, prange->start, prange->last, best_loc);
-               return 0;
+       if (start_mgr < prange->start || last_mgr > prange->last) {
+               pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+                        start_mgr, last_mgr, prange->start, prange->last);
+               return -EFAULT;
        }
 
        node = svm_range_get_node_by_id(prange, best_loc);
                return -ENODEV;
        }
 
-       pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
-                prange->start, prange->last, best_loc);
+       pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
+               prange->svms, start_mgr, last_mgr, prange->start, prange->last,
+               best_loc);
 
-       start = prange->start << PAGE_SHIFT;
-       end = (prange->last + 1) << PAGE_SHIFT;
+       start = start_mgr << PAGE_SHIFT;
+       end = (last_mgr + 1) << PAGE_SHIFT;
 
        r = svm_range_vram_node_new(node, prange, true);
        if (r) {
                dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
                return r;
        }
-       ttm_res_offset = prange->offset << PAGE_SHIFT;
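+       /* start the copy at the migration window's offset inside the
+        * range's vram allocation
+        */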
+       ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
 
        for (addr = start; addr < end;) {
                unsigned long next;
 
        if (cpages) {
                prange->actual_loc = best_loc;
-               svm_range_dma_unmap(prange);
-       } else {
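+               /* grow the count by the pages collected for migration */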
+               prange->vram_pages += cpages;
+       } else if (!prange->actual_loc) {
+               /* no page was migrated and all pages of prange are in
+                * sys ram: drop the svm_bo taken by svm_range_vram_node_new
+                */
                svm_range_vram_node_free(prange);
        }
 
  * Context: Process context, caller hold mmap read lock, prange->migrate_mutex
  *
  * Return:
- *   0 - success with all pages migrated
  *   negative values - indicate error
- *   positive values - partial migration, number of pages not migrated
+ *   positive values or zero - number of pages migrated
  */
 static long
 svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
        uint64_t npages = (end - start) >> PAGE_SHIFT;
        unsigned long upages = npages;
        unsigned long cpages = 0;
+       unsigned long mpages = 0;
        struct amdgpu_device *adev = node->adev;
        struct kfd_process_device *pdd;
        struct dma_fence *mfence = NULL;
                goto out_free;
        }
        if (cpages != npages)
-               pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+               pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
                         cpages, npages);
        else
-               pr_debug("0x%lx pages migrated\n", cpages);
+               pr_debug("0x%lx pages collected\n", cpages);
 
        r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
                                    scratch, npages);
        kvfree(buf);
 out:
        if (!r && cpages) {
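+               /* mpages: collected pages that actually made it to sys ram */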
+               mpages = cpages - upages;
                pdd = svm_range_get_pdd_by_node(prange, node);
                if (pdd)
-                       WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
+                       WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
        }
-       return r ? r : upages;
+
+       return r ? r : mpages;
 }
 
 /**
  * svm_migrate_vram_to_ram - migrate svm range from device to system
  * @prange: range structure
  * @mm: process mm, use current->mm if NULL
+ * @start_mgr: start page that needs to be migrated to sys ram
+ * @last_mgr: last page that needs to be migrated to sys ram
  * @trigger: reason of migration
  * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
  *
  * 0 - OK, otherwise error code
  */
 int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+                           unsigned long start_mgr, unsigned long last_mgr,
                            uint32_t trigger, struct page *fault_page)
 {
        struct kfd_node *node;
        unsigned long addr;
        unsigned long start;
        unsigned long end;
-       unsigned long upages = 0;
+       unsigned long mpages = 0;
        long r = 0;
 
+       /* this prange has no vram pages to migrate to sys ram */
        if (!prange->actual_loc) {
                pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
                         prange->start, prange->last);
                return 0;
        }
 
+       if (start_mgr < prange->start || last_mgr > prange->last) {
+               pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+                        start_mgr, last_mgr, prange->start, prange->last);
+               return -EFAULT;
+       }
+
        node = svm_range_get_node_by_id(prange, prange->actual_loc);
        if (!node) {
                pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
                return -ENODEV;
        }
        pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
-                prange->svms, prange, prange->start, prange->last,
+                prange->svms, prange, start_mgr, last_mgr,
                 prange->actual_loc);
 
-       start = prange->start << PAGE_SHIFT;
-       end = (prange->last + 1) << PAGE_SHIFT;
+       start = start_mgr << PAGE_SHIFT;
+       end = (last_mgr + 1) << PAGE_SHIFT;
 
        for (addr = start; addr < end;) {
                unsigned long next;
                        pr_debug("failed %ld to migrate prange %p\n", r, prange);
                        break;
                } else {
-                       upages += r;
+                       mpages += r;
                }
                addr = next;
        }
 
-       if (r >= 0 && !upages) {
-               svm_range_vram_node_free(prange);
-               prange->actual_loc = 0;
+       if (r >= 0) {
+               prange->vram_pages -= mpages;
+
+               /* prange has no vram pages left: set its actual_loc to
+                * sys ram and drop its svm_bo ref
+                */
+               if (prange->vram_pages == 0 && prange->ttm_res) {
+                       prange->actual_loc = 0;
+                       svm_range_vram_node_free(prange);
+               }
        }
 
        return r < 0 ? r : 0;
  * svm_migrate_vram_to_vram - migrate svm range from device to device
  * @prange: range structure
  * @best_loc: the device to migrate to
+ * @start: start page to migrate to gpu node best_loc
+ * @last: last page to migrate to gpu node best_loc
  * @mm: process mm, use current->mm if NULL
  * @trigger: reason of migration
  *
  * Context: Process context, caller hold mmap read lock, svms lock, prange lock
  *
+ * migrate all vram pages in prange to sys ram, then migrate
+ * [start, last] pages from sys ram to gpu node best_loc.
+ *
  * Return:
  * 0 - OK, otherwise error code
  */
 static int
 svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
-                        struct mm_struct *mm, uint32_t trigger)
+                       unsigned long start, unsigned long last,
+                       struct mm_struct *mm, uint32_t trigger)
 {
        int r, retries = 3;
 
        pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
 
        do {
-               r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
+               r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
+                                           trigger, NULL);
                if (r)
                        return r;
        } while (prange->actual_loc && --retries);
        if (prange->actual_loc)
                return -EDEADLK;
 
-       return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+       return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
 }
 
 int
 svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
+                   unsigned long start, unsigned long last,
                    struct mm_struct *mm, uint32_t trigger)
 {
-       if  (!prange->actual_loc)
-               return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+       if (!prange->actual_loc || prange->actual_loc == best_loc)
+               return svm_migrate_ram_to_vram(prange, best_loc, start, last,
+                                              mm, trigger);
        else
-               return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
+               return svm_migrate_vram_to_vram(prange, best_loc, start, last,
+                                               mm, trigger);
 
 }
 
  */
 static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 {
+       unsigned long start, last, size;
        unsigned long addr = vmf->address;
        struct svm_range_bo *svm_bo;
-       enum svm_work_list_ops op;
-       struct svm_range *parent;
        struct svm_range *prange;
        struct kfd_process *p;
        struct mm_struct *mm;
 
        mutex_lock(&p->svms.lock);
 
-       prange = svm_range_from_addr(&p->svms, addr, &parent);
+       prange = svm_range_from_addr(&p->svms, addr, NULL);
        if (!prange) {
                pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
                r = -EFAULT;
                goto out_unlock_svms;
        }
 
-       mutex_lock(&parent->migrate_mutex);
-       if (prange != parent)
-               mutex_lock_nested(&prange->migrate_mutex, 1);
+       mutex_lock(&prange->migrate_mutex);
 
        if (!prange->actual_loc)
                goto out_unlock_prange;
 
-       svm_range_lock(parent);
-       if (prange != parent)
-               mutex_lock_nested(&prange->lock, 1);
-       r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
-       if (prange != parent)
-               mutex_unlock(&prange->lock);
-       svm_range_unlock(parent);
-       if (r) {
-               pr_debug("failed %d to split range by granularity\n", r);
-               goto out_unlock_prange;
-       }
+       /* Align migration range start and size to granularity size */
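+       /* e.g. the default granularity 9 gives size 0x200 pages: a fault
+        * at page 0x1234 selects [0x1200 0x13ff] before clamping to prange
+        */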
+       size = 1UL << prange->granularity;
+       start = max(ALIGN_DOWN(addr, size), prange->start);
+       last = min(ALIGN(addr + 1, size) - 1, prange->last);
 
-       r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
-                                   KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
-                                   vmf->page);
+       r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
+                                   KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
        if (r)
                pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
-                        r, prange->svms, prange, prange->start, prange->last);
-
-       /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
-       if (p->xnack_enabled && parent == prange)
-               op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
-       else
-               op = SVM_OP_UPDATE_RANGE_NOTIFIER;
-       svm_range_add_list_work(&p->svms, parent, mm, op);
-       schedule_deferred_list_work(&p->svms);
+                       r, prange->svms, prange, start, last);
 
 out_unlock_prange:
-       if (prange != parent)
-               mutex_unlock(&prange->migrate_mutex);
-       mutex_unlock(&parent->migrate_mutex);
+       mutex_unlock(&prange->migrate_mutex);
 out_unlock_svms:
        mutex_unlock(&p->svms.lock);
 out_unref_process:
 
 static int
 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
                      unsigned long offset, unsigned long npages,
-                     unsigned long *hmm_pfns, uint32_t gpuidx)
+                     unsigned long *hmm_pfns, uint32_t gpuidx, uint64_t *vram_pages)
 {
        enum dma_data_direction dir = DMA_BIDIRECTIONAL;
        dma_addr_t *addr = prange->dma_addr[gpuidx];
        struct device *dev = adev->dev;
        struct page *page;
+       uint64_t vram_pages_dev;
        int i, r;
 
        if (!addr) {
                prange->dma_addr[gpuidx] = addr;
        }
 
+       vram_pages_dev = 0;
        addr += offset;
        for (i = 0; i < npages; i++) {
                if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
                if (is_zone_device_page(page)) {
                        struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
 
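+                       /* zone device pages are vram backed, count them */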
+                       vram_pages_dev++;
                        addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
                                   bo_adev->vm_manager.vram_base_offset -
                                   bo_adev->kfd.pgmap.range.start;
                pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
                                     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
        }
+       *vram_pages = vram_pages_dev;
        return 0;
 }
 
 static int
 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
                  unsigned long offset, unsigned long npages,
-                 unsigned long *hmm_pfns)
+                 unsigned long *hmm_pfns, uint64_t *vram_pages)
 {
        struct kfd_process *p;
        uint32_t gpuidx;
                }
 
                r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
-                                         hmm_pfns, gpuidx);
+                                         hmm_pfns, gpuidx, vram_pages);
                if (r)
                        break;
        }
        INIT_LIST_HEAD(&prange->child_list);
        atomic_set(&prange->invalid, 0);
        prange->validate_timestamp = 0;
+       prange->vram_pages = 0;
        mutex_init(&prange->migrate_mutex);
        mutex_init(&prange->lock);
 
                         prange->start, prange->last);
                mutex_lock(&prange->lock);
                prange->svm_bo = NULL;
+               /* prange should not hold any vram pages at this point */
+               WARN_ONCE(prange->actual_loc, "prange should not hold vram pages");
                mutex_unlock(&prange->lock);
 
                spin_lock(&svm_bo->list_lock);
        new->svm_bo = svm_range_bo_ref(old->svm_bo);
        new->ttm_res = old->ttm_res;
 
+       /* set new's vram_pages to the old range's for now; the accurate
+        * vram_pages will be updated during mapping
+        */
+       new->vram_pages = min(old->vram_pages, new->npages);
+
        spin_lock(&new->svm_bo->list_lock);
        list_add(&new->svm_bo_list, &new->svm_bo->range_list);
        spin_unlock(&new->svm_bo->list_lock);
        list_add_tail(&pchild->child_list, &prange->child_list);
 }
 
-/**
- * svm_range_split_by_granularity - collect ranges within granularity boundary
- *
- * @p: the process with svms list
- * @mm: mm structure
- * @addr: the vm fault address in pages, to split the prange
- * @parent: parent range if prange is from child list
- * @prange: prange to split
- *
- * Trims @prange to be a single aligned block of prange->granularity if
- * possible. The head and tail are added to the child_list in @parent.
- *
- * Context: caller must hold mmap_read_lock and prange->lock
- *
- * Return:
- * 0 - OK, otherwise error code
- */
-int
-svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
-                              unsigned long addr, struct svm_range *parent,
-                              struct svm_range *prange)
-{
-       struct svm_range *head, *tail;
-       unsigned long start, last, size;
-       int r;
-
-       /* Align splited range start and size to granularity size, then a single
-        * PTE will be used for whole range, this reduces the number of PTE
-        * updated and the L1 TLB space used for translation.
-        */
-       size = 1UL << prange->granularity;
-       start = ALIGN_DOWN(addr, size);
-       last = ALIGN(addr + 1, size) - 1;
-
-       pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
-                prange->svms, prange->start, prange->last, start, last, size);
-
-       if (start > prange->start) {
-               r = svm_range_split(prange, start, prange->last, &head);
-               if (r)
-                       return r;
-               svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
-       }
-
-       if (last < prange->last) {
-               r = svm_range_split(prange, prange->start, last, &tail);
-               if (r)
-                       return r;
-               svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
-       }
-
-       /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
-       if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
-               prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
-               pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
-                        prange, prange->start, prange->last,
-                        SVM_OP_ADD_RANGE_AND_MAP);
-       }
-       return 0;
-}
 static bool
 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
 {
  * 5. Release page table (and SVM BO) reservation
  */
 static int svm_range_validate_and_map(struct mm_struct *mm,
+                                     unsigned long map_start, unsigned long map_last,
                                      struct svm_range *prange, int32_t gpuidx,
                                      bool intr, bool wait, bool flush_tlb)
 {
        struct svm_validate_context *ctx;
        unsigned long start, end, addr;
        struct kfd_process *p;
+       uint64_t vram_pages;
        void *owner;
        int32_t idx;
        int r = 0;
                }
        }
 
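+       /* count vram-backed pages seen while dma mapping each vma */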
+       vram_pages = 0;
        start = prange->start << PAGE_SHIFT;
        end = (prange->last + 1) << PAGE_SHIFT;
        for (addr = start; !r && addr < end; ) {
                struct hmm_range *hmm_range;
+               unsigned long map_start_vma;
+               unsigned long map_last_vma;
                struct vm_area_struct *vma;
+               uint64_t vram_pages_vma;
                unsigned long next = 0;
                unsigned long offset;
                unsigned long npages;
                if (!r) {
                        offset = (addr - start) >> PAGE_SHIFT;
                        r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
-                                             hmm_range->hmm_pfns);
+                                             hmm_range->hmm_pfns, &vram_pages_vma);
                        if (r)
                                pr_debug("failed %d to dma map range\n", r);
+                       else
+                               vram_pages += vram_pages_vma;
                }
 
                svm_range_lock(prange);
                        r = -EAGAIN;
                }
 
-               if (!r)
-                       r = svm_range_map_to_gpus(prange, offset, npages, readonly,
-                                                 ctx->bitmap, wait, flush_tlb);
+               if (!r) {
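+                       /* map only the intersection of the requested
+                        * [map_start, map_last] window with this vma chunk
+                        */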
+                       map_start_vma = max(map_start, prange->start + offset);
+                       map_last_vma = min(map_last, prange->start + offset + npages - 1);
+                       if (map_start_vma <= map_last_vma) {
+                               offset = map_start_vma - prange->start;
+                               npages = map_last_vma - map_start_vma + 1;
+                               r = svm_range_map_to_gpus(prange, offset, npages, readonly,
+                                                         ctx->bitmap, wait, flush_tlb);
+                       }
+               }
 
                if (!r && next == end)
                        prange->mapped_to_gpu = true;
                addr = next;
        }
 
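+       /* only update the vram page count once the whole range was walked */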
+       if (addr == end) {
+               prange->vram_pages = vram_pages;
+
+               /* if prange does not include any vram page and it
+                * has not yet released its svm_bo, drop the svm_bo
+                * reference and set its actual_loc to sys ram
+                */
+               if (!vram_pages && prange->ttm_res) {
+                       prange->actual_loc = 0;
+                       svm_range_vram_node_free(prange);
+               }
+       }
+
        svm_range_unreserve_bos(ctx);
        if (!r)
                prange->validate_timestamp = ktime_get_boottime();
                 */
                mutex_lock(&prange->migrate_mutex);
 
-               r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-                                              false, true, false);
+               r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+                                              MAX_GPU_INSTANCE, false, true, false);
                if (r)
                        pr_debug("failed %d to map 0x%lx to gpus\n", r,
                                 prange->start);
        new->actual_loc = old->actual_loc;
        new->granularity = old->granularity;
        new->mapped_to_gpu = old->mapped_to_gpu;
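+       /* the duplicate starts with the same vram page count */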
+       new->vram_pages = old->vram_pages;
        bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
        bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
 
                        uint32_t vmid, uint32_t node_id,
                        uint64_t addr, bool write_fault)
 {
+       unsigned long start, last, size;
        struct mm_struct *mm = NULL;
        struct svm_range_list *svms;
        struct svm_range *prange;
        kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
                                       write_fault, timestamp);
 
-       if (prange->actual_loc != best_loc) {
+       /* Align migration range start and size to granularity size */
+       size = 1UL << prange->granularity;
+       start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
+       last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
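+       /* migrate unless both source and destination are sys ram */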
+       if (prange->actual_loc != 0 || best_loc != 0) {
                migration = true;
+
                if (best_loc) {
-                       r = svm_migrate_to_vram(prange, best_loc, mm,
-                                       KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+                       r = svm_migrate_to_vram(prange, best_loc, start, last,
+                                       mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
                        if (r) {
                                pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
                                         r, addr);
                                /* Fallback to system memory if migration to
                                 * VRAM failed
                                 */
-                               if (prange->actual_loc)
-                                       r = svm_migrate_vram_to_ram(prange, mm,
-                                          KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
-                                          NULL);
+                               if (prange->actual_loc && prange->actual_loc != best_loc)
+                                       r = svm_migrate_vram_to_ram(prange, mm, start, last,
+                                               KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
                                else
                                        r = 0;
                        }
                } else {
-                       r = svm_migrate_vram_to_ram(prange, mm,
-                                       KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
-                                       NULL);
+                       r = svm_migrate_vram_to_ram(prange, mm, start, last,
+                                       KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
                }
                if (r) {
                        pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
-                                r, svms, prange->start, prange->last);
+                                r, svms, start, last);
                        goto out_unlock_range;
                }
        }
 
-       r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
+       r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
+                                      false, false);
        if (r)
                pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
-                        r, svms, prange->start, prange->last);
+                        r, svms, start, last);
 
        kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
                                     migration);
        *migrated = false;
        best_loc = svm_range_best_prefetch_location(prange);
 
-       if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
-           best_loc == prange->actual_loc)
+       /* when best_loc is a gpu node and the same as prange->actual_loc
+        * we still need to do the migration, as prange->actual_loc != 0
+        * does not mean all pages in prange are in vram. hmm migrate will
+        * pick up the right pages during migration.
+        */
+       if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
+           (best_loc == 0 && prange->actual_loc == 0))
                return 0;
 
        if (!best_loc) {
-               r = svm_migrate_vram_to_ram(prange, mm,
+               r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
                                        KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
                *migrated = !r;
                return r;
        }
 
-       r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+       r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
+                               mm, KFD_MIGRATE_TRIGGER_PREFETCH);
        *migrated = !r;
 
        return r;
 
                mutex_lock(&prange->migrate_mutex);
                do {
+                       /* migrate all vram pages in this prange to sys ram;
+                        * after that prange->actual_loc should be zero
+                        */
                        r = svm_migrate_vram_to_ram(prange, mm,
+                                       prange->start, prange->last,
                                        KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
                } while (!r && prange->actual_loc && --retries);
 
 
                flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
 
-               r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-                                              true, true, flush_tlb);
+               r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+                                              MAX_GPU_INSTANCE, true, true, flush_tlb);
                if (r)
                        pr_debug("failed %d to map svm range\n", r);
 
                pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
                         prange, prange->start, prange->last);
                mutex_lock(&prange->migrate_mutex);
-               r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-                                              true, true, prange->mapped_to_gpu);
+               r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+                                              MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
                if (r)
                        pr_debug("failed %d on remap svm range\n", r);
                mutex_unlock(&prange->migrate_mutex);