list_add(&entry->tv.head, validated);
 }
 
+/**
+ * amdgpu_vm_move_to_lru_tail - move all BOs to the end of the LRU
+ *
+ * @adev: amdgpu device pointer
+ * @vm: vm providing the BOs
+ *
+ * Move all BOs to the end of the LRU and remember their positions so that
+ * they can later be moved to the tail together in a single bulk move.
+ */
+void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
+                               struct amdgpu_vm *vm)
+{
+       struct ttm_bo_global *glob = adev->mman.bdev.glob;
+       struct amdgpu_vm_bo_base *bo_base;
+
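+       /* Fast path: the remembered positions are still valid, bulk move them */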
+       if (vm->bulk_moveable) {
+               spin_lock(&glob->lru_lock);
+               ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
+               spin_unlock(&glob->lru_lock);
+               return;
+       }
+
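+       /* Positions are stale, rebuild the bulk move from the idle list */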
+       memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
+
+       spin_lock(&glob->lru_lock);
+       list_for_each_entry(bo_base, &vm->idle, vm_status) {
+               struct amdgpu_bo *bo = bo_base->bo;
+
+               if (!bo->parent)
+                       continue;
+
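+               /* Move BO and shadow to the LRU tail, recording their positions */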
+               ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
+               if (bo->shadow)
+                       ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
+                                               &vm->lru_bulk_move);
+       }
+       spin_unlock(&glob->lru_lock);
+
+       vm->bulk_moveable = true;
+}
+
 /**
  * amdgpu_vm_validate_pt_bos - validate the page table BOs
  *
                              int (*validate)(void *p, struct amdgpu_bo *bo),
                              void *param)
 {
-       struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct amdgpu_vm_bo_base *bo_base, *tmp;
        int r = 0;
 
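+       /* Evicted BOs invalidate the remembered LRU positions */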
+       vm->bulk_moveable &= list_empty(&vm->evicted);
+
        list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
 
                if (r)
                        break;
 
-               if (bo->parent) {
-                       spin_lock(&glob->lru_lock);
-                       ttm_bo_move_to_lru_tail(&bo->tbo, NULL);
-                       if (bo->shadow)
-                               ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL);
-                       spin_unlock(&glob->lru_lock);
-               }
-
                if (bo->tbo.type != ttm_bo_type_kernel) {
                        spin_lock(&vm->moved_lock);
                        list_move(&bo_base->vm_status, &vm->moved);
                }
        }
 
-       spin_lock(&glob->lru_lock);
-       list_for_each_entry(bo_base, &vm->idle, vm_status) {
-               struct amdgpu_bo *bo = bo_base->bo;
-
-               if (!bo->parent)
-                       continue;
-
-               ttm_bo_move_to_lru_tail(&bo->tbo, NULL);
-               if (bo->shadow)
-                       ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL);
-       }
-       spin_unlock(&glob->lru_lock);
-
        return r;
 }
 
                return r;
 
        vm->pte_support_ats = false;
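+       /* New VMs start out eligible for bulk moves */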
+       vm->bulk_moveable = true;
 
        if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
                vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
 
 #include <linux/rbtree.h>
 #include <drm/gpu_scheduler.h>
 #include <drm/drm_file.h>
+#include <drm/ttm/ttm_bo_driver.h>
 
 #include "amdgpu_sync.h"
 #include "amdgpu_ring.h"
 
        /* Some basic info about the task */
        struct amdgpu_task_info task_info;
+
+       /* Remembered LRU positions of this VM's BOs for bulk moves */
+       struct ttm_lru_bulk_move lru_bulk_move;
+       /* Marks whether a bulk move of this VM's BOs can be done */
+       bool                    bulk_moveable;
 };
 
 struct amdgpu_vm_manager {
 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
 
 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
-                        struct amdgpu_task_info *task_info);
+                            struct amdgpu_task_info *task_info);
 
 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
 
+void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
+                               struct amdgpu_vm *vm);
+
 #endif
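
For context, here is a minimal, hypothetical sketch of how a command-submission
path could use the new helper once a job has been committed. Only
amdgpu_vm_move_to_lru_tail() itself comes from this patch; the surrounding
function and its name are illustrative assumptions, not part of the change:

/* Illustrative only: after the validated buffers have been fenced, do one
 * bulk move so the VM's per-VM BOs and page tables stay grouped at the
 * LRU tail instead of touching every BO individually.
 */
static void example_after_submit(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm)
{
        /* ... fence buffers, drop reservations ... */

        amdgpu_vm_move_to_lru_tail(adev, vm);
}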