struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry);
 void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+                                 struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 
        r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
 
 error_validate:
-       if (r)
+       if (r) {
+               amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
                ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+       }
 
 error_reserve:
        if (need_mmap_lock)
  **/
 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
+       struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        unsigned i;
 
+       amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
+
        if (!error) {
                /* Sort the buffer list from the smallest to largest buffer,
                 * which affects the order of buffers in the LRU list.
 
 
                list_add(&entry->tv.head, duplicates);
        }
+
+}
+
+/**
+ * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
+ *
+ * @adev: amdgpu device instance
+ * @vm: vm providing the BOs
+ *
+ * Walk every page directory entry of @vm (indices 0..max_pde_used
+ * inclusive) and move each page table BO to the tail of the global TTM
+ * LRU list, marking it as most recently used. Takes and releases the
+ * global lru_lock internally, so the caller must not already hold it.
+ */
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+                                 struct amdgpu_vm *vm)
+{
+       struct ttm_bo_global *glob = adev->mman.bdev.glob;
+       unsigned i;
+
+       spin_lock(&glob->lru_lock);
+       for (i = 0; i <= vm->max_pde_used; ++i) {
+               struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+
+               /* Skip entries without a backing BO -- presumably page
+                * tables not yet allocated; NOTE(review): confirm NULL
+                * robj only means "not allocated" here.
+                */
+               if (!entry->robj)
+                       continue;
+
+               ttm_bo_move_to_lru_tail(&entry->robj->tbo);
+       }
+       spin_unlock(&glob->lru_lock);
 }
 
 /**