* later stage when it is scheduled by another ioctl called by
                 * CRIU master process for the target pid for restore.
                 */
-               atomic_inc(&mem->invalid);
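+               /* mem->invalid is updated under the notifier lock so that the
+                * restore worker and the MMU notifier see a consistent value.
+                */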
+               mutex_lock(&process_info->notifier_lock);
+               mem->invalid++;
+               mutex_unlock(&process_info->notifier_lock);
                mutex_unlock(&process_info->lock);
                return 0;
        }
                        return -ENOMEM;
 
                mutex_init(&info->lock);
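+               /* notifier_lock protects the state shared with the MMU
+                * notifier: the per-process eviction counter and the per-BO
+                * invalidation state.
+                */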
+               mutex_init(&info->notifier_lock);
                INIT_LIST_HEAD(&info->vm_list_head);
                INIT_LIST_HEAD(&info->kfd_bo_list);
                INIT_LIST_HEAD(&info->userptr_valid_list);
                }
 
                info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
-               atomic_set(&info->evicted_bos, 0);
                INIT_DELAYED_WORK(&info->restore_userptr_work,
                                  amdgpu_amdkfd_restore_userptr_worker);
 
                put_pid(info->pid);
 create_evict_fence_fail:
                mutex_destroy(&info->lock);
+               mutex_destroy(&info->notifier_lock);
                kfree(info);
        }
        return ret;
                cancel_delayed_work_sync(&process_info->restore_userptr_work);
                put_pid(process_info->pid);
                mutex_destroy(&process_info->lock);
+               mutex_destroy(&process_info->notifier_lock);
                kfree(process_info);
        }
 }
 
        mutex_lock(&pinfo->lock);
        pr_debug("scheduling work\n");
-       atomic_inc(&pinfo->evicted_bos);
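+       /* Bump the eviction counter so the userptr restore worker does not
+        * bail out early before validating the BOs whose page fetch was
+        * deferred during CRIU restore.
+        */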
+       mutex_lock(&pinfo->notifier_lock);
+       pinfo->evicted_bos++;
+       mutex_unlock(&pinfo->notifier_lock);
        if (!READ_ONCE(pinfo->block_mmu_notifications)) {
                ret = -EINVAL;
                goto out_unlock;
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);
 
-       /* No more MMU notifiers */
-       amdgpu_hmm_unregister(mem->bo);
+       /* Clean up user pages and MMU notifiers */
+       if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+               amdgpu_hmm_unregister(mem->bo);
+               mutex_lock(&process_info->notifier_lock);
+               amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+               mutex_unlock(&process_info->notifier_lock);
+       }
 
        ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
        if (unlikely(ret))
         */
        mutex_lock(&mem->process_info->lock);
 
+       /* Take the notifier lock. If we find an invalid userptr BO, we can be
+        * sure that the MMU notifier is no longer running concurrently and
+        * that the queues are actually stopped.
+        */
+       if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+               mutex_lock(&mem->process_info->notifier_lock);
+               is_invalid_userptr = !!mem->invalid;
+               mutex_unlock(&mem->process_info->notifier_lock);
+       }
+
        mutex_lock(&mem->lock);
 
        domain = mem->domain;
  *
  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
  * cannot do any memory allocations, and cannot take any locks that
- * are held elsewhere while allocating memory. Therefore this is as
- * simple as possible, using atomic counters.
+ * are held elsewhere while allocating memory.
  *
  * It doesn't do anything to the BO itself. The real work happens in
  * restore, where we get updated page addresses. This function only
  * ensures that GPU access to the BO is stopped.
  */
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
-                               struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+                               unsigned long cur_seq, struct kgd_mem *mem)
 {
        struct amdkfd_process_info *process_info = mem->process_info;
-       int evicted_bos;
        int r = 0;
 
-       /* Do not process MMU notifications until stage-4 IOCTL is received */
+       /* Do not process MMU notifications during CRIU restore until the
+        * KFD_CRIU_OP_RESUME IOCTL is received.
+        */
        if (READ_ONCE(process_info->block_mmu_notifications))
                return 0;
 
-       atomic_inc(&mem->invalid);
-       evicted_bos = atomic_inc_return(&process_info->evicted_bos);
-       if (evicted_bos == 1) {
+       mutex_lock(&process_info->notifier_lock);
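+       /* Advancing the notifier sequence under the lock lets
+        * amdgpu_ttm_tt_get_user_pages_done() detect that pages fetched by
+        * the restore worker were invalidated here and must be fetched again.
+        */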
+       mmu_interval_set_seq(mni, cur_seq);
+
+       mem->invalid++;
+       if (++process_info->evicted_bos == 1) {
                /* First eviction, stop the queues */
-               r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
+               r = kgd2kfd_quiesce_mm(mni->mm,
+                                      KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
                if (r)
                        pr_err("Failed to quiesce KFD\n");
                schedule_delayed_work(&process_info->restore_userptr_work,
                        msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
        }
+       mutex_unlock(&process_info->notifier_lock);
 
        return r;
 }
        struct kgd_mem *mem, *tmp_mem;
        struct amdgpu_bo *bo;
        struct ttm_operation_ctx ctx = { false, false };
-       int invalid, ret;
+       uint32_t invalid;
+       int ret = 0;
 
-       /* Move all invalidated BOs to the userptr_inval_list and
-        * release their user pages by migration to the CPU domain
-        */
+       mutex_lock(&process_info->notifier_lock);
+
+       /* Move all invalidated BOs to the userptr_inval_list */
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_valid_list,
-                                validate_list.head) {
-               if (!atomic_read(&mem->invalid))
-                       continue; /* BO is still valid */
-
-               bo = mem->bo;
-
-               if (amdgpu_bo_reserve(bo, true))
-                       return -EAGAIN;
-               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-               ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-               amdgpu_bo_unreserve(bo);
-               if (ret) {
-                       pr_err("%s: Failed to invalidate userptr BO\n",
-                              __func__);
-                       return -EAGAIN;
-               }
-
-               list_move_tail(&mem->validate_list.head,
-                              &process_info->userptr_inval_list);
-       }
-
-       if (list_empty(&process_info->userptr_inval_list))
-               return 0; /* All evicted userptr BOs were freed */
+                                validate_list.head)
+               if (mem->invalid)
+                       list_move_tail(&mem->validate_list.head,
+                                      &process_info->userptr_inval_list);
 
        /* Go through userptr_inval_list and update any invalid user_pages */
        list_for_each_entry(mem, &process_info->userptr_inval_list,
                            validate_list.head) {
-               struct hmm_range *range;
-
-               invalid = atomic_read(&mem->invalid);
+               invalid = mem->invalid;
                if (!invalid)
                        /* BO hasn't been invalidated since the last
-                        * revalidation attempt. Keep its BO list.
+                        * revalidation attempt. Keep its page list.
                         */
                        continue;
 
                bo = mem->bo;
 
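+               /* Drop any pages and hmm_range left over from a previous
+                * fetch; a fresh range is requested below.
+                */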
+               amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+               mem->range = NULL;
+
+               /* BO reservations and getting user pages (hmm_range_fault)
+                * must happen outside the notifier lock
+                */
+               mutex_unlock(&process_info->notifier_lock);
+
+               /* Move the BO to the system (CPU) domain if necessary to
+                * unmap and free the SG table.
+                */
+               if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
+                       if (amdgpu_bo_reserve(bo, true))
+                               return -EAGAIN;
+                       amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+                       ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+                       amdgpu_bo_unreserve(bo);
+                       if (ret) {
+                               pr_err("%s: Failed to invalidate userptr BO\n",
+                                      __func__);
+                               return -EAGAIN;
+                       }
+               }
+
                /* Get updated user pages */
                ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
-                                                  &range);
+                                                  &mem->range);
                if (ret) {
                        pr_debug("Failed %d to get user pages\n", ret);
 
                         */
                        if (ret != -EFAULT)
                                return ret;
-               } else {
 
-                       /*
-                        * FIXME: Cannot ignore the return code, must hold
-                        * notifier_lock
-                        */
-                       amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+                       ret = 0;
                }
 
+               mutex_lock(&process_info->notifier_lock);
+
                /* Mark the BO as valid unless it was invalidated
                 * again concurrently.
                 */
-               if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
-                       return -EAGAIN;
+               if (mem->invalid != invalid) {
+                       ret = -EAGAIN;
+                       goto unlock_out;
+               }
+               mem->invalid = 0;
        }
 
-       return 0;
+unlock_out:
+       mutex_unlock(&process_info->notifier_lock);
+
+       return ret;
 }
 
 /* Validate invalid userptr BOs
  *
- * Validates BOs on the userptr_inval_list, and moves them back to the
- * userptr_valid_list. Also updates GPUVM page tables with new page
- * addresses and waits for the page table updates to complete.
+ * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
+ * with new page addresses and waits for the page table updates to complete.
  */
 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 {
                        }
                }
 
-               list_move_tail(&mem->validate_list.head,
-                              &process_info->userptr_valid_list);
-
                /* Update mapping. If the BO was not validated
                 * (because we couldn't get user pages), this will
                 * clear the page table entries, which will result in
                        if (ret) {
                                pr_err("%s: update PTE failed\n", __func__);
                                /* make sure this gets validated again */
-                               atomic_inc(&mem->invalid);
+                               mutex_lock(&process_info->notifier_lock);
+                               mem->invalid++;
+                               mutex_unlock(&process_info->notifier_lock);
                                goto unreserve_out;
                        }
                }
        return ret;
 }
 
+/* Confirm that all user pages are valid while holding the notifier lock
+ *
+ * Moves valid BOs from the userptr_inval_list back to userptr_valid_list.
+ */
+static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
+{
+       struct kgd_mem *mem, *tmp_mem;
+       int ret = 0;
+
+       list_for_each_entry_safe(mem, tmp_mem,
+                                &process_info->userptr_inval_list,
+                                validate_list.head) {
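+               /* get_user_pages_done() checks the range against the notifier
+                * sequence advanced in amdgpu_amdkfd_evict_userptr(); a stale
+                * sequence means the pages were invalidated again after they
+                * were fetched.
+                */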
+               bool valid = amdgpu_ttm_tt_get_user_pages_done(
+                               mem->bo->tbo.ttm, mem->range);
+
+               mem->range = NULL;
+               if (!valid) {
+                       WARN(!mem->invalid, "Invalid BO not marked invalid");
+                       ret = -EAGAIN;
+                       continue;
+               }
+               WARN(mem->invalid, "Valid BO is marked invalid");
+
+               list_move_tail(&mem->validate_list.head,
+                              &process_info->userptr_valid_list);
+       }
+
+       return ret;
+}
+
 /* Worker callback to restore evicted userptr BOs
  *
  * Tries to update and validate all userptr BOs. If successful and no
                             restore_userptr_work);
        struct task_struct *usertask;
        struct mm_struct *mm;
-       int evicted_bos;
+       uint32_t evicted_bos;
 
-       evicted_bos = atomic_read(&process_info->evicted_bos);
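+       /* Snapshot the eviction counter; it is compared again under the
+        * notifier lock below to detect evictions that raced with this
+        * restore attempt.
+        */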
+       mutex_lock(&process_info->notifier_lock);
+       evicted_bos = process_info->evicted_bos;
+       mutex_unlock(&process_info->notifier_lock);
        if (!evicted_bos)
                return;
 
         * and we can just restart the queues.
         */
        if (!list_empty(&process_info->userptr_inval_list)) {
-               if (atomic_read(&process_info->evicted_bos) != evicted_bos)
-                       goto unlock_out; /* Concurrent eviction, try again */
-
                if (validate_invalid_user_pages(process_info))
                        goto unlock_out;
        }
         * be a first eviction that calls quiesce_mm. The eviction
         * reference counting inside KFD will handle this case.
         */
-       if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
-           evicted_bos)
-               goto unlock_out;
-       evicted_bos = 0;
+       mutex_lock(&process_info->notifier_lock);
+       if (process_info->evicted_bos != evicted_bos)
+               goto unlock_notifier_out;
+
+       if (confirm_valid_user_pages_locked(process_info)) {
+               WARN(1, "User pages unexpectedly invalid");
+               goto unlock_notifier_out;
+       }
+
+       process_info->evicted_bos = evicted_bos = 0;
+
        if (kgd2kfd_resume_mm(mm)) {
                pr_err("%s: Failed to resume KFD\n", __func__);
                /* No recovery from this failure. Probably the CP is
                 */
        }
 
+unlock_notifier_out:
+       mutex_unlock(&process_info->notifier_lock);
 unlock_out:
        mutex_unlock(&process_info->lock);