return lret;
 }
 
+/**
+ * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
+ * up in system memory.
+ * @bo: The buffer object to prepare.
+ *
+ * On successful completion, the object's backup pages are allocated. Expectation
+ * is that this is called from the PM notifier prior to suspend/hibernation.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
+{
+       struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+       struct xe_bo *backup;
+       int ret = 0;
+
+       xe_bo_lock(bo, false);
+
+       xe_assert(xe, !bo->backup_obj);
+
+       /*
+        * Since this is called from the PM notifier we might have raced with
+        * someone unpinning this between dropping the pinned list lock and
+        * grabbing the above bo lock.
+        */
+       if (!xe_bo_is_pinned(bo))
+               goto out_unlock_bo;
+
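+       /* Only VRAM-resident objects need a system memory backup. */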
+       if (!xe_bo_is_vram(bo))
+               goto out_unlock_bo;
+
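+       /* NORESTORE objects are not restored on resume, so no backup is needed. */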
+       if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+               goto out_unlock_bo;
+
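+       /*
+        * The backup is created against the parent's reservation object, so
+        * the bo lock taken above protects both objects.
+        */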
+       backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+                                       DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+                                       XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+                                       XE_BO_FLAG_PINNED);
+       if (IS_ERR(backup)) {
+               ret = PTR_ERR(backup);
+               goto out_unlock_bo;
+       }
+
+       backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
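+       /* Pin the backup to keep its pages resident until the eviction copy. */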
+       ttm_bo_pin(&backup->ttm);
+       bo->backup_obj = backup;
+
+out_unlock_bo:
+       xe_bo_unlock(bo);
+       return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
+ * @bo: The buffer object to undo the prepare for.
+ *
+ * The backup object is removed, if still present. Expectation is that this is
+ * called from the PM notifier when undoing the prepare step.
+ *
+ * Return: Always returns 0.
+ */
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
+{
+       xe_bo_lock(bo, false);
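+       /* Drop the pin and reference taken in xe_bo_notifier_prepare_pinned(). */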
+       if (bo->backup_obj) {
+               ttm_bo_unpin(&bo->backup_obj->ttm);
+               xe_bo_put(bo->backup_obj);
+               bo->backup_obj = NULL;
+       }
+       xe_bo_unlock(bo);
+
+       return 0;
+}
+
 /**
  * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
  * @bo: The buffer object to move.
 int xe_bo_evict_pinned(struct xe_bo *bo)
 {
        struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
-       struct xe_bo *backup;
+       struct xe_bo *backup = bo->backup_obj;
+       bool backup_created = false; /* backup allocated here, not by the notifier */
        bool unmap = false;
        int ret = 0;
 
        if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
                goto out_unlock_bo;
 
-       backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
-                                       DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
-                                       XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
-                                       XE_BO_FLAG_PINNED);
-       if (IS_ERR(backup)) {
-               ret = PTR_ERR(backup);
-               goto out_unlock_bo;
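+       /* No backup was pre-allocated by the PM notifier, so create one now. */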
+       if (!backup) {
+               backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+                                               DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+                                               XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+                                               XE_BO_FLAG_PINNED);
+               if (IS_ERR(backup)) {
+                       ret = PTR_ERR(backup);
+                       goto out_unlock_bo;
+               }
+               backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+               backup_created = true;
        }
-       backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
 
        if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
                struct xe_migrate *migrate;
                                   bo->size);
        }
 
-       bo->backup_obj = backup;
+       if (!bo->backup_obj)
+               bo->backup_obj = backup;
 
 out_backup:
        xe_bo_vunmap(backup);
-       if (ret)
+       if (ret && backup_created)
                xe_bo_put(backup);
 out_unlock_bo:
        if (unmap)
 
        xe_bo_lock(bo, false);
 
-       ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
-       if (ret)
-               goto out_backup;
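+       /*
+        * A pinned backup is already resident and can't be moved by TTM,
+        * so validation is only needed for unpinned backups.
+        */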
+       if (!xe_bo_is_pinned(backup)) {
+               ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
+               if (ret)
+                       goto out_unlock_bo;
+       }
 
        if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
                struct xe_migrate *migrate;
                if (iosys_map_is_null(&bo->vmap)) {
                        ret = xe_bo_vmap(bo);
                        if (ret)
-                               goto out_unlock_bo;
+                               goto out_backup;
                        unmap = true;
                }
 
 
 out_backup:
        xe_bo_vunmap(backup);
-       if (!bo->backup_obj)
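+       /*
+        * If the bo no longer tracks the backup, drop the pin taken by the
+        * PM notifier (if any) along with our reference.
+        */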
+       if (!bo->backup_obj) {
+               if (xe_bo_is_pinned(backup))
+                       ttm_bo_unpin(&backup->ttm);
                xe_bo_put(backup);
+       }
 out_unlock_bo:
        if (unmap)
                xe_bo_vunmap(bo);
                xe_assert(xe, !list_empty(&bo->pinned_link));
                list_del_init(&bo->pinned_link);
                spin_unlock(&xe->pinned.lock);
+
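+               /* Unpinning makes any notifier-prepared backup stale; release it. */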
+               if (bo->backup_obj) {
+                       if (xe_bo_is_pinned(bo->backup_obj))
+                               ttm_bo_unpin(&bo->backup_obj->ttm);
+                       xe_bo_put(bo->backup_obj);
+                       bo->backup_obj = NULL;
+               }
        }
        ttm_bo_unpin(&bo->ttm);
        if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
 
                ret = pinned_fn(bo);
                if (ret && pinned_list != new_list) {
                        spin_lock(&xe->pinned.lock);
-                       list_move(&bo->pinned_link, pinned_list);
+                       /*
+                        * We might no longer be pinned, since the PM notifier
+                        * can call this. If the pinned link is now empty, keep it
+                        * that way.
+                        */
+                       if (!list_empty(&bo->pinned_link))
+                               list_move(&bo->pinned_link, pinned_list);
                        spin_unlock(&xe->pinned.lock);
                }
                xe_bo_put(bo);
        return ret;
 }
 
+/**
+ * xe_bo_notifier_prepare_all_pinned() - Pre-allocate the backing pages for all
+ * pinned VRAM objects which need to be saved.
+ * @xe: xe device
+ *
+ * Should be called from the PM notifier when preparing for s3/s4.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe)
+{
+       int ret;
+
+       ret = xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+                                   &xe->pinned.early.kernel_bo_present,
+                                   xe_bo_notifier_prepare_pinned);
+       if (!ret)
+               ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+                                           &xe->pinned.late.kernel_bo_present,
+                                           xe_bo_notifier_prepare_pinned);
+
+       return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_all_pinned() - Remove the backing pages for all
+ * pinned VRAM objects which have been restored.
+ * @xe: xe device
+ *
+ * Should be called from the PM notifier after exiting s3/s4 (either on
+ * success or failure).
+ */
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe)
+{
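+       /* xe_bo_notifier_unprepare_pinned() always returns 0, so ignore the result. */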
+       (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+                                   &xe->pinned.early.kernel_bo_present,
+                                   xe_bo_notifier_unprepare_pinned);
+
+       (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+                                   &xe->pinned.late.kernel_bo_present,
+                                   xe_bo_notifier_unprepare_pinned);
+}
+
 /**
  * xe_bo_evict_all_user - evict all non-pinned user BOs from VRAM
  * @xe: xe device