{
        struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
                                                     bdev);
-       struct ttm_resource_manager *man =
-               ttm_manager_type(bo->bdev, bo->resource->mem_type);
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        unsigned long ccs_pages = 0;
        enum ttm_caching caching;
        if (!i915_tt)
                return NULL;
 
-       if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
-           man->use_tt)
+       if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
+           ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
                page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
 
        caching = i915_ttm_select_tt_caching(obj);
                return VM_FAULT_SIGBUS;
        }
 
-       if (!i915_ttm_resource_mappable(bo->resource)) {
+       /*
+        * This must be swapped out with shmem ttm_tt (pipeline-gutting).
+        * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
+        * far as doing a ttm_bo_move_null(), which should skip all the
+        * other junk.
+        */
+       if (!bo->resource) {
+               struct ttm_operation_ctx ctx = {
+                       .interruptible = true,
+                       .no_wait_gpu = true, /* should be idle already */
+               };
+
+               GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+
+               ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+               if (ret) {
+                       dma_resv_unlock(bo->base.resv);
+                       return VM_FAULT_SIGBUS;
+               }
+       } else if (!i915_ttm_resource_mappable(bo->resource)) {
                int err = -ENODEV;
                int i;
 
 
 {
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        unsigned int cache_level;
+       unsigned int mem_flags;
        unsigned int i;
+       int mem_type;
+
+       /*
+        * We might have been purged (or swapped out) if the resource is NULL,
+        * in which case the SYSTEM placement is the closest match to describe
+        * the current domain. If the object is ever used in this state then we
+        * will require moving it again.
+        */
+       if (!bo->resource) {
+               mem_flags = I915_BO_FLAG_STRUCT_PAGE;
+               mem_type = I915_PL_SYSTEM;
+               cache_level = I915_CACHE_NONE;
+       } else {
+               mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+                       I915_BO_FLAG_STRUCT_PAGE;
+               mem_type = bo->resource->mem_type;
+               cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+                                                  bo->ttm);
+       }
 
        /*
         * If object was moved to an allowable region, update the object
         * region to consider it migrated. Note that if it's currently not
         * in an allowable region, it's evicted and we don't update the
         * object region.
         */
-       if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+       if (intel_region_to_ttm_type(obj->mm.region) != mem_type) {
                for (i = 0; i < obj->mm.n_placements; ++i) {
                        struct intel_memory_region *mr = obj->mm.placements[i];
 
-                       if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+                       if (intel_region_to_ttm_type(mr) == mem_type &&
                            mr != obj->mm.region) {
                                i915_gem_object_release_memory_region(obj);
                                i915_gem_object_init_memory_region(obj, mr);
        }
 
        obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+       obj->mem_flags |= mem_flags;
 
-       obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
-               I915_BO_FLAG_STRUCT_PAGE;
-
-       cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
-                                          bo->ttm);
        i915_gem_object_set_cache_coherency(obj, cache_level);
 }
 
                return 0;
        }
 
+       if (!bo->resource) {
+               if (dst_mem->mem_type != TTM_PL_SYSTEM) {
+                       hop->mem_type = TTM_PL_SYSTEM;
+                       hop->flags = TTM_PL_FLAG_TEMPORARY;
+                       return -EMULTIHOP;
+               }
+
+               /*
+                * This is only reached when first creating the object, or if
+                * the object was purged or swapped out (pipeline-gutting). For
+                * the former we can safely skip all of the below since we are
+                * only using a dummy SYSTEM placement here. And with the latter
+                * we will always re-enter here with bo->resource set correctly
+                * (as per the above), since this is part of a multi-hop
+                * sequence, where at the end we can do the move for real.
+                *
+                * The special case here is when the dst_mem is TTM_PL_SYSTEM,
+                * which doesn't require any kind of move, so it should be safe
+                * to skip all the below and call ttm_bo_move_null() here, where
+                * the caller in __i915_ttm_get_pages() will take care of the
+                * rest, since we should have a valid ttm_tt.
+                */
+               ttm_bo_move_null(bo, dst_mem);
+               return 0;
+       }
+
        ret = i915_ttm_move_notify(bo);
        if (ret)
                return ret;