If the user doesn't require CPU access for the buffer, then
I915_BO_ALLOC_GPU_ONLY should be used, in order to prioritise allocating
in the non-mappable portion of LMEM on devices with a small BAR.
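
For illustration only, a driver-internal allocation that the CPU never
touches would simply OR the new flag into the existing create call (the
helper below is hypothetical and not part of this patch):

  /*
   * Hypothetical example: a scratch buffer that is only ever written by
   * the GPU. On small BAR parts the GPU_ONLY hint steers the allocation
   * towards the non-mappable portion of LMEM; regardless of configuration,
   * CPU mappings of such an object are rejected.
   */
  static struct drm_i915_gem_object *
  alloc_gpu_only_scratch(struct drm_i915_private *i915, resource_size_t size)
  {
          return i915_gem_object_create_lmem(i915, size,
                                             I915_BO_ALLOC_VOLATILE |
                                             I915_BO_ALLOC_GPU_ONLY);
  }
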
v2(Thomas):
  - The BO_ALLOC_TOPDOWN naming here is poor, since it is misleading on
    systems that don't even have small BAR. A better name is GPU_ONLY,
    which is accurate regardless of the configuration.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Acked-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220225145502.331818-3-matthew.auld@intel.com
 #define I915_BO_ALLOC_PM_VOLATILE BIT(4)
 /* Object needs to be restored early using memcpy during resume */
 #define I915_BO_ALLOC_PM_EARLY    BIT(5)
+/*
+ * Object is likely never accessed by the CPU. This will prioritise allocating
+ * the BO in the non-mappable portion of lmem. This is merely a hint, and if
+ * dealing with userspace objects the CPU fault handler is free to ignore this.
+ */
+#define I915_BO_ALLOC_GPU_ONLY   BIT(6)
 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
                             I915_BO_ALLOC_VOLATILE | \
                             I915_BO_ALLOC_CPU_CLEAR | \
                             I915_BO_ALLOC_USER | \
                             I915_BO_ALLOC_PM_VOLATILE | \
-                            I915_BO_ALLOC_PM_EARLY)
-#define I915_BO_READONLY          BIT(6)
-#define I915_TILING_QUIRK_BIT     7 /* unknown swizzling; do not release! */
-#define I915_BO_PROTECTED         BIT(8)
-#define I915_BO_WAS_BOUND_BIT     9
+                            I915_BO_ALLOC_PM_EARLY | \
+                            I915_BO_ALLOC_GPU_ONLY)
+#define I915_BO_READONLY          BIT(7)
+#define I915_TILING_QUIRK_BIT     8 /* unknown swizzling; do not release! */
+#define I915_BO_PROTECTED         BIT(9)
+#define I915_BO_WAS_BOUND_BIT     10
        /**
         * @mem_flags - Mutable placement-related flags
         *
 
            !i915_gem_object_has_iomem(obj))
                return ERR_PTR(-ENXIO);
 
+       if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
+               return ERR_PTR(-EINVAL);
+
        assert_object_held(obj);
 
        pinned = !(type & I915_MAP_OVERRIDE);
 
 
        GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
 
+       if (WARN_ON_ONCE(flags & I915_BO_ALLOC_GPU_ONLY &&
+                        (flags & I915_BO_ALLOC_CPU_CLEAR ||
+                         flags & I915_BO_ALLOC_PM_EARLY)))
+               return ERR_PTR(-EINVAL);
+
        if (!mem)
                return ERR_PTR(-ENODEV);
 
 
        place->mem_type = intel_region_to_ttm_type(mr);
 
        if (flags & I915_BO_ALLOC_CONTIGUOUS)
-               place->flags = TTM_PL_FLAG_CONTIGUOUS;
+               place->flags |= TTM_PL_FLAG_CONTIGUOUS;
        if (mr->io_size && mr->io_size < mr->total) {
-               place->fpfn = 0;
-               place->lpfn = mr->io_size >> PAGE_SHIFT;
+               if (flags & I915_BO_ALLOC_GPU_ONLY) {
+                       place->flags |= TTM_PL_FLAG_TOPDOWN;
+               } else {
+                       place->fpfn = 0;
+                       place->lpfn = mr->io_size >> PAGE_SHIFT;
+               }
        }
 }
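
For reference, a rough sketch of the two outcomes of the branch above on a
small BAR device (io_size < total); illustrative comment only, not part of
the patch:

  /*
   * default:        place->lpfn = io_size >> PAGE_SHIFT, so the allocation
   *                 must stay inside the CPU-mappable window;
   *
   * ALLOC_GPU_ONLY: place->flags |= TTM_PL_FLAG_TOPDOWN, so the allocation
   *                 is placed from the top of LMEM downwards, keeping the
   *                 mappable window free for objects that do need CPU access.
   */
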
 
        if (!obj)
                return VM_FAULT_SIGBUS;
 
+       if (obj->flags & I915_BO_ALLOC_GPU_ONLY)
+               return -EINVAL;
+
        /* Sanity check that we allow writing into this object */
        if (unlikely(i915_gem_object_is_readonly(obj) &&
                     area->vm_flags & VM_WRITE))
 
        struct i915_vma *vma;
        int ret;
 
-       obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
+       obj = i915_gem_object_create_lmem(i915, size,
+                                         I915_BO_ALLOC_VOLATILE |
+                                         I915_BO_ALLOC_GPU_ONLY);
        if (IS_ERR(obj))
                obj = i915_gem_object_create_stolen(i915, size);
        if (IS_ERR(obj))
 
        void __iomem *ptr;
        int err;
 
+       if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
+               return IO_ERR_PTR(-EINVAL);
+
        if (!i915_gem_object_is_lmem(vma->obj)) {
                if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
                        err = -ENODEV;
 
        struct ttm_resource *res;
        int ret;
 
+       if (flags & I915_BO_ALLOC_CONTIGUOUS)
+               place.flags |= TTM_PL_FLAG_CONTIGUOUS;
        if (mem->io_size && mem->io_size < mem->total) {
-               place.fpfn = 0;
-               place.lpfn = mem->io_size >> PAGE_SHIFT;
+               if (flags & I915_BO_ALLOC_GPU_ONLY) {
+                       place.flags |= TTM_PL_FLAG_TOPDOWN;
+               } else {
+                       place.fpfn = 0;
+                       place.lpfn = mem->io_size >> PAGE_SHIFT;
+               }
        }
 
        mock_bo.base.size = size;
        mock_bo.bdev = &mem->i915->bdev;
-       place.flags = flags;
 
        ret = man->func->alloc(man, &mock_bo, &place, &res);
        if (ret == -ENOSPC)
 
 
 static int mock_region_get_pages(struct drm_i915_gem_object *obj)
 {
-       unsigned int flags;
        struct sg_table *pages;
        int err;
 
-       flags = 0;
-       if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
-               flags |= TTM_PL_FLAG_CONTIGUOUS;
-
        obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
                                                      obj->base.size,
-                                                     flags);
+                                                     obj->flags);
        if (IS_ERR(obj->mm.res))
                return PTR_ERR(obj->mm.res);