bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
                p->bytes_moved_vis += ctx.bytes_moved;
 
-       if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+       if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains &&
+           !(bo->flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) {
                domain = bo->allowed_domains;
                goto retry;
        }
 
        drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
        INIT_LIST_HEAD(&bo->shadow_list);
        INIT_LIST_HEAD(&bo->va);
+       bo->preferred_domains = preferred_domains;
+       bo->allowed_domains = allowed_domains;
 
        bo->flags = flags;
 
        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
                                 &bo->placement, page_align, &ctx, acc_size,
                                 NULL, resv, &amdgpu_ttm_bo_destroy);
-       if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) {
+       if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device &&
+           !(flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) {
                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
                        flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
                        goto retry;
 
 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID      (1 << 6)
 /* Flag that BO sharing will be explicitly synchronized */
 #define AMDGPU_GEM_CREATE_EXPLICIT_SYNC                (1 << 7)
+/* Flag that allocation must not fall back to other domains on failure */
+#define AMDGPU_GEM_CREATE_NO_FALLBACK          (1 << 8)
 
 struct drm_amdgpu_gem_create_in  {
        /** the requested memory size */