        if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
            bo->flags & XE_BO_INTERNAL_TEST)) {
                struct ttm_place *place = &(bo->placements[0]);
-               bool vram;
 
                if (mem_type_is_vram(place->mem_type)) {
                        XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
 
-                       place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &vram) -
+                       place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
                                       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
                        place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
 
  * address, such as printing debug information, but not in cases where memory is
  * written based on this result.
  */
-dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset,
-                     size_t page_size, bool *is_vram)
+dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
 {
        struct xe_res_cursor cur;
        u64 page;
        page = offset >> PAGE_SHIFT;
        offset &= (PAGE_SIZE - 1);
 
-       *is_vram = xe_bo_is_vram(bo);
-
-       if (!*is_vram && !xe_bo_is_stolen(bo)) {
+       if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
                XE_BUG_ON(!bo->ttm.ttm);
 
                xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
        }
 }
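
As the kernel-doc fragment above says, the returned address is only meant for cases like debug output, never as a basis for memory writes. A minimal, hypothetical sketch of such a caller under the new signature (the bo and p variables are assumed to exist at the call site and are not part of this patch):

        /* Illustrative only: dump a bo's backing address for debugging. */
        dma_addr_t addr = __xe_bo_addr(bo, 0, PAGE_SIZE);

        drm_printf(p, "bo addr: 0x%llx (%s)\n", (u64)addr,
                   xe_bo_is_vram(bo) ? "VRAM" : "SYS");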
 
-dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
-                     size_t page_size, bool *is_vram)
+dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
 {
        if (!READ_ONCE(bo->ttm.pin_count))
                xe_bo_assert_held(bo);
-       return __xe_bo_addr(bo, offset, page_size, is_vram);
+       return __xe_bo_addr(bo, offset, page_size);
 }
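
With the out-parameter gone, callers that also need to know the placement query it in a separate call, which is exactly what the GGTT/PPGTT encode hunks below do. A hedged sketch of the resulting pattern (names such as XE_PAGE_PRESENT and XE_GGTT_PTE_LM are taken from this patch; the snippet itself is illustrative, not part of it):

        /* Illustrative only: address lookup and placement check are now two calls. */
        u64 pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE) | XE_PAGE_PRESENT;

        if (xe_bo_is_vram(bo))
                pte |= XE_GGTT_PTE_LM;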
 
 int xe_bo_vmap(struct xe_bo *bo)
 
 }
 
 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
-dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset,
-                     size_t page_size, bool *is_vram);
-dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
-                     size_t page_size, bool *is_vram);
+dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
+dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
 
 static inline dma_addr_t
 xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
 {
-       bool is_vram;
-
-       return xe_bo_addr(bo, 0, page_size, &is_vram);
+       return xe_bo_addr(bo, 0, page_size);
 }
 
 static inline u32
 
 {
        struct xe_device *xe = xe_bo_device(bo);
        u64 pte;
-       bool is_vram;
 
-       pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE, &is_vram);
+       pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
        pte |= XE_PAGE_PRESENT;
 
-       if (is_vram)
+       if (xe_bo_is_vram(bo))
                pte |= XE_GGTT_PTE_LM;
 
        /* FIXME: vfunc + pass in caching rules */
 
        struct xe_device *xe = vm->xe;
        size_t cleared_size;
        u64 vram_addr;
-       bool is_vram;
 
        if (!xe_device_has_flat_ccs(xe))
                return 0;
                return PTR_ERR(m->cleared_bo);
 
        xe_map_memset(xe, &m->cleared_bo->vmap, 0, 0x00, cleared_size);
-       vram_addr = xe_bo_addr(m->cleared_bo, 0, XE_PAGE_SIZE, &is_vram);
-       XE_BUG_ON(!is_vram);
+       vram_addr = xe_bo_addr(m->cleared_bo, 0, XE_PAGE_SIZE);
        m->cleared_vram_ofs = xe_migrate_vram_ofs(vram_addr);
 
        return 0;
                        level++;
                }
        } else {
-               bool is_vram;
-               u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE, &is_vram);
+               u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
 
                m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
 
                if (xe->info.supports_usm) {
                        batch = tile->primary_gt->usm.bb_pool->bo;
-                       batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE,
-                                               &is_vram);
+                       batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
                        m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
                }
        }
         */
        XE_BUG_ON(update->qwords > 0x1ff);
        if (!ppgtt_ofs) {
-               bool is_vram;
-
                ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0,
-                                                          XE_PAGE_SIZE,
-                                                          &is_vram));
-               XE_BUG_ON(!is_vram);
+                                                          XE_PAGE_SIZE));
        }
 
        do {
 
                  const enum xe_cache_level level)
 {
        u64 pde;
-       bool is_vram;
 
-       pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE, &is_vram);
+       pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
        pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
 
-       XE_WARN_ON(IS_DGFX(xe_bo_device(bo)) && !is_vram);
-
        /* FIXME: I don't think the PPAT handling is correct for MTL */
 
        if (level != XE_CACHE_NONE)
                  u32 pt_level)
 {
        u64 pte;
-       bool is_vram;
 
-       pte = xe_bo_addr(bo, offset, XE_PAGE_SIZE, &is_vram);
-       if (is_vram)
+       pte = xe_bo_addr(bo, offset, XE_PAGE_SIZE);
+       if (xe_bo_is_vram(bo))
                pte |= XE_PPGTT_PTE_LM;
 
        return __pte_encode(pte, cache, NULL, pt_level);
 
                return 0;
        }
        if (vm->pt_root[gt_id]) {
-               addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE,
-                                 &is_vram);
-               drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
+               addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
+               is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
+               drm_printf(p, " VM root: A:0x%llx %s\n", addr,
+                          is_vram ? "VRAM" : "SYS");
        }
 
        drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
                                addr = 0;
                        }
                } else {
-                       addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE, &is_vram);
+                       addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
+                       is_vram = xe_bo_is_vram(xe_vma_bo(vma));
                }
                drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
                           xe_vma_start(vma), xe_vma_end(vma) - 1,