if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
- struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_vram_region *vram = xe_device_get_root_tile(xe)->mem.vram;
/*
* If we need to be able to access the clear-color value stored in
* the buffer, then we require that such buffers are also CPU
* accessible. This is important on small-bar systems where
* only some subset of VRAM is CPU accessible.
*/
- if (tile->mem.vram.io_size < tile->mem.vram.usable_size) {
+ if (xe_vram_region_io_size(vram) < xe_vram_region_usable_size(vram)) {
ret = -EINVAL;
goto err;
}
* We don't currently expect this to ever be placed in the
* stolen portion.
*/
- if (phys_base >= tile0->mem.vram.usable_size) {
+ if (phys_base >= xe_vram_region_usable_size(tile0->mem.vram)) {
drm_err(&xe->drm,
"Initial plane programming using invalid range, phys_base=%pa\n",
&phys_base);
#include "xe_gt_types.h"
#include "xe_step.h"
+#include "xe_vram.h"
/**
* DOC: Xe Asserts
const struct xe_tile *__tile = (tile); \
char __buf[10] __maybe_unused; \
xe_assert_msg(tile_to_xe(__tile), condition, "tile: %u VRAM %s\n" msg, \
- __tile->id, ({ string_get_size(__tile->mem.vram.actual_physical_size, 1, \
+ __tile->id, ({ string_get_size( \
+ xe_vram_region_actual_physical_size(__tile->mem.vram), 1, \
STRING_UNITS_2, __buf, sizeof(__buf)); __buf; }), ## arg); \
})
}
}
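+/*
+ * The device-level VRAM region is a single DRM-managed allocation; the
+ * per-tile regions are allocated separately in xe_tile_alloc_vram().
+ */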
+static int xe_device_vram_alloc(struct xe_device *xe)
+{
+ struct xe_vram_region *vram;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ xe->mem.vram = vram;
+ return 0;
+}
+
/**
* xe_device_probe_early: Device early probe
* @xe: xe device instance
xe->wedged.mode = xe_modparam.wedged_mode;
+ err = xe_device_vram_alloc(xe);
+ if (err)
+ return err;
+
return 0;
}
ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
* device, such as HBM memory or CXL extension memory.
*/
struct xe_vram_region {
+ /** @tile: Back pointer to tile */
+ struct xe_tile *tile;
/** @io_start: IO start address of this VRAM instance */
resource_size_t io_start;
/**
* Although VRAM is associated with a specific tile, it can
* still be accessed by all tiles' GTs.
*/
- struct xe_vram_region vram;
+ struct xe_vram_region *vram;
/** @mem.ggtt: Global graphics translation table */
struct xe_ggtt *ggtt;
/** @mem: memory info for device */
struct {
/** @mem.vram: VRAM info for device */
- struct xe_vram_region vram;
+ struct xe_vram_region *vram;
/** @mem.sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
/** @mem.sys_mgr: system memory shrinker. */
{
struct xe_tile *tile = gt->tile;
- return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager);
+ return xe_ttm_vram_get_avail(&tile->mem.vram->ttm.manager);
}
static u64 pf_query_max_lmem(struct xe_gt *gt)
#include "xe_sync.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"
+#include "xe_vram.h"
/**
* struct xe_migrate - migrate context.
u64 identity_offset = IDENTITY_OFFSET;
if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
- identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size(xe->mem.vram),
+ SZ_1G);
- addr -= xe->mem.vram.dpa_base;
+ addr -= xe_vram_region_dpa_base(xe->mem.vram);
return addr + (identity_offset << xe_pt_shift(2));
}
static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
{
+ struct xe_vram_region *vram = xe->mem.vram;
+ resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
u64 pos, ofs, flags;
u64 entry;
/* XXX: Unclear if this should be usable_size? */
- u64 vram_limit = xe->mem.vram.actual_physical_size +
- xe->mem.vram.dpa_base;
+ u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
u32 level = 2;
ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
true, 0);
- xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+ xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));
/*
* Use 1GB pages when possible, last chunk always use 2M
* pages as mixing reserved memory (stolen, WOCPM) with a single
* mapping is not allowed on certain platforms.
*/
- for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
+ for (pos = dpa_base; pos < vram_limit;
pos += SZ_1G, ofs += 8) {
if (pos + SZ_1G >= vram_limit) {
entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
+ resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);
xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
pat_index, pt30_ofs);
- xe_assert(xe, xe->mem.vram.actual_physical_size <=
- (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
+ xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
/*
* Identity map the entire vram for compressed pat_index for xe2+
if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
u64 vram_offset = IDENTITY_OFFSET +
- DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
- xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
- IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
+ xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
+ IDENTITY_OFFSET / 2) * SZ_1G);
xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
comp_pat_index, pt31_ofs);
}
* All of these together determine the overall GT count.
*/
for_each_tile(tile, xe, id) {
+ int err;
+
gt = tile->primary_gt;
gt->info.type = XE_GT_TYPE_MAIN;
gt->info.id = tile->id * xe->info.max_gt_per_tile;
gt->info.engine_mask = graphics_desc->hw_engine_mask;
xe->info.gt_count++;
+ err = xe_tile_alloc_vram(tile);
+ if (err)
+ return err;
+
if (MEDIA_VER(xe) < 13 && media_desc)
gt->info.engine_mask |= media_desc->hw_engine_mask;
config->num_params = num_params;
config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
xe->info.devid | (xe->info.revid << 16);
- if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
+ if (xe->mem.vram)
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
}
-static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
-{
- return container_of(vr, struct xe_tile, mem.vram);
-}
-
static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
struct page *page)
{
u64 dpa;
- struct xe_tile *tile = vr_to_tile(vr);
+ struct xe_tile *tile = vr->tile;
u64 pfn = page_to_pfn(page);
u64 offset;
if (!vr && spage) {
vr = page_to_vr(spage);
- tile = vr_to_tile(vr);
+ tile = vr->tile;
}
XE_WARN_ON(spage && page_to_vr(spage) != vr);
static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
{
- return &tile->mem.vram.ttm.mm;
+ return &tile->mem.vram->ttm.mm;
}
static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
list_for_each_entry(block, blocks, link) {
struct xe_vram_region *vr = block->private;
- struct xe_tile *tile = vr_to_tile(vr);
+ struct xe_tile *tile = vr->tile;
struct drm_buddy *buddy = tile_to_buddy(tile);
u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
int i;
}
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
-static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
-{
- return &tile->mem.vram;
-}
-
static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms)
{
- struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
+ struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+ struct xe_tile *tile = vr->tile;
struct xe_device *xe = tile_to_xe(tile);
struct device *dev = xe->drm.dev;
- struct xe_vram_region *vr = tile_to_vr(tile);
struct drm_buddy_block *block;
struct list_head *blocks;
struct xe_bo *bo;
drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
&dpagemap_devmem_ops,
- &tile->mem.vram.dpagemap,
+ &tile->mem.vram->dpagemap,
end - start);
blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
#include "xe_tile_sysfs.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
+#include "xe_vram.h"
/**
* DOC: Multi-tile Design
return 0;
}
+/**
+ * xe_tile_alloc_vram - Allocate per-tile VRAM data structures
+ * @tile: Tile to perform allocations for
+ *
+ * Allocates VRAM per-tile data structures using DRM-managed allocations.
+ * Does not touch the hardware.
+ *
+ * Return: 0 on success, -ENOMEM if allocations fail.
+ */
+int xe_tile_alloc_vram(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_vram_region *vram;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ vram->tile = tile;
+ tile->mem.vram = vram;
+
+ return 0;
+}
+
/**
* xe_tile_init_early - Initialize the tile and primary GT
* @tile: Tile to initialize
struct xe_device *xe = tile_to_xe(tile);
int err;
- if (tile->mem.vram.usable_size) {
- err = xe_ttm_vram_mgr_init(tile, &tile->mem.vram.ttm);
+ if (tile->mem.vram) {
+ err = xe_ttm_vram_mgr_init(tile, &tile->mem.vram->ttm);
if (err)
return err;
xe->info.mem_region_mask |= BIT(tile->id) << 1;
xe_wa_apply_tile_workarounds(tile);
if (xe->info.has_usm && IS_DGFX(xe))
- xe_devm_add(tile, &tile->mem.vram);
+ xe_devm_add(tile, tile->mem.vram);
return xe_tile_sysfs_init(tile);
}
int xe_tile_init_noalloc(struct xe_tile *tile);
int xe_tile_init(struct xe_tile *tile);
+int xe_tile_alloc_vram(struct xe_tile *tile);
+
void xe_tile_migrate_wait(struct xe_tile *tile);
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
{
- return &tile->mem.vram.dpagemap;
+ return &tile->mem.vram->dpagemap;
}
#else
static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
+#include "xe_vram.h"
struct xe_ttm_stolen_mgr {
struct xe_ttm_vram_mgr base;
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
- struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_vram_region *tile_vram = xe_device_get_root_tile(xe)->mem.vram;
+ resource_size_t tile_io_start = xe_vram_region_io_start(tile_vram);
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u64 stolen_size, wopcm_size;
u64 tile_offset;
u64 tile_size;
- tile_offset = tile->mem.vram.io_start - xe->mem.vram.io_start;
- tile_size = tile->mem.vram.actual_physical_size;
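+ /* Offset of the root tile's VRAM within the device LMEM BAR. */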
+ tile_offset = tile_io_start - xe_vram_region_io_start(xe->mem.vram);
+ tile_size = xe_vram_region_actual_physical_size(tile_vram);
/* Use DSM base address instead for stolen memory */
mgr->stolen_base = (xe_mmio_read64_2x32(mmio, DSMBASE) & BDSM_MASK) - tile_offset;
/* Verify usage fits in the actual resource available */
if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
- mgr->io_base = tile->mem.vram.io_start + mgr->stolen_base;
+ mgr->io_base = tile_io_start + mgr->stolen_base;
/*
* There may be few KB of platform dependent reserved memory at the end
int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr)
{
struct xe_device *xe = tile_to_xe(tile);
- struct xe_vram_region *vram = &tile->mem.vram;
+ struct xe_vram_region *vram = tile->mem.vram;
return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id,
- vram->usable_size, vram->io_size,
+ xe_vram_region_usable_size(vram),
+ xe_vram_region_io_size(vram),
PAGE_SIZE);
}
*/
xe_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
- phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
+ phys_addr_t phys = cursor.start + xe_vram_region_io_start(tile->mem.vram);
size_t size = min_t(u64, cursor.size, SZ_2G);
dma_addr_t addr;
* Copyright © 2021-2024 Intel Corporation
*/
+#include <kunit/visibility.h>
#include <linux/pci.h>
#include <drm/drm_managed.h>
resize_vram_bar(xe);
- xe->mem.vram.io_start = pci_resource_start(pdev, LMEM_BAR);
- xe->mem.vram.io_size = pci_resource_len(pdev, LMEM_BAR);
- if (!xe->mem.vram.io_size)
+ xe->mem.vram->io_start = pci_resource_start(pdev, LMEM_BAR);
+ xe->mem.vram->io_size = pci_resource_len(pdev, LMEM_BAR);
+ if (!xe->mem.vram->io_size)
return -EIO;
/* XXX: Need to change when xe link code is ready */
- xe->mem.vram.dpa_base = 0;
+ xe->mem.vram->dpa_base = 0;
/* set up a map to the total memory area. */
- xe->mem.vram.mapping = devm_ioremap_wc(&pdev->dev, xe->mem.vram.io_start,
- xe->mem.vram.io_size);
+ xe->mem.vram->mapping = devm_ioremap_wc(&pdev->dev, xe->mem.vram->io_start,
+ xe->mem.vram->io_size);
return 0;
}
struct xe_tile *tile;
int id;
- xe->mem.vram.mapping = NULL;
+ xe->mem.vram->mapping = NULL;
for_each_tile(tile, xe, id)
- tile->mem.vram.mapping = NULL;
+ tile->mem.vram->mapping = NULL;
}
/**
if (err)
return err;
- drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &xe->mem.vram.io_size);
+ drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &xe->mem.vram->io_start,
+ &xe->mem.vram->io_size);
- io_size = xe->mem.vram.io_size;
+ io_size = xe->mem.vram->io_size;
/* tile specific ranges */
for_each_tile(tile, xe, id) {
if (err)
return err;
- tile->mem.vram.actual_physical_size = tile_size;
- tile->mem.vram.io_start = xe->mem.vram.io_start + tile_offset;
- tile->mem.vram.io_size = min_t(u64, vram_size, io_size);
+ tile->mem.vram->actual_physical_size = tile_size;
+ tile->mem.vram->io_start = xe->mem.vram->io_start + tile_offset;
+ tile->mem.vram->io_size = min_t(u64, vram_size, io_size);
- if (!tile->mem.vram.io_size) {
+ if (!tile->mem.vram->io_size) {
drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
return -ENODEV;
}
- tile->mem.vram.dpa_base = xe->mem.vram.dpa_base + tile_offset;
- tile->mem.vram.usable_size = vram_size;
- tile->mem.vram.mapping = xe->mem.vram.mapping + tile_offset;
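+ /*
+ * Each tile's DPA range and CPU-visible mapping are carved out of the
+ * device-level VRAM region at tile_offset.
+ */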
+ tile->mem.vram->dpa_base = xe->mem.vram->dpa_base + tile_offset;
+ tile->mem.vram->usable_size = vram_size;
+ tile->mem.vram->mapping = xe->mem.vram->mapping + tile_offset;
- if (tile->mem.vram.io_size < tile->mem.vram.usable_size)
+ if (tile->mem.vram->io_size < tile->mem.vram->usable_size)
drm_info(&xe->drm, "Small BAR device\n");
- drm_info(&xe->drm, "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", id,
- tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.usable_size, &tile->mem.vram.io_size);
- drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", id, tile->id,
- &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + (u64)tile->mem.vram.actual_physical_size,
- &tile->mem.vram.io_start, tile->mem.vram.io_start + (u64)tile->mem.vram.io_size);
+ drm_info(&xe->drm,
+ "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n",
+ id, tile->id, &tile->mem.vram->actual_physical_size,
+ &tile->mem.vram->usable_size, &tile->mem.vram->io_size);
+ drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n",
+ id, tile->id, &tile->mem.vram->dpa_base,
+ tile->mem.vram->dpa_base + (u64)tile->mem.vram->actual_physical_size,
+ &tile->mem.vram->io_start,
+ tile->mem.vram->io_start + (u64)tile->mem.vram->io_size);
/* calculate total size using tile size to get the correct HW sizing */
total_size += tile_size;
available_size += vram_size;
- if (total_size > xe->mem.vram.io_size) {
+ if (total_size > xe->mem.vram->io_size) {
drm_info(&xe->drm, "VRAM: %pa is larger than resource %pa\n",
- &total_size, &xe->mem.vram.io_size);
+ &total_size, &xe->mem.vram->io_size);
}
io_size -= min_t(u64, tile_size, io_size);
}
- xe->mem.vram.actual_physical_size = total_size;
+ xe->mem.vram->actual_physical_size = total_size;
- drm_info(&xe->drm, "Total VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &xe->mem.vram.actual_physical_size);
- drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
+ drm_info(&xe->drm, "Total VRAM: %pa, %pa\n", &xe->mem.vram->io_start,
+ &xe->mem.vram->actual_physical_size);
+ drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram->io_start,
&available_size);
return devm_add_action_or_reset(xe->drm.dev, vram_fini, xe);
}
+
+/**
+ * xe_vram_region_io_start - Get the IO start of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the IO start of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_io_start(const struct xe_vram_region *vram)
+{
+ return vram ? vram->io_start : 0;
+}
+
+/**
+ * xe_vram_region_io_size - Get the IO size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the IO size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_io_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->io_size : 0;
+}
+
+/**
+ * xe_vram_region_dpa_base - Get the DPA base of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the DPA base of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_dpa_base(const struct xe_vram_region *vram)
+{
+ return vram ? vram->dpa_base : 0;
+}
+
+/**
+ * xe_vram_region_usable_size - Get the usable size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the usable size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_usable_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->usable_size : 0;
+}
+
+/**
+ * xe_vram_region_actual_physical_size - Get the actual physical size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the actual physical size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_actual_physical_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->actual_physical_size : 0;
+}
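+/* Exported only when CONFIG_KUNIT is enabled (see <kunit/visibility.h>). */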
+EXPORT_SYMBOL_IF_KUNIT(xe_vram_region_actual_physical_size);
#ifndef _XE_VRAM_H_
#define _XE_VRAM_H_
+#include <linux/types.h>
+
struct xe_device;
+struct xe_vram_region;
int xe_vram_probe(struct xe_device *xe);
+resource_size_t xe_vram_region_io_start(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_io_size(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_dpa_base(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_usable_size(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_actual_physical_size(const struct xe_vram_region *vram);
+
#endif