return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}
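+/*
+ * devm release action: unmap the VRAM mapping and clear the device and
+ * per-tile pointers so nothing dereferences them after teardown.
+ */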
+static void vram_fini(void *arg)
+{
+	struct xe_device *xe = arg;
+	struct xe_tile *tile;
+	int id;
+
+	if (xe->mem.vram.mapping)
+		iounmap(xe->mem.vram.mapping);
+
+	xe->mem.vram.mapping = NULL;
+
+	for_each_tile(tile, xe, id)
+		tile->mem.vram.mapping = NULL;
+}
+
int xe_mmio_probe_vram(struct xe_device *xe)
{
struct xe_tile *tile;
drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
&available_size);
-	return 0;
+	return devm_add_action_or_reset(xe->drm.dev, vram_fini, xe);
}
-void xe_mmio_probe_tiles(struct xe_device *xe)
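+/* devm release action: drop the per-tile MMIO register pointers. */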
+static void tiles_fini(void *arg)
+{
+	struct xe_device *xe = arg;
+	struct xe_tile *tile;
+	int id;
+
+	for_each_tile(tile, xe, id)
+		tile->mmio.regs = NULL;
+}
+
+int xe_mmio_probe_tiles(struct xe_device *xe)
{
size_t tile_mmio_size = SZ_16M, tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
u8 id, tile_count = xe->info.tile_count;
regs += tile_mmio_ext_size;
}
}
+
+	return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}
static void mmio_fini(void *arg)
{
	struct xe_device *xe = arg;

	pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
-	if (xe->mem.vram.mapping)
-		iounmap(xe->mem.vram.mapping);
-
-	xe->mem.vram.mapping = NULL;
	xe->mmio.regs = NULL;
}