drm/xe/migrate: Handle clear ccs logic for xe2 dgfx
author    Akshata Jahagirdar <akshata.jahagirdar@intel.com>
Wed, 17 Jul 2024 21:10:02 +0000 (21:10 +0000)
committer Matt Roper <matthew.d.roper@intel.com>
Thu, 18 Jul 2024 00:02:27 +0000 (17:02 -0700)
For Xe2 dGPU, we clear the bo by modifying the VRAM using an
uncompressed PAT index, which indirectly updates the compression
status to uncompressed, i.e. zeroed CCS.
So xe_migrate_clear() should be updated for BMG to not emit CCS
surface-copy commands.
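
For reference, the gating introduced in the xe_migrate.c hunk below
boils down to a single predicate; a minimal sketch, assuming the
existing xe_device_has_flat_ccs(), GRAPHICS_VER() and IS_DGFX()
helpers from the driver:

    /*
     * CCS surface-copy emission is only needed when flat CCS is
     * present and the device is not an Xe2+ discrete part, where a
     * VRAM clear through an uncompressed PAT index already leaves
     * the CCS zeroed.
     */
    static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
    {
            return xe_device_has_flat_ccs(xe) &&
                   !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
    }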

v2: Moved xe_device_needs_ccs_emit() to xe_migrate.c and renamed it
to xe_migrate_needs_ccs_emit() since it's very specific to
migration. (Matt)

Signed-off-by: Akshata Jahagirdar <akshata.jahagirdar@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/8dd869dd8dda5e17ace28c04f1a48675f5540874.1721250309.git.akshata.jahagirdar@intel.com
drivers/gpu/drm/xe/xe_migrate.c

index fa23a7e7ec4352470762b3404c9e3316077dc06d..85eec95c9bc271131223c511e46a94daa81019cf 100644 (file)
@@ -347,6 +347,11 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
        return logical_mask;
 }
 
+static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
+{
+       return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
+}
+
 /**
  * xe_migrate_init() - Initialize a migrate context
  * @tile: Back-pointer to the tile we're initializing for.
@@ -420,7 +425,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
                return ERR_PTR(err);
 
        if (IS_DGFX(xe)) {
-               if (xe_device_has_flat_ccs(xe))
+               if (xe_migrate_needs_ccs_emit(xe))
                        /* min chunk size corresponds to 4K of CCS Metadata */
                        m->min_chunk_size = SZ_4K * SZ_64K /
                                xe_device_ccs_bytes(xe, SZ_64K);
@@ -1034,7 +1039,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
                                        clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
                                        avail_pts);
 
-               if (xe_device_has_flat_ccs(xe))
+               if (xe_migrate_needs_ccs_emit(xe))
                        batch_size += EMIT_COPY_CCS_DW;
 
                /* Clear commands */
@@ -1062,7 +1067,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
                if (!clear_system_ccs)
                        emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
 
-               if (xe_device_has_flat_ccs(xe)) {
+               if (xe_migrate_needs_ccs_emit(xe)) {
                        emit_copy_ccs(gt, bb, clear_L0_ofs, true,
                                      m->cleared_mem_ofs, false, clear_L0);
                        flush_flags = MI_FLUSH_DW_CCS;