void xe_gt_debugfs_register(struct xe_gt *gt)
 {
+       struct xe_device *xe = gt_to_xe(gt);
        struct drm_minor *minor = gt_to_xe(gt)->drm.primary;
        struct dentry *root;
        struct drm_info_list *local;
        sprintf(name, "gt%d", gt->info.id);
        root = debugfs_create_dir(name, minor->debugfs_root);
        if (IS_ERR(root)) {
-               XE_WARN_ON("Create GT directory failed");
+               drm_warn(&xe->drm, "Create GT directory failed");
                return;
        }
 
         * passed in (e.g. can't define this on the stack).
         */
 #define DEBUGFS_SIZE   (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
-       local = drmm_kmalloc(&gt_to_xe(gt)->drm, DEBUGFS_SIZE, GFP_KERNEL);
+       local = drmm_kmalloc(&xe->drm, DEBUGFS_SIZE, GFP_KERNEL);
        if (!local)
                return;
 
 
 int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
 {
        struct xe_gt *gt = guc_to_gt(guc);
+       struct xe_device *xe = gt_to_xe(gt);
        struct pf_queue *pf_queue;
        unsigned long flags;
        u32 asid;
                pf_queue->tail = (pf_queue->tail + len) % PF_QUEUE_NUM_DW;
                queue_work(gt->usm.pf_wq, &pf_queue->worker);
        } else {
-               XE_WARN_ON("PF Queue full, shouldn't be possible");
+               drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
        }
        spin_unlock_irqrestore(&pf_queue->lock, flags);
 
 
                                                           adj_len);
                break;
        default:
-               XE_WARN_ON("NOT_POSSIBLE");
+               drm_warn(&xe->drm, "NOT_POSSIBLE");
        }
 
        if (ret)
 
                                          struct xe_exec_queue *q)
 {
        MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
+       struct xe_device *xe = guc_to_xe(guc);
        int ret;
 
        set_min_preemption_timeout(guc, q);
        if (!ret) {
                struct xe_gpu_scheduler *sched = &q->guc->sched;
 
-               XE_WARN_ON("Pending enable failed to respond");
+               drm_warn(&xe->drm, "Pending enable failed to respond");
                xe_sched_submission_start(sched);
                xe_gt_reset_async(q->gt);
                xe_sched_tdr_queue_imm(sched);
        struct xe_guc_exec_queue *ge =
                container_of(w, struct xe_guc_exec_queue, lr_tdr);
        struct xe_exec_queue *q = ge->q;
+       struct xe_guc *guc = exec_queue_to_guc(q);
+       struct xe_device *xe = guc_to_xe(guc);
        struct xe_gpu_scheduler *sched = &ge->sched;
 
        XE_WARN_ON(!xe_exec_queue_is_lr(q));
                                         !exec_queue_pending_disable(q) ||
                                         guc_read_stopped(guc), HZ * 5);
                if (!ret) {
-                       XE_WARN_ON("Schedule disable failed to respond");
+                       drm_warn(&xe->drm, "Schedule disable failed to respond");
                        xe_sched_submission_start(sched);
                        xe_gt_reset_async(q->gt);
                        return;
                                         !exec_queue_pending_disable(q) ||
                                         guc_read_stopped(guc), HZ * 5);
                if (!ret || guc_read_stopped(guc)) {
-                       XE_WARN_ON("Schedule disable failed to respond");
+                       drm_warn(&xe->drm, "Schedule disable failed to respond");
                        xe_sched_add_pending_job(sched, job);
                        xe_sched_submission_start(sched);
                        xe_gt_reset_async(q->gt);
 
        }
 
        if (copy_to_user(address, &capture, sizeof(capture)))
-               XE_WARN_ON("Copy to user failed");
+               drm_warn(&vm->xe->drm, "Copy to user failed");
 
        if (in_kthread) {
                kthread_unuse_mm(vm->async_ops.error_capture.mm);
                        return -ENODATA;
                break;
        default:
-               XE_WARN_ON("NOT POSSIBLE");
+               drm_warn(&xe->drm, "NOT POSSIBLE");
                return -EINVAL;
        }
 
                       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
                break;
        default:
-               XE_WARN_ON("NOT POSSIBLE");
+               drm_warn(&xe->drm, "NOT POSSIBLE");
        }
 }
 #else
                }
                break;
        default:
-               XE_WARN_ON("NOT POSSIBLE");
+               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
                ops = ERR_PTR(-EINVAL);
        }
 
                op->flags |= XE_VMA_OP_COMMITTED;
                break;
        default:
-               XE_WARN_ON("NOT POSSIBLE");
+               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
        }
 
        return err;
                        /* Nothing to do */
                        break;
                default:
-                       XE_WARN_ON("NOT POSSIBLE");
+                       drm_warn(&vm->xe->drm, "NOT POSSIBLE");
                }
 
                last_op = op;
                                     op->flags & XE_VMA_OP_LAST);
                break;
        default:
-               XE_WARN_ON("NOT POSSIBLE");
+               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
        }
 
        if (err)
                                          op);
                break;
        default:
-               XE_WARN_ON("NOT POSSIBLE");
+               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
        }
 
        return ret;
                /* Nothing to do */
                break;
        default:
-               XE_WARN_ON("NOT POSSIBLE");
+               drm_warn(&vm->xe->drm, "NOT POSSIBLE");
        }
 }
 
 
 static int madvise_pin(struct xe_device *xe, struct xe_vm *vm,
                       struct xe_vma **vmas, int num_vmas, u64 value)
 {
-       XE_WARN_ON("NIY");
+       drm_warn(&xe->drm, "NIY");
        return 0;
 }
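
For illustration only, here is a minimal userspace sketch (not kernel code) of why this conversion matters: XE_WARN_ON wraps WARN_ON, so passing a string literal evaluates that literal as the warning condition. A string literal is a non-NULL pointer, so the condition is always true and a full warning with backtrace fires on every call, when all the caller wanted was a log line. drm_warn() is a plain device-prefixed message. The MOCK_* macros below are simplified stand-ins, not the real kernel definitions.

```c
#include <stdio.h>

/* Simplified stand-in for WARN_ON(): report when the condition is true. */
#define MOCK_WARN_ON(cond) \
	({ int __ret = !!(cond); \
	   if (__ret) \
		printf("WARNING: '%s' evaluated true (kernel would dump a backtrace here)\n", #cond); \
	   __ret; })

/* XE_WARN_ON is just WARN_ON in the driver; mirror that here. */
#define MOCK_XE_WARN_ON MOCK_WARN_ON

/* Simplified stand-in for drm_warn(): log a message, no backtrace. */
static void mock_drm_warn(const char *msg)
{
	printf("[drm] *WARNING* %s\n", msg);
}

int main(void)
{
	/* Always triggers: the string literal is a non-NULL pointer. */
	MOCK_XE_WARN_ON("Create GT directory failed");

	/* The intended behaviour: just print the warning. */
	mock_drm_warn("Create GT directory failed");

	return 0;
}
```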