*/
int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
gt->tlb_invalidation.seqno = 1;
INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
spin_lock_init(&gt->tlb_invalidation.pending_lock);
INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
xe_gt_tlb_fence_timeout);
+ err = drmm_mutex_init(&xe->drm, &gt->tlb_invalidation.seqno_lock);
+ if (err)
+ return err;
+
gt->tlb_invalidation.job_wq =
drmm_alloc_ordered_workqueue(&gt_to_xe(gt)->drm, "gt-tbl-inval-job-wq",
WQ_MEM_RECLAIM);
* appear.
*/
- mutex_lock(&gt->uc.guc.ct.lock);
+ mutex_lock(&gt->tlb_invalidation.seqno_lock);
spin_lock_irq(&gt->tlb_invalidation.pending_lock);
cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
/*
&gt->tlb_invalidation.pending_fences, link)
invalidation_fence_signal(gt_to_xe(gt), fence);
spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- mutex_unlock(&gt->uc.guc.ct.lock);
+ mutex_unlock(&gt->tlb_invalidation.seqno_lock);
}
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
* need to be updated.
*/
- mutex_lock(&guc->ct.lock);
+ mutex_lock(&gt->tlb_invalidation.seqno_lock);
seqno = gt->tlb_invalidation.seqno;
fence->seqno = seqno;
trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
action[1] = seqno;
- ret = xe_guc_ct_send_locked(&guc->ct, action, len,
- G2H_LEN_DW_TLB_INVALIDATE, 1);
+ ret = xe_guc_ct_send(&guc->ct, action, len,
+ G2H_LEN_DW_TLB_INVALIDATE, 1);
if (!ret) {
spin_lock_irq(&gt->tlb_invalidation.pending_lock);
/*
if (!gt->tlb_invalidation.seqno)
gt->tlb_invalidation.seqno = 1;
}
- mutex_unlock(&guc->ct.lock);
+ mutex_unlock(&gt->tlb_invalidation.seqno_lock);
xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
return ret;